am ce52ba38: Build libdexdump_static.

* commit 'ce52ba385f7f84cd47fdf004640de8f53181114d':
  Build libdexdump_static.
diff --git a/dexdump/DexDump.c b/dexdump/DexDump.c
index bc18213..9353471 100644
--- a/dexdump/DexDump.c
+++ b/dexdump/DexDump.c
@@ -29,14 +29,17 @@
  * - no generic signatures on parameters, e.g. type="java.lang.Class<?>"
  * - class shows declared fields and methods; does not show inherited fields
  */
+
 #include "libdex/DexFile.h"
+
+#include "libdex/CmdUtils.h"
 #include "libdex/DexCatch.h"
 #include "libdex/DexClass.h"
+#include "libdex/DexDebugInfo.h"
 #include "libdex/DexOpcodes.h"
 #include "libdex/DexProto.h"
 #include "libdex/InstrUtils.h"
 #include "libdex/SysUtil.h"
-#include "libdex/CmdUtils.h"
 
 #include <stdlib.h>
 #include <stdio.h>
@@ -721,6 +724,7 @@
         width = 4;
         break;
     case kFmt31c:
+    case kFmt40sc:
     case kFmt41c:
     case kFmt5rc:
         index = pDecInsn->vB;
diff --git a/dexlist/DexList.c b/dexlist/DexList.c
index 3552d2e..5326692 100644
--- a/dexlist/DexList.c
+++ b/dexlist/DexList.c
@@ -17,11 +17,14 @@
 /*
  * List all methods in all concrete classes in one or more DEX files.
  */
+
 #include "libdex/DexFile.h"
+
+#include "libdex/CmdUtils.h"
 #include "libdex/DexClass.h"
+#include "libdex/DexDebugInfo.h"
 #include "libdex/DexProto.h"
 #include "libdex/SysUtil.h"
-#include "libdex/CmdUtils.h"
 
 #include <stdlib.h>
 #include <stdio.h>
diff --git a/dexopt/OptMain.c b/dexopt/OptMain.c
index 194a62b..7ecd1e0 100644
--- a/dexopt/OptMain.c
+++ b/dexopt/OptMain.c
@@ -151,6 +151,7 @@
             case 'n':   dexOptMode = OPTIMIZE_MODE_NONE;        break;
             case 'v':   dexOptMode = OPTIMIZE_MODE_VERIFIED;    break;
             case 'a':   dexOptMode = OPTIMIZE_MODE_ALL;         break;
+            case 'f':   dexOptMode = OPTIMIZE_MODE_FULL;        break;
             default:                                            break;
             }
         }
@@ -549,17 +550,6 @@
     }
 #endif
 
-    //dvmLinearAllocDump(NULL);
-
-#if 0
-    {
-        extern int gDvm__totalInstr, gDvm__gcInstr, gDvm__gcData,
-               gDvm__gcSimpleData;
-        LOGI("GC DATA: totinst=%d, gcinst=%d, gcdata=%d simpled=%d\n",
-            gDvm__totalInstr, gDvm__gcInstr, gDvm__gcData, gDvm__gcSimpleData);
-    }
-#endif
-
     free(bootClassPath);
     LOGV("DexOpt command complete (result=%d)\n", result);
     return result;
diff --git a/docs/dalvik-bytecode.html b/docs/dalvik-bytecode.html
index 19931d4..15294e8 100644
--- a/docs/dalvik-bytecode.html
+++ b/docs/dalvik-bytecode.html
@@ -68,7 +68,8 @@
   (after the operation).
 </li>
 <li>There are several "pseudo-instructions" that are used to hold
-  variable-length data referred to by regular instructions (for example,
+  variable-length data payloads, which are referred to by regular
+  instructions (for example,
   <code>fill-array-data</code>). Such instructions must never be
   encountered during the normal flow of execution. In addition, the
   instructions must be located on even-numbered bytecode offsets (that is,
@@ -164,9 +165,9 @@
     <p><b>Note:</b>
     Data-bearing pseudo-instructions are tagged with this opcode, in which
     case the high-order byte of the opcode unit indicates the nature of
-    the data. See "<code>packed-switch</code> Format",
-    "<code>sparse-switch</code> Format", and
-    "<code>fill-array-data</code> Format" below.</p>
+    the data. See "<code>packed-switch-payload</code> Format",
+    "<code>sparse-switch-payload</code> Format", and
+    "<code>fill-array-data-payload</code> Format" below.</p>
   </td>
 </tr>
 <tr>
@@ -509,7 +510,7 @@
 <tr>
   <td>26 31t</td>
   <td>fill-array-data vAA, +BBBBBBBB <i>(with supplemental data as specified
-    below in "<code>fill-array-data</code> Format")</i></td>
+    below in "<code>fill-array-data-payload</code> Format")</i></td>
   <td><code>A:</code> array reference (8 bits)<br/>
     <code>B:</code> signed "branch" offset to table data pseudo-instruction
     (32 bits)
@@ -558,7 +559,7 @@
 <tr>
   <td>2b 31t</td>
   <td>packed-switch vAA, +BBBBBBBB <i>(with supplemental data as
-    specified below in "<code>packed-switch</code> Format")</i></td>
+    specified below in "<code>packed-switch-payload</code> Format")</i></td>
   <td><code>A:</code> register to test<br/>
     <code>B:</code> signed "branch" offset to table data pseudo-instruction
     (32 bits)
@@ -572,7 +573,7 @@
 <tr>
   <td>2c 31t</td>
   <td>sparse-switch vAA, +BBBBBBBB <i>(with supplemental data as
-    specified below in "<code>sparse-switch</code> Format")</i></td>
+    specified below in "<code>sparse-switch-payload</code> Format")</i></td>
   <td><code>A:</code> register to test<br/>
     <code>B:</code> signed "branch" offset to table data pseudo-instruction
     (32 bits)
@@ -1134,7 +1135,7 @@
 </tbody>
 </table>
 
-<h2><code>packed-switch</code> Format</h2>
+<h2><code>packed-switch-payload</code> Format</h2>
 
 <table class="supplement">
 <thead>
@@ -1173,7 +1174,7 @@
 <p><b>Note:</b> The total number of code units for an instance of this
 table is <code>(size * 2) + 4</code>.</p>
 
-<h2><code>sparse-switch</code> Format</h2>
+<h2><code>sparse-switch-payload</code> Format</h2>
 
 <table class="supplement">
 <thead>
@@ -1213,7 +1214,7 @@
 <p><b>Note:</b> The total number of code units for an instance of this
 table is <code>(size * 4) + 2</code>.</p>
 
-<h2><code>fill-array-data</code> Format</h2>
+<h2><code>fill-array-data-payload</code> Format</h2>
 
 <table class="supplement">
 <thead>
diff --git a/docs/dex-format.html b/docs/dex-format.html
index ea92c67..5a71b59 100644
--- a/docs/dex-format.html
+++ b/docs/dex-format.html
@@ -176,7 +176,7 @@
     used by this file, either for internal naming (e.g., type descriptors)
     or as constant objects referred to by code. This list must be sorted
     by string contents, using UTF-16 code point values (not in a
-    locale-sensitive manner).
+    locale-sensitive manner), and it must not contain any duplicate entries.
   </td>
 </tr>
 <tr>
@@ -185,7 +185,7 @@
   <td>type identifiers list. These are identifiers for all types (classes,
     arrays, or primitive types) referred to by this file, whether defined
     in the file or not. This list must be sorted by <code>string_id</code>
-    index.
+    index, and it must not contain any duplicate entries.
   </td>
 </tr>
 <tr>
@@ -194,7 +194,8 @@
   <td>method prototype identifiers list. These are identifiers for all
     prototypes referred to by this file. This list must be sorted in
     return-type (by <code>type_id</code> index) major order, and then
-    by arguments (also by <code>type_id</code> index).
+    by arguments (also by <code>type_id</code> index). The list must not
+    contain any duplicate entries.
   </td>
 </tr>
 <tr>
@@ -205,7 +206,7 @@
     list must be sorted, where the defining type (by <code>type_id</code>
     index) is the major order, field name (by <code>string_id</code> index)
     is the intermediate order, and type (by <code>type_id</code> index)
-    is the minor order.
+    is the minor order. The list must not contain any duplicate entries.
   </td>
 </tr>
 <tr>
@@ -215,8 +216,9 @@
     referred to by this file, whether defined in the file or not. This
     list must be sorted, where the defining type (by <code>type_id</code>
     index) is the major order, method name (by <code>string_id</code>
-    index) is the intermediate order, and method
-    prototype (by <code>proto_id</code> index) is the minor order.
+    index) is the intermediate order, and method prototype (by
+    <code>proto_id</code> index) is the minor order.  The list must not
+    contain any duplicate entries.
   </td>
 </tr>
 <tr>
@@ -224,7 +226,9 @@
   <td>class_def_item[]</td>
   <td>class definitions list. The classes must be ordered such that a given
     class's superclass and implemented interfaces appear in the
-    list earlier than the referring class.
+    list earlier than the referring class. Furthermore, it is invalid for
+    a definition for the same-named class to appear more than once in
+    the list.
   </td>
 </tr>
 <tr>
@@ -240,8 +244,8 @@
   <td>link_data</td>
   <td>ubyte[]</td>
   <td>data used in statically linked files. The format of the data in
-    this section is left unspecified by this document;
-    this section is empty in unlinked files, and runtime implementations
+    this section is left unspecified by this document.
+    This section is empty in unlinked files, and runtime implementations
     may use it as they see fit.
   </td>
 </tr>
@@ -270,10 +274,10 @@
 <p><b>Note:</b> At least a couple earlier versions of the format have
 been used in widely-available public software releases. For example,
 version <code>009</code> was used for the M3 releases of the
-Android platform (November-December 2007),
+Android platform (November&ndash;December 2007),
 and version <code>013</code> was used for the M5 releases of the Android
-platform (February-March 2008). In several respects, these earlier versions
-of the format differ significantly from the version described in this
+platform (February&ndash;March 2008). In several respects, these earlier
+versions of the format differ significantly from the version described in this
 document.</p>
 
 <h2><code>ENDIAN_CONSTANT</code> and <code>REVERSE_ENDIAN_CONSTANT</code></h2>
@@ -827,7 +831,7 @@
 <p>A <i>SimpleName</i> is the basis for the syntax of the names of other
 things. The <code>.dex</code> format allows a fair amount of latitude
 here (much more than most common source languages). In brief, a simple
-name may consist of any low-ASCII alphabetic character or digit, a few
+name consists of any low-ASCII alphabetic character or digit, a few
 specific low-ASCII symbols, and most non-ASCII code points that are not
 control, space, or special characters. Note that surrogate code points
 (in the range <code>U+d800</code> &hellip; <code>U+dfff</code>) are not
@@ -1320,7 +1324,7 @@
 <p>This is a list of the entire contents of a file, in order. It
 contains some redundancy with respect to the <code>header_item</code>
 but is intended to be an easy form to use to iterate over an entire
-file. A given type may appear at most once in a map, but there is no
+file. A given type must appear at most once in a map, but there is no
 restriction on what order types may appear in, other than the
 restrictions implied by the rest of the format (e.g., a
 <code>header</code> section must appear first, followed by a
@@ -2091,7 +2095,7 @@
 <tr>
   <td>tries</td>
   <td>try_item[tries_size] <i>(optional)</i></td>
-  <td>array indicating where in the code exceptions may be caught and
+  <td>array indicating where in the code exceptions are caught and
     how to handle them. Elements of the array must be non-overlapping in
     range and in order from low to high address. This element is only
     present if <code>tries_size</code> is non-zero.
@@ -2255,7 +2259,7 @@
 <code>address</code> register represents the instruction offset in the
 associated <code>insns_item</code> in 16-bit code units. The
 <code>address</code> register starts at <code>0</code> at the beginning of each
-<code>debug_info</code> sequence and may only monotonically increase.
+<code>debug_info</code> sequence and must only monotonically increase.
 The <code>line</code> register represents what source line number
 should be associated with the next positions table entry emitted by
 the state machine. It is initialized in the sequence header, and may
@@ -2867,7 +2871,7 @@
 which is either defined as a member of another class, per se, or is
 anonymous but not defined within a method body (e.g., a synthetic
 inner class). Every class that has this annotation must also have an
-<code>InnerClass</code> annotation. Additionally, a class may not have
+<code>InnerClass</code> annotation. Additionally, a class must not have
 both an <code>EnclosingClass</code> and an
 <code>EnclosingMethod</code> annotation.</p>
 
@@ -2894,7 +2898,7 @@
 <p>An <code>EnclosingMethod</code> annotation is attached to each class
 which is defined inside a method body. Every class that has this
 annotation must also have an <code>InnerClass</code> annotation.
-Additionally, a class may not have both an <code>EnclosingClass</code>
+Additionally, a class must not have both an <code>EnclosingClass</code>
 and an <code>EnclosingMethod</code> annotation.</p>
 
 <table class="format">
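Editor's note on the dex-format.html changes above: the added wording tightens several id sections to require sorted, duplicate-free lists. A minimal sketch of that invariant for string_ids, assuming Java's String.compareTo (which orders by UTF-16 code units) is an acceptable stand-in for the spec's "UTF-16 code point values" ordering for the strings involved:

    import java.util.List;

    // Sketch only: a strictly increasing sequence is both sorted and free of
    // duplicate entries, which is what the revised section table demands.
    final class StringIdsInvariant {
        static boolean isSortedAndUnique(List<String> strings) {
            for (int i = 1; i < strings.size(); i++) {
                if (strings.get(i - 1).compareTo(strings.get(i)) >= 0) {
                    return false;
                }
            }
            return true;
        }
    }
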
diff --git a/docs/embedded-vm-control.html b/docs/embedded-vm-control.html
index 11751e0..a0bdd29 100644
--- a/docs/embedded-vm-control.html
+++ b/docs/embedded-vm-control.html
@@ -13,7 +13,6 @@
     <li><a href="#assertions">Assertions</a>
     <li><a href="#verifier">Bytecode Verification and Optimization</a>
     <li><a href="#execmode">Execution Mode</a>
-    <li><a href="#dp">Deadlock Prediction</a>
     <li><a href="#stackdump">Stack Dumps</a>
     <li><a href="#dexcheck">DEX File Checksums</a>
     <li><a href="#general">General Flags</a>
@@ -207,40 +206,6 @@
 incorrectly.
 
 
-<h2><a name="dp">Deadlock Prediction</a></h2>
-
-<p>If the VM is built with <code>WITH_DEADLOCK_PREDICTION</code>, the deadlock
-predictor can be enabled with the <code>-Xdeadlockpredict</code> argument.
-(The output from <code>dalvikvm -help</code> will tell you if the VM was
-built appropriately -- look for <code>deadlock_prediction</code> on the
-<code>Configured with:</code> line.)
-This feature tells the VM to keep track of the order in which object
-monitor locks are acquired.  If the program attempts to acquire a set
-of locks in a different order from what was seen earlier, the VM logs
-a warning and optionally throws an exception.
-
-<p>Valid values for the command-line argument are
-<code>off</code> to disable it (default), <code>warn</code> to log the
-problem but continue executing, <code>err</code> to cause a
-<code>dalvik.system.PotentialDeadlockError</code> to be thrown from the
-<code>monitor-enter</code> instruction, and <code>abort</code> to have
-the entire VM abort.
-
-<p>You will usually want to use:
-<pre>adb shell setprop dalvik.vm.extra-opts -Xdeadlockpredict:err</pre>
-unless you are keeping an eye on the logs as they scroll by.
-
-<p>Please note that this feature is deadlock prediction, not deadlock
-detection -- in the current implementation, the computations are performed
-after the lock is acquired (this simplifies the code, reducing the
-overhead added to every mutex operation).  You can spot a deadlock in a
-hung process by sending a <code>kill -3</code> and examining the stack
-trace written to the log.
-
-<p>This only takes monitors into account.  Native mutexes and other resources
-can also be the cause of deadlocks, but will not be detected by this.
-
-
 <h2><a name="stackdump">Stack Dumps</a></h2>
 
 <p>Like other desktop VMs, when the Dalvik VM receives a SIGQUIT
diff --git a/docs/instruction-formats.html b/docs/instruction-formats.html
index e9e4140..ada1bb2 100644
--- a/docs/instruction-formats.html
+++ b/docs/instruction-formats.html
@@ -62,13 +62,14 @@
 format "<code>21t</code>" is of length two, contains one register reference,
 and additionally contains a branch target.</p>
 
-<p>Suggested static linking formats have an additional "<code>s</code>" suffix,
-making them four characters total. Similarly, suggested "inline" linking
-formats have an additional "<code>i</code>" suffix. (In this context, inline
-linking is like static linking, except with more direct ties into a
-virtual machine's implementation.) Finally, one oddball suggested format
-("<code>20bc</code>") includes two pieces of data which are represented in
-its format ID.</p>
+<p>Suggested static linking formats have an additional
+"<code>s</code>" suffix, making them four characters total. Similarly,
+suggested "inline" linking formats have an additional "<code>i</code>"
+suffix. (In this context, inline linking is like static linking,
+except with more direct ties into a virtual machine's implementation.) 
+Finally, a couple oddball suggested formats (e.g.,
+"<code>20bc</code>") include two pieces of data which are both
+represented in its format ID.</p>
 
 <p>The full list of typecode letters are as follows. Note that some
 forms have different sizes, depending on the format:</p>
@@ -249,9 +250,9 @@
 <tr>
   <td>AA|<i>op</i> BBBB</td></td>
   <td>20bc</td>
-  <td><i><code>op</code></i> BB, kind@AAAA</td>
+  <td><i><code>op</code></i> AA, kind@BBBB</td>
   <td><i>suggested format for statically determined verification errors;
-    B is the type of error and A is an index into a type-appropriate
+    A is the type of error and B is an index into a type-appropriate
     table (e.g. method references for a no-such-method error)</i></td>
 </tr>
 <tr>
@@ -466,6 +467,13 @@
   <td>&nbsp;</td>
 </tr>
 <tr>
+  <td><i>exop</i> BBBB<sub>lo</sub> BBBB<sub>hi</sub> AAAA</td></td>
+  <td>40sc</td>
+  <td><i><code>exop</code></i> AAAA, kind@BBBBBBBB</td>
+  <td><i>suggested format for statically determined verification errors;
+    see <code>20bc</code>, above</i></td>
+</tr>
+<tr>
   <td><i>exop</i> BBBB<sub>lo</sub> BBBB<sub>hi</sub> AAAA
   <td>41c</td>
   <td><i><code>exop</code></i> vAAAA, field@BBBBBBBB<br/>
diff --git a/dx/src/com/android/dx/cf/code/BytecodeArray.java b/dx/src/com/android/dx/cf/code/BytecodeArray.java
index e942b13..f4ea007 100644
--- a/dx/src/com/android/dx/cf/code/BytecodeArray.java
+++ b/dx/src/com/android/dx/cf/code/BytecodeArray.java
@@ -1340,8 +1340,8 @@
     }
 
     /**
-     * Base implementation of {@link Visitor}, which has empty method
-     * bodies for all methods.
+     * Implementation of {@link Visitor}, which just pays attention
+     * to constant values.
      */
     class ConstantParserVisitor extends BaseVisitor {
         Constant cst;
@@ -1357,23 +1357,27 @@
         }
 
         /** {@inheritDoc} */
+        @Override
         public void visitInvalid(int opcode, int offset, int length) {
             clear();
         }
 
         /** {@inheritDoc} */
+        @Override
         public void visitNoArgs(int opcode, int offset, int length,
                 Type type) {
             clear();
         }
 
         /** {@inheritDoc} */
+        @Override
         public void visitLocal(int opcode, int offset, int length,
                 int idx, Type type, int value) {
             clear();
         }
 
         /** {@inheritDoc} */
+        @Override
         public void visitConstant(int opcode, int offset, int length,
                 Constant cst, int value) {
             this.cst = cst;
@@ -1382,29 +1386,34 @@
         }
 
         /** {@inheritDoc} */
+        @Override
         public void visitBranch(int opcode, int offset, int length,
                 int target) {
             clear();
         }
 
         /** {@inheritDoc} */
+        @Override
         public void visitSwitch(int opcode, int offset, int length,
                 SwitchList cases, int padding) {
             clear();
         }
 
         /** {@inheritDoc} */
+        @Override
         public void visitNewarray(int offset, int length, CstType type,
                 ArrayList<Constant> initVals) {
             clear();
         }
 
         /** {@inheritDoc} */
+        @Override
         public void setPreviousOffset(int offset) {
             // Intentionally left empty
         }
 
         /** {@inheritDoc} */
+        @Override
         public int getPreviousOffset() {
             // Intentionally left empty
             return -1;
diff --git a/dx/src/com/android/dx/cf/code/Ropper.java b/dx/src/com/android/dx/cf/code/Ropper.java
index 8217166..715cfd8 100644
--- a/dx/src/com/android/dx/cf/code/Ropper.java
+++ b/dx/src/com/android/dx/cf/code/Ropper.java
@@ -1427,7 +1427,7 @@
                     IntList.makeImmutable (newSubStartLabel),
                             newSubStartLabel),
                 labelToSubroutines.get(b.getLabel()));
-       }
+        }
 
         /**
          * Copies a basic block, mapping its successors along the way.
@@ -1435,7 +1435,7 @@
          * @param origLabel original block label
          * @param newLabel label that the new block should have
          */
-       private void copyBlock(int origLabel, int newLabel) {
+        private void copyBlock(int origLabel, int newLabel) {
 
             BasicBlock origBlock = labelToBlock(origLabel);
 
@@ -1515,7 +1515,7 @@
          */
         private boolean involvedInSubroutine(int label, int subroutineStart) {
             IntList subroutinesList = labelToSubroutines.get(label);
-            return (subroutinesList.size() > 0
+            return (subroutinesList != null && subroutinesList.size() > 0
                     && subroutinesList.top() == subroutineStart);
         }
 
diff --git a/dx/src/com/android/dx/cf/code/RopperMachine.java b/dx/src/com/android/dx/cf/code/RopperMachine.java
index 0768cc8..5da2588 100644
--- a/dx/src/com/android/dx/cf/code/RopperMachine.java
+++ b/dx/src/com/android/dx/cf/code/RopperMachine.java
@@ -32,6 +32,7 @@
 import com.android.dx.rop.code.TranslationAdvice;
 import com.android.dx.rop.cst.Constant;
 import com.android.dx.rop.cst.CstFieldRef;
+import com.android.dx.rop.cst.CstInteger;
 import com.android.dx.rop.cst.CstMethodRef;
 import com.android.dx.rop.cst.CstNat;
 import com.android.dx.rop.cst.CstType;
@@ -517,19 +518,40 @@
              */
             cst = CstType.intern(rop.getResult());
         } else if ((cst == null) && (sourceCount == 2)) {
+            TypeBearer firstType = sources.get(0).getTypeBearer();
             TypeBearer lastType = sources.get(1).getTypeBearer();
 
-            if (lastType.isConstant()
-                    && advice.hasConstantOperation(rop,
-                    sources.get(0), sources.get(1))) {
-                /*
-                 * The target architecture has an instruction that can
-                 * build in the constant found in the second argument,
-                 * so pull it out of the sources and just use it as a
-                 * constant here.
-                 */
-                cst = (Constant) lastType;
-                sources = sources.withoutLast();
+            if ((lastType.isConstant() || firstType.isConstant()) &&
+                 advice.hasConstantOperation(rop, sources.get(0),
+                                             sources.get(1))) {
+
+                if (lastType.isConstant()) {
+                    /*
+                     * The target architecture has an instruction that can
+                     * build in the constant found in the second argument,
+                     * so pull it out of the sources and just use it as a
+                     * constant here.
+                     */
+                    cst = (Constant) lastType;
+                    sources = sources.withoutLast();
+
+                    // For subtraction, change to addition and invert constant
+                    if (rop.getOpcode() == RegOps.SUB) {
+                        ropOpcode = RegOps.ADD;
+                        CstInteger cstInt = (CstInteger) lastType;
+                        cst = CstInteger.make(-cstInt.getValue());
+                    }
+                } else {
+                    /*
+                     * The target architecture has an instruction that can
+                     * build in the constant found in the first argument,
+                     * so pull it out of the sources and just use it as a
+                     * constant here.
+                     */
+                    cst = (Constant) firstType;
+                    sources = sources.withoutFirst();
+                }
+
                 rop = Rops.ropFor(ropOpcode, destType, sources, cst);
             }
         }
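The widened condition above lets the constant sit in either source of a two-source op; when the constant is the second source of a subtraction, the hunk flips the opcode to ADD and negates the literal so a literal-carrying add instruction can absorb it. A minimal sketch of just that rewrite, using the same dx classes (RegOps, CstInteger) the hunk uses:

    import com.android.dx.rop.code.RegOps;
    import com.android.dx.rop.cst.CstInteger;

    // Sketch only: x - c is rewritten as x + (-c) once c has been pulled out
    // of the sources as a constant, mirroring the new SUB branch above.
    final class SubLiteralRewrite {
        static int rewrittenOpcode(int ropOpcode) {
            return (ropOpcode == RegOps.SUB) ? RegOps.ADD : ropOpcode;
        }

        static CstInteger rewrittenLiteral(int ropOpcode, CstInteger literal) {
            return (ropOpcode == RegOps.SUB)
                    ? CstInteger.make(-literal.getValue())
                    : literal;
        }
    }
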
diff --git a/dx/src/com/android/dx/cf/direct/ClassPathOpener.java b/dx/src/com/android/dx/cf/direct/ClassPathOpener.java
index 4e8c435..7621bf7 100644
--- a/dx/src/com/android/dx/cf/direct/ClassPathOpener.java
+++ b/dx/src/com/android/dx/cf/direct/ClassPathOpener.java
@@ -58,12 +58,13 @@
          * @param name {@code non-null;} filename of element. May not be a valid
          * filesystem path.
          *
+         * @param lastModified milliseconds since 1970-Jan-1 00:00:00 GMT
          * @param bytes {@code non-null;} file data
          * @return true on success. Result is or'd with all other results
          * from {@code processFileBytes} and returned to the caller
          * of {@code process()}.
          */
-        boolean processFileBytes(String name, byte[] bytes);
+        boolean processFileBytes(String name, long lastModified, byte[] bytes);
 
         /**
          * Informs consumer that an exception occurred while processing
@@ -131,7 +132,7 @@
             }
 
             byte[] bytes = FileUtils.readFile(file);
-            return consumer.processFileBytes(path, bytes);
+            return consumer.processFileBytes(path, file.lastModified(), bytes);
         } catch (Exception ex) {
             consumer.onException(ex);
             return false;
@@ -241,7 +242,7 @@
             in.close();
 
             byte[] bytes = baos.toByteArray();
-            any |= consumer.processFileBytes(path, bytes);
+            any |= consumer.processFileBytes(path, one.getTime(), bytes);
         }
 
         zip.close();
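With processFileBytes now carrying the entry's last-modified time (File.lastModified() for loose files, ZipEntry.getTime() for archive members), consumers can decide per entry whether there is anything new to translate. A minimal sketch of a consumer built against the widened interface; the cutoff value is hypothetical:

    import com.android.dx.cf.direct.ClassPathOpener;
    import java.io.File;

    // Sketch only: skip entries older than a caller-supplied cutoff, the same
    // pattern the dexer's incremental mode (below) applies via minimumFileAge.
    final class SkipStaleEntriesConsumer implements ClassPathOpener.Consumer {
        private final long cutoffMillis;

        SkipStaleEntriesConsumer(long cutoffMillis) {
            this.cutoffMillis = cutoffMillis;
        }

        public boolean processFileBytes(String name, long lastModified, byte[] bytes) {
            if (lastModified < cutoffMillis) {
                return true; // nothing new; treat as handled
            }
            System.out.println("would process " + name + " (" + bytes.length + " bytes)");
            return true;
        }

        public void onException(Exception ex) {
            ex.printStackTrace();
        }

        public void onProcessArchiveStart(File file) {
            System.out.println("archive: " + file);
        }
    }
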
diff --git a/dx/src/com/android/dx/command/Main.java b/dx/src/com/android/dx/command/Main.java
index 70a94b0..09f543f 100644
--- a/dx/src/com/android/dx/command/Main.java
+++ b/dx/src/com/android/dx/command/Main.java
@@ -52,6 +52,11 @@
         "human-oriented format.\n" +
         "  dx --junit [-wait] <TestClass>\n" +
         "    Run the indicated unit test.\n" +
+        "  dx --find-usages <file.dex> <declaring type> <member>\n" +
+        "    Find references and declarations to a field or method.\n" +
+        "    declaring type: a class name in internal form, like " +
+        "Ljava/lang/Object;\n" +
+        "    member: a field or method name, like hashCode\n" +
         "  dx -J<option> ... <arguments, in one of the above " +
         "forms>\n" +
         "    Pass VM-specific options to the virtual machine that " +
@@ -99,6 +104,9 @@
                 } else if (arg.equals("--junit")) {
                     TestRunner.main(without(args, i));
                     break;
+                } else if (arg.equals("--find-usages")) {
+                    com.android.dx.command.findusages.Main.main(without(args, i));
+                    break;
                 } else if (arg.equals("--version")) {
                     version();
                     break;
diff --git a/dx/src/com/android/dx/command/annotool/AnnotationLister.java b/dx/src/com/android/dx/command/annotool/AnnotationLister.java
index a29e5ba..6584b60 100644
--- a/dx/src/com/android/dx/command/annotool/AnnotationLister.java
+++ b/dx/src/com/android/dx/command/annotool/AnnotationLister.java
@@ -63,7 +63,7 @@
 
             opener = new ClassPathOpener(path, true,
                     new ClassPathOpener.Consumer() {
-                public boolean processFileBytes(String name, byte[] bytes) {
+                public boolean processFileBytes(String name, long lastModified, byte[] bytes) {
                     if (!name.endsWith(".class")) {
                         return true;
                     }
diff --git a/dx/src/com/android/dx/command/dexer/Main.java b/dx/src/com/android/dx/command/dexer/Main.java
index 204caa7..6e8fa7e 100644
--- a/dx/src/com/android/dx/command/dexer/Main.java
+++ b/dx/src/com/android/dx/command/dexer/Main.java
@@ -17,10 +17,12 @@
 package com.android.dx.command.dexer;
 
 import com.android.dx.Version;
-import com.android.dx.cf.iface.ParseException;
+import com.android.dx.cf.code.SimException;
 import com.android.dx.cf.direct.ClassPathOpener;
+import com.android.dx.cf.iface.ParseException;
 import com.android.dx.command.DxConsole;
 import com.android.dx.command.UsageException;
+import com.android.dx.dex.DexFormat;
 import com.android.dx.dex.cf.CfOptions;
 import com.android.dx.dex.cf.CfTranslator;
 import com.android.dx.dex.cf.CodeStatistics;
@@ -28,21 +30,26 @@
 import com.android.dx.dex.file.ClassDefItem;
 import com.android.dx.dex.file.DexFile;
 import com.android.dx.dex.file.EncodedMethod;
+import com.android.dx.io.DexBuffer;
+import com.android.dx.merge.DexMerger;
 import com.android.dx.rop.annotation.Annotation;
 import com.android.dx.rop.annotation.Annotations;
 import com.android.dx.rop.annotation.AnnotationsList;
 import com.android.dx.rop.cst.CstNat;
 import com.android.dx.rop.cst.CstUtf8;
-
+import com.android.dx.util.FileUtils;
 import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
 import java.io.File;
+import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.InputStream;
 import java.io.OutputStream;
 import java.io.OutputStreamWriter;
 import java.io.PrintWriter;
-import java.util.Arrays;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.concurrent.ExecutorService;
@@ -52,6 +59,8 @@
 import java.util.jar.JarEntry;
 import java.util.jar.JarOutputStream;
 import java.util.jar.Manifest;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipFile;
 
 /**
  * Main class for the class file translator.
@@ -95,12 +104,6 @@
         "lead to pain, suffering, grief, and lamentation.\n";
 
     /**
-     * {@code non-null;} name for the {@code .dex} file that goes into
-     * {@code .jar} files
-     */
-    private static final String DEX_IN_JAR_NAME = "classes.dex";
-
-    /**
      * {@code non-null;} name of the standard manifest file in {@code .jar}
      * files
      */
@@ -120,8 +123,8 @@
      */
     private static final String[] JAVAX_CORE = {
         "accessibility", "crypto", "imageio", "management", "naming", "net",
-        "print", "rmi", "security", "sound", "sql", "swing", "transaction",
-        "xml"
+        "print", "rmi", "security", "sip", "sound", "sql", "swing",
+        "transaction", "xml"
     };
 
     /** number of warnings during processing */
@@ -148,6 +151,9 @@
     /** true if any files are successfully processed */
     private static boolean anyFilesProcessed;
 
+    /** class files older than this must be defined in the target dex file. */
+    private static long minimumFileAge = 0;
+
     /**
      * This class is uninstantiable.
      */
@@ -159,7 +165,7 @@
      * Run and exit if something unexpected happened.
      * @param argArray the command line arguments
      */
-    public static void main(String[] argArray) {
+    public static void main(String[] argArray) throws IOException {
         Arguments arguments = new Arguments();
         arguments.parse(argArray);
 
@@ -174,7 +180,7 @@
      * @param arguments the data + parameters for the conversion
      * @return 0 if success > 0 otherwise.
      */
-    public static int run(Arguments arguments) {
+    public static int run(Arguments arguments) throws IOException {
         // Reset the error/warning count to start fresh.
         warnings = 0;
         errors = 0;
@@ -182,14 +188,40 @@
         args = arguments;
         args.makeCfOptions();
 
+        File incrementalOutFile = null;
+        if (args.incremental) {
+            if (args.outName == null) {
+                System.err.println(
+                        "error: no incremental output name specified");
+                return -1;
+            }
+            incrementalOutFile = new File(args.outName);
+            if (incrementalOutFile.exists()) {
+                minimumFileAge = incrementalOutFile.lastModified();
+            }
+        }
+
         if (!processAllFiles()) {
             return 1;
         }
 
-        byte[] outArray = writeDex();
+        if (args.incremental && !anyFilesProcessed) {
+            return 0; // this was a no-op incremental build
+        }
 
-        if (outArray == null) {
-            return 2;
+        // this array is null if no classes were defined
+        byte[] outArray = null;
+
+        if (!outputDex.isEmpty()) {
+            outArray = writeDex();
+
+            if (outArray == null) {
+                return 2;
+            }
+        }
+
+        if (args.incremental) {
+            outArray = merge(outArray, incrementalOutFile);
         }
 
         if (args.jarOutput) {
@@ -199,12 +231,65 @@
             if (!createJar(args.outName, outArray)) {
                 return 3;
             }
+        } else if (outArray != null && args.outName != null) {
+            OutputStream out = openOutput(args.outName);
+            out.write(outArray);
+            closeOutput(out);
         }
 
         return 0;
     }
 
     /**
+     * Merges the dex files {@code update} and {@code base}, preferring
+     * {@code update}'s definition for types defined in both dex files.
+     *
+     * @return the bytes of the merged dex file, or null if both the update
+     *     and the base dex do not exist.
+     */
+    private static byte[] merge(byte[] update, File base) throws IOException {
+        DexBuffer dexA = null;
+        DexBuffer dexB = null;
+
+        if (update != null) {
+            dexA = new DexBuffer();
+            dexA.loadFrom(new ByteArrayInputStream(update));
+        }
+
+        if (base.exists()) {
+            if (args.jarOutput) {
+                ZipFile zipFile = new ZipFile(base);
+                ZipEntry entry = zipFile.getEntry(DexFormat.DEX_IN_JAR_NAME);
+                if (entry != null) {
+                    dexB = new DexBuffer();
+                    dexB.loadFrom(zipFile.getInputStream(entry));
+                    zipFile.close();
+                }
+            } else {
+                InputStream in = new FileInputStream(base);
+                dexB = new DexBuffer();
+                dexB.loadFrom(in);
+                in.close();
+            }
+        }
+
+        DexBuffer result;
+        if (dexA == null && dexB == null) {
+            return null;
+        } else if (dexA == null) {
+            result = dexB;
+        } else if (dexB == null) {
+            result = dexA;
+        } else {
+            result = new DexMerger(dexA, dexB).merge();
+        }
+
+        ByteArrayOutputStream bytesOut = new ByteArrayOutputStream();
+        result.writeTo(bytesOut);
+        return bytesOut.toByteArray();
+    }
+
+    /**
      * Constructs the output {@link DexFile}, fill it in with all the
      * specified classes, and populate the resources map if required.
      *
@@ -261,6 +346,10 @@
             return false;
         }
 
+        if (args.incremental && !anyFilesProcessed) {
+            return true;
+        }
+
         if (!(anyFilesProcessed || args.emptyOk)) {
             DxConsole.err.println("no classfiles specified");
             return false;
@@ -286,20 +375,25 @@
 
         opener = new ClassPathOpener(pathname, false,
                 new ClassPathOpener.Consumer() {
-            public boolean processFileBytes(String name, byte[] bytes) {
+            public boolean processFileBytes(String name, long lastModified, byte[] bytes) {
                 if (args.numThreads > 1) {
-                    threadPool.execute(new ParallelProcessor(name, bytes));
+                    threadPool.execute(new ParallelProcessor(name, lastModified, bytes));
                     return false;
                 } else {
-                    return Main.processFileBytes(name, bytes);
+                    return Main.processFileBytes(name, lastModified, bytes);
                 }
             }
             public void onException(Exception ex) {
                 if (ex instanceof StopProcessing) {
                     throw (StopProcessing) ex;
+                } else if (ex instanceof SimException) {
+                    DxConsole.err.println("\nEXCEPTION FROM SIMULATION:");
+                    DxConsole.err.println(ex.getMessage() + "\n");
+                    DxConsole.err.println(((SimException) ex).getContext());
+                } else {
+                    DxConsole.err.println("\nUNEXPECTED TOP-LEVEL EXCEPTION:");
+                    ex.printStackTrace(DxConsole.err);
                 }
-                DxConsole.err.println("\nUNEXPECTED TOP-LEVEL EXCEPTION:");
-                ex.printStackTrace(DxConsole.err);
                 errors++;
             }
             public void onProcessArchiveStart(File file) {
@@ -320,7 +414,7 @@
      * @param bytes {@code non-null;} contents of the file
      * @return whether processing was successful
      */
-    private static boolean processFileBytes(String name, byte[] bytes) {
+    private static boolean processFileBytes(String name, long lastModified, byte[] bytes) {
         boolean isClass = name.endsWith(".class");
         boolean keepResources = (outputResources != null);
 
@@ -343,6 +437,9 @@
                     outputResources.put(fixedName, bytes);
                 }
             }
+            if (lastModified < minimumFileAge) {
+                return true;
+            }
             return processClass(fixedName, bytes);
         } else {
             synchronized (outputResources) {
@@ -426,9 +523,8 @@
     }
 
     /**
-     * Converts {@link #outputDex} into a {@code byte[]}, write
-     * it out to the proper file (if any), and also do whatever human-oriented
-     * dumping is required.
+     * Converts {@link #outputDex} into a {@code byte[]} and do whatever
+     * human-oriented dumping is required.
      *
      * @return {@code null-ok;} the converted {@code byte[]} or {@code null}
      * if there was a problem
@@ -437,7 +533,6 @@
         byte[] outArray = null;
 
         try {
-            OutputStream out = null;
             OutputStream humanOutRaw = null;
             OutputStreamWriter humanOut = null;
             try {
@@ -460,11 +555,6 @@
                      * and write it, dump it, etc.
                      */
                     outArray = outputDex.toDex(humanOut, args.verboseDump);
-
-                    if ((args.outName != null) && !args.jarOutput) {
-                        out = openOutput(args.outName);
-                        out.write(outArray);
-                    }
                 }
 
                 if (args.statistics) {
@@ -474,7 +564,6 @@
                 if (humanOut != null) {
                     humanOut.flush();
                 }
-                closeOutput(out);
                 closeOutput(humanOutRaw);
             }
         } catch (Exception ex) {
@@ -495,8 +584,8 @@
      * Creates a jar file from the resources and given dex file array.
      *
      * @param fileName {@code non-null;} name of the file
-     * @param dexArray {@code non-null;} array containing the dex file
-     * to include
+     * @param dexArray array containing the dex file to include, or null if the
+     *     output contains no class defs.
      * @return whether the creation was successful
      */
     private static boolean createJar(String fileName, byte[] dexArray) {
@@ -511,7 +600,9 @@
             OutputStream out = openOutput(fileName);
             JarOutputStream jarOut = new JarOutputStream(out, manifest);
 
-            outputResources.put(DEX_IN_JAR_NAME, dexArray);
+            if (dexArray != null) {
+                outputResources.put(DexFormat.DEX_IN_JAR_NAME, dexArray);
+            }
 
             try {
                 for (Map.Entry<String, byte[]> e :
@@ -580,7 +671,7 @@
         createdBy += "dx " + Version.VERSION;
 
         attribs.put(CREATED_BY, createdBy);
-        attribs.putValue("Dex-Location", DEX_IN_JAR_NAME);
+        attribs.putValue("Dex-Location", DexFormat.DEX_IN_JAR_NAME);
 
         return manifest;
     }
@@ -816,6 +907,9 @@
         /** whether to keep local variable information */
         public boolean localInfo = true;
 
+        /** whether to merge with the output dex file if it exists. */
+        public boolean incremental = false;
+
         /** {@code non-null after {@link #parse};} file name arguments */
         public String[] fileNames;
 
@@ -885,9 +979,7 @@
                     keepClassesInJar = true;
                 } else if (arg.startsWith("--output=")) {
                     outName = arg.substring(arg.indexOf('=') + 1);
-                    if (outName.endsWith(".zip") ||
-                            outName.endsWith(".jar") ||
-                            outName.endsWith(".apk")) {
+                    if (FileUtils.hasArchiveSuffix(outName)) {
                         jarOutput = true;
                     } else if (outName.endsWith(".dex") ||
                                outName.equals("-")) {
@@ -923,6 +1015,8 @@
                 } else if (arg.startsWith("--num-threads=")) {
                     arg = arg.substring(arg.indexOf('=') + 1);
                     numThreads = Integer.parseInt(arg);
+                } else if (arg.equals("--incremental")) {
+                    incremental = true;
                 } else {
                     System.err.println("unknown option: " + arg);
                     throw new UsageException();
@@ -973,6 +1067,7 @@
     private static class ParallelProcessor implements Runnable {
 
         String path;
+        long lastModified;
         byte[] bytes;
 
         /**
@@ -982,8 +1077,9 @@
          * filesystem path.
          * @param bytes {@code non-null;} file data
          */
-        private ParallelProcessor(String path, byte bytes[]) {
+        private ParallelProcessor(String path, long lastModified, byte bytes[]) {
             this.path = path;
+            this.lastModified = lastModified;
             this.bytes = bytes;
         }
 
@@ -992,7 +1088,7 @@
          * with the given path and bytes.
          */
         public void run() {
-            if (Main.processFileBytes(path, bytes)) {
+            if (Main.processFileBytes(path, lastModified, bytes)) {
                 anyFilesProcessed = true;
             }
         }
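Taken together, the changes above give dx an --incremental mode: class files older than the existing output's lastModified are skipped, the newly translated classes are written to a dex in memory, and merge() folds that dex into the previous output with DexMerger. A minimal sketch of driving it through this class directly (the paths are hypothetical; from a shell the equivalent is dx --dex --incremental --output=out/classes.dex build/classes):

    import com.android.dx.command.dexer.Main;
    import java.io.IOException;

    // Sketch only: rebuild out/classes.dex, reusing definitions already present
    // in it for class files that have not changed since it was written.
    public final class IncrementalDexBuild {
        public static void main(String[] args) throws IOException {
            Main.main(new String[] {
                "--incremental",
                "--output=out/classes.dex",   // .dex suffix selects raw dex output
                "build/classes"               // directory of .class files
            });
        }
    }
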
diff --git a/dx/src/com/android/dx/command/findusages/FindUsages.java b/dx/src/com/android/dx/command/findusages/FindUsages.java
new file mode 100644
index 0000000..1c692ae
--- /dev/null
+++ b/dx/src/com/android/dx/command/findusages/FindUsages.java
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.command.findusages;
+
+import com.android.dx.io.ClassData;
+import com.android.dx.io.ClassDef;
+import com.android.dx.io.CodeReader;
+import com.android.dx.io.DexBuffer;
+import com.android.dx.io.FieldId;
+import com.android.dx.io.MethodId;
+import com.android.dx.io.OpcodeInfo;
+import com.android.dx.io.instructions.DecodedInstruction;
+import java.io.PrintStream;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+
+public final class FindUsages {
+    private final DexBuffer dex;
+    private final Set<Integer> methodIds;
+    private final Set<Integer> fieldIds;
+    private final CodeReader codeReader = new CodeReader();
+    private final PrintStream out;
+
+    private ClassDef currentClass;
+    private ClassData.Method currentMethod;
+
+    public FindUsages(DexBuffer dex, String declaredBy, String memberName, final PrintStream out) {
+        this.dex = dex;
+        this.out = out;
+
+        int typeStringIndex = Collections.binarySearch(dex.strings(), declaredBy);
+        int memberNameIndex = Collections.binarySearch(dex.strings(), memberName);
+        if (typeStringIndex < 0 || memberNameIndex < 0) {
+            methodIds = null;
+            fieldIds = null;
+            return; // these symbols are not mentioned in this dex
+        }
+
+        int typeIndex = Collections.binarySearch(dex.typeIds(), typeStringIndex);
+        if (typeIndex < 0) {
+            methodIds = null;
+            fieldIds = null;
+            return; // this type name isn't used as a type in this dex
+        }
+
+        methodIds = getMethodIds(dex, memberNameIndex, typeIndex);
+        fieldIds = getFieldIds(dex, memberNameIndex, typeIndex);
+
+        codeReader.setFieldVisitor(new CodeReader.Visitor() {
+            public void visit(DecodedInstruction[] all,
+                    DecodedInstruction one) {
+                int fieldId = one.getIndex();
+                if (fieldIds.contains(fieldId)) {
+                    out.println(location() + ": field reference ("
+                            + OpcodeInfo.getName(one.getOpcode()) + ")");
+                }
+            }
+        });
+
+        codeReader.setMethodVisitor(new CodeReader.Visitor() {
+            public void visit(DecodedInstruction[] all,
+                    DecodedInstruction one) {
+                int methodId = one.getIndex();
+                if (methodIds.contains(methodId)) {
+                    out.println(location() + ": method reference ("
+                            + OpcodeInfo.getName(one.getOpcode()) + ")");
+                }
+            }
+        });
+    }
+
+    private String location() {
+        String className = dex.typeNames().get(currentClass.getTypeIndex());
+        if (currentMethod != null) {
+            MethodId methodId = dex.methodIds().get(currentMethod.getMethodIndex());
+            return className + "." + dex.strings().get(methodId.getNameIndex());
+        } else {
+            return className;
+        }
+    }
+
+    /**
+     * Prints usages to out.
+     */
+    public void findUsages() {
+        if (fieldIds == null || methodIds == null) {
+            return;
+        }
+
+        for (ClassDef classDef : dex.classDefs()) {
+            currentClass = classDef;
+            currentMethod = null;
+
+            if (classDef.getClassDataOffset() == 0) {
+                continue;
+            }
+
+            ClassData classData = dex.readClassData(classDef);
+            for (ClassData.Field field : classData.allFields()) {
+                if (fieldIds.contains(field.getFieldIndex())) {
+                    out.println(location() + " field declared");
+                }
+            }
+
+            for (ClassData.Method method : classData.allMethods()) {
+                currentMethod = method;
+                if (methodIds.contains(method.getMethodIndex())) {
+                    out.println(location() + " method declared");
+                }
+                if (method.getCodeOffset() != 0) {
+                    codeReader.visitAll(dex.readCode(method).getInstructions());
+                }
+            }
+        }
+
+        currentClass = null;
+        currentMethod = null;
+    }
+
+    /**
+     * Returns the fields with {@code memberNameIndex} declared by {@code
+     * declaringType}.
+     */
+    private Set<Integer> getFieldIds(DexBuffer dex, int memberNameIndex, int declaringType) {
+        Set<Integer> fields = new HashSet<Integer>();
+        int fieldIndex = 0;
+        for (FieldId fieldId : dex.fieldIds()) {
+            if (fieldId.getNameIndex() == memberNameIndex
+                    && declaringType == (int) fieldId.getDeclaringClassIndex()) {
+                fields.add(fieldIndex);
+            }
+            fieldIndex++;
+        }
+        return fields;
+    }
+
+    /**
+     * Returns the methods with {@code memberNameIndex} declared by {@code
+     * declaringType} and its subtypes.
+     */
+    private Set<Integer> getMethodIds(DexBuffer dex, int memberNameIndex, int declaringType) {
+        Set<Integer> subtypes = findAssignableTypes(dex, declaringType);
+
+        Set<Integer> methods = new HashSet<Integer>();
+        int methodIndex = 0;
+        for (MethodId method : dex.methodIds()) {
+            if (method.getNameIndex() == memberNameIndex
+                    && subtypes.contains((int) method.getDeclaringClassIndex())) {
+                methods.add(methodIndex);
+            }
+            methodIndex++;
+        }
+        return methods;
+    }
+
+    /**
+     * Returns the set of types that can be assigned to {@code typeIndex}.
+     */
+    private Set<Integer> findAssignableTypes(DexBuffer dex, int typeIndex) {
+        Set<Integer> assignableTypes = new HashSet<Integer>();
+        assignableTypes.add(typeIndex);
+
+        for (ClassDef classDef : dex.classDefs()) {
+            if (assignableTypes.contains(classDef.getSupertypeIndex())) {
+                assignableTypes.add(classDef.getTypeIndex());
+                continue;
+            }
+
+            for (int implemented : classDef.getInterfaces()) {
+                if (assignableTypes.contains(implemented)) {
+                    assignableTypes.add(classDef.getTypeIndex());
+                    break;
+                }
+            }
+        }
+
+        return assignableTypes;
+    }
+}
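FindUsages binary-searches the sorted string and type id tables to resolve the declaring type and member name, collects the matching field and method ids (including methods declared on subtypes, which findAssignableTypes can gather in one pass because class_defs list supertypes before their subtypes), and then scans every method body with CodeReader. A minimal sketch of calling it directly on a raw dex file; the path and member are hypothetical and follow the form given in the --find-usages help text above:

    import com.android.dx.command.findusages.FindUsages;
    import com.android.dx.io.DexBuffer;
    import java.io.File;
    import java.io.IOException;

    // Sketch only: report declarations of and references to Object.hashCode.
    public final class FindHashCodeUsages {
        public static void main(String[] args) throws IOException {
            DexBuffer dex = new DexBuffer();
            dex.loadFrom(new File("app/classes.dex"));   // hypothetical dex file
            new FindUsages(dex, "Ljava/lang/Object;", "hashCode", System.out).findUsages();
        }
    }
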
diff --git a/dx/src/com/android/dx/command/findusages/Main.java b/dx/src/com/android/dx/command/findusages/Main.java
new file mode 100644
index 0000000..c3c203a
--- /dev/null
+++ b/dx/src/com/android/dx/command/findusages/Main.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.command.findusages;
+
+import com.android.dx.dex.DexFormat;
+import com.android.dx.io.DexBuffer;
+import com.android.dx.util.FileUtils;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.zip.ZipFile;
+
+public final class Main {
+    public static void main(String[] args) throws IOException {
+        String dexFile = args[0];
+        String declaredBy = args[1];
+        String memberName = args[2];
+
+        DexBuffer dex = new DexBuffer();
+        if (FileUtils.hasArchiveSuffix(dexFile)) {
+            ZipFile zip = new ZipFile(dexFile);
+            InputStream in = zip.getInputStream(zip.getEntry(DexFormat.DEX_IN_JAR_NAME));
+            dex.loadFrom(in);
+            zip.close();
+        } else {
+            dex.loadFrom(new File(dexFile));
+        }
+
+        new FindUsages(dex, declaredBy, memberName, System.out).findUsages();
+    }
+}
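Both new front ends lean on FileUtils.hasArchiveSuffix to decide whether the input is an archive whose classes.dex entry must be unpacked first. That helper is not part of this diff; a minimal sketch of what it presumably checks, based on the suffix tests it replaced in the dexer's Arguments above:

    // Sketch only: the dexer previously special-cased these three suffixes inline.
    final class ArchiveSuffix {
        static boolean hasArchiveSuffix(String fileName) {
            return fileName.endsWith(".zip")
                    || fileName.endsWith(".jar")
                    || fileName.endsWith(".apk");
        }
    }
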
diff --git a/dx/src/com/android/dx/command/grep/Grep.java b/dx/src/com/android/dx/command/grep/Grep.java
new file mode 100644
index 0000000..025aa65
--- /dev/null
+++ b/dx/src/com/android/dx/command/grep/Grep.java
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.command.grep;
+
+import com.android.dx.io.ClassData;
+import com.android.dx.io.ClassDef;
+import com.android.dx.io.CodeReader;
+import com.android.dx.io.DexBuffer;
+import com.android.dx.io.EncodedValueReader;
+import com.android.dx.io.MethodId;
+import com.android.dx.io.instructions.DecodedInstruction;
+import java.io.PrintStream;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.regex.Pattern;
+
+public final class Grep {
+    private final DexBuffer dex;
+    private final CodeReader codeReader = new CodeReader();
+    private final Set<Integer> stringIds;
+
+    private final PrintStream out;
+    private int count = 0;
+
+    private ClassDef currentClass;
+    private ClassData.Method currentMethod;
+
+    public Grep(final DexBuffer dex, Pattern pattern, final PrintStream out) {
+        this.dex = dex;
+        this.out = out;
+
+        stringIds = getStringIds(dex, pattern);
+
+        codeReader.setStringVisitor(new CodeReader.Visitor() {
+            public void visit(DecodedInstruction[] all, DecodedInstruction one) {
+                encounterString(one.getIndex());
+            }
+        });
+    }
+
+    private EncodedValueReader newEncodedValueReader(DexBuffer.Section section) {
+        return new EncodedValueReader(section) {
+            @Override protected void visitString(int type, int index) {
+                encounterString(index);
+            }
+        };
+    }
+
+    private void encounterString(int index) {
+        if (stringIds.contains(index)) {
+            out.println(location() + " " + dex.strings().get(index));
+            count++;
+        }
+    }
+
+    private String location() {
+        String className = dex.typeNames().get(currentClass.getTypeIndex());
+        if (currentMethod != null) {
+            MethodId methodId = dex.methodIds().get(currentMethod.getMethodIndex());
+            return className + "." + dex.strings().get(methodId.getNameIndex());
+        } else {
+            return className;
+        }
+    }
+
+    /**
+     * Prints usages to out. Returns the number of matches found.
+     */
+    public int grep() {
+        for (ClassDef classDef : dex.classDefs()) {
+            currentClass = classDef;
+            currentMethod = null;
+
+            if (classDef.getClassDataOffset() == 0) {
+                continue;
+            }
+
+            ClassData classData = dex.readClassData(classDef);
+
+            // find the strings in encoded constants
+            int staticValuesOffset = classDef.getStaticValuesOffset();
+            if (staticValuesOffset != 0) {
+                newEncodedValueReader(dex.open(staticValuesOffset)).readArray();
+            }
+
+            // find the strings in method bodies
+            for (ClassData.Method method : classData.allMethods()) {
+                currentMethod = method;
+                if (method.getCodeOffset() != 0) {
+                    codeReader.visitAll(dex.readCode(method).getInstructions());
+                }
+            }
+        }
+
+        currentClass = null;
+        currentMethod = null;
+        return count;
+    }
+
+    private Set<Integer> getStringIds(DexBuffer dex, Pattern pattern) {
+        Set<Integer> stringIds = new HashSet<Integer>();
+        int stringIndex = 0;
+        for (String s : dex.strings()) {
+            if (pattern.matcher(s).find()) {
+                stringIds.add(stringIndex);
+            }
+            stringIndex++;
+        }
+        return stringIds;
+    }
+}
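Grep matches a regular expression against the string table once, then reports every hit reachable from code (via CodeReader's string visitor) or from a class's static values (via EncodedValueReader). A minimal sketch of using it directly; the dex path and pattern are hypothetical:

    import com.android.dx.command.grep.Grep;
    import com.android.dx.io.DexBuffer;
    import java.io.File;
    import java.io.IOException;
    import java.util.regex.Pattern;

    // Sketch only: print every string constant in the dex that looks like a URL.
    public final class GrepForUrls {
        public static void main(String[] args) throws IOException {
            DexBuffer dex = new DexBuffer();
            dex.loadFrom(new File("app/classes.dex"));   // hypothetical dex file
            int matches = new Grep(dex, Pattern.compile("https?://"), System.out).grep();
            System.out.println(matches + " match(es)");
        }
    }
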
diff --git a/dx/src/com/android/dx/command/grep/Main.java b/dx/src/com/android/dx/command/grep/Main.java
new file mode 100644
index 0000000..e765a31
--- /dev/null
+++ b/dx/src/com/android/dx/command/grep/Main.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.command.grep;
+
+import com.android.dx.dex.DexFormat;
+import com.android.dx.io.DexBuffer;
+import com.android.dx.util.FileUtils;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.regex.Pattern;
+import java.util.zip.ZipFile;
+
+public final class Main {
+    public static void main(String[] args) throws IOException {
+        String dexFile = args[0];
+        String pattern = args[1];
+
+        DexBuffer dex = new DexBuffer();
+        if (FileUtils.hasArchiveSuffix(dexFile)) {
+            ZipFile zip = new ZipFile(dexFile);
+            InputStream in = zip.getInputStream(zip.getEntry(DexFormat.DEX_IN_JAR_NAME));
+            dex.loadFrom(in);
+            zip.close();
+        } else {
+            dex.loadFrom(new File(dexFile));
+        }
+
+        int count = new Grep(dex, Pattern.compile(pattern), System.out).grep();
+        System.exit((count > 0) ? 0 : 1);
+    }
+}
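For reference, the Grep class introduced above can also be driven directly, without this command-line wrapper; a minimal sketch using only calls shown in this change (the file name and pattern are illustrative):

    DexBuffer dex = new DexBuffer();
    dex.loadFrom(new File("classes.dex"));   // or loadFrom(InputStream) for a classes.dex inside a jar
    int matches = new Grep(dex, Pattern.compile("https?://"), System.out).grep();
    // matches > 0 when at least one interned string in the dex matched the pattern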
diff --git a/dx/src/com/android/dx/dex/DexFormat.java b/dx/src/com/android/dx/dex/DexFormat.java
new file mode 100644
index 0000000..4b83901
--- /dev/null
+++ b/dx/src/com/android/dx/dex/DexFormat.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.dex;
+
+public final class DexFormat {
+    private DexFormat() {}
+
+    public static final String DEX_IN_JAR_NAME = "classes.dex";
+    public static final String MAGIC = "dex\n035\0";
+    public static final int ENDIAN_TAG = 0x12345678;
+}
diff --git a/dx/src/com/android/dx/dex/SizeOf.java b/dx/src/com/android/dx/dex/SizeOf.java
new file mode 100644
index 0000000..476f7bb
--- /dev/null
+++ b/dx/src/com/android/dx/dex/SizeOf.java
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.dex;
+
+public final class SizeOf {
+    private SizeOf() {}
+
+    public static final int UBYTE = 1;
+    public static final int USHORT = 2;
+    public static final int UINT = 4;
+
+    public static final int SIGNATURE = UBYTE * 20;
+
+    /**
+     * magic ubyte[8]
+     * checksum uint
+     * signature ubyte[20]
+     * file_size uint
+     * header_size uint
+     * endian_tag uint
+     * link_size uint
+     * link_off uint
+     * map_off uint
+     * string_ids_size uint
+     * string_ids_off uint
+     * type_ids_size uint
+     * type_ids_off uint
+     * proto_ids_size uint
+     * proto_ids_off uint
+     * field_ids_size uint
+     * field_ids_off uint
+     * method_ids_size uint
+     * method_ids_off uint
+     * class_defs_size uint
+     * class_defs_off uint
+     * data_size uint
+     * data_off uint
+     */
+    public static final int HEADER_ITEM = (8 * UBYTE) + UINT + SIGNATURE + (20 * UINT); // 0x70
+
+    /**
+     * string_data_off uint
+     */
+    public static final int STRING_ID_ITEM = UINT;
+
+    /**
+     * descriptor_idx uint
+     */
+    public static final int TYPE_ID_ITEM = UINT;
+
+    /**
+     * type_idx ushort
+     */
+    public static final int TYPE_ITEM = USHORT;
+
+    /**
+     * shorty_idx uint
+     * return_type_idx uint
+     * parameters_off uint
+     */
+    public static final int PROTO_ID_ITEM = UINT + UINT + UINT;
+
+    /**
+     * class_idx ushort
+     * type_idx/proto_idx ushort
+     * name_idx uint
+     */
+    public static final int MEMBER_ID_ITEM = USHORT + USHORT + UINT;
+
+    /**
+     * class_idx uint
+     * access_flags uint
+     * superclass_idx uint
+     * interfaces_off uint
+     * source_file_idx uint
+     * annotations_off uint
+     * class_data_off uint
+     * static_values_off uint
+     */
+    public static final int CLASS_DEF_ITEM = 8 * UINT;
+
+    /**
+     * type ushort
+     * unused ushort
+     * size uint
+     * offset uint
+     */
+    public static final int MAP_ITEM = USHORT + USHORT + UINT + UINT;
+}
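As a quick sanity check of the header constant above, the individual field sizes add up to the 0x70 bytes that readHeader() verifies further down:

    // 8 * UBYTE  =   8   magic
    //     UINT   =   4   checksum
    //  SIGNATURE =  20   sha-1 digest
    // 20 * UINT  =  80   file_size .. data_off
    //      total = 112  == 0x70
    assert SizeOf.HEADER_ITEM == 0x70;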
diff --git a/dx/src/com/android/dx/dex/TableOfContents.java b/dx/src/com/android/dx/dex/TableOfContents.java
new file mode 100644
index 0000000..e5d56b7
--- /dev/null
+++ b/dx/src/com/android/dx/dex/TableOfContents.java
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.dex;
+
+import com.android.dx.io.DexBuffer;
+import com.android.dx.util.DexException;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.util.Arrays;
+
+/**
+ * The file header and map.
+ */
+public final class TableOfContents {
+
+    /*
+     * TODO: factor out ID constants.
+     */
+
+    public final Section header = new Section(0x0000);
+    public final Section stringIds = new Section(0x0001);
+    public final Section typeIds = new Section(0x0002);
+    public final Section protoIds = new Section(0x0003);
+    public final Section fieldIds = new Section(0x0004);
+    public final Section methodIds = new Section(0x0005);
+    public final Section classDefs = new Section(0x0006);
+    public final Section mapList = new Section(0x1000);
+    public final Section typeLists = new Section(0x1001);
+    public final Section annotationSetRefLists = new Section(0x1002);
+    public final Section annotationSets = new Section(0x1003);
+    public final Section classDatas = new Section(0x2000);
+    public final Section codes = new Section(0x2001);
+    public final Section stringDatas = new Section(0x2002);
+    public final Section debugInfos = new Section(0x2003);
+    public final Section annotations = new Section(0x2004);
+    public final Section encodedArrays = new Section(0x2005);
+    public final Section annotationsDirectories = new Section(0x2006);
+    public final Section[] sections = {
+            header, stringIds, typeIds, protoIds, fieldIds, methodIds, classDefs, mapList,
+            typeLists, annotationSetRefLists, annotationSets, classDatas, codes, stringDatas,
+            debugInfos, annotations, encodedArrays, annotationsDirectories
+    };
+
+    public int checksum;
+    public byte[] signature;
+    public int fileSize;
+    public int linkSize;
+    public int linkOff;
+    public int dataSize;
+    public int dataOff;
+
+    public TableOfContents() {
+        signature = new byte[20];
+    }
+
+    public void readFrom(DexBuffer buffer) throws IOException {
+        readHeader(buffer.open(0));
+        readMap(buffer.open(mapList.off));
+        computeSizesFromOffsets();
+    }
+
+    private void readHeader(DexBuffer.Section headerIn) throws UnsupportedEncodingException {
+        byte[] magic = headerIn.readByteArray(8);
+        if (!Arrays.equals(DexFormat.MAGIC.getBytes("UTF-8"), magic)) {
+            throw new DexException("Unexpected magic: " + Arrays.toString(magic));
+        }
+
+        checksum = headerIn.readInt();
+        signature = headerIn.readByteArray(20);
+        fileSize = headerIn.readInt();
+        int headerSize = headerIn.readInt();
+        if (headerSize != SizeOf.HEADER_ITEM) {
+            throw new DexException("Unexpected header: 0x" + Integer.toHexString(headerSize));
+        }
+        int endianTag = headerIn.readInt();
+        if (endianTag != DexFormat.ENDIAN_TAG) {
+            throw new DexException("Unexpected endian tag: 0x" + Integer.toHexString(endianTag));
+        }
+        linkSize = headerIn.readInt();
+        linkOff = headerIn.readInt();
+        mapList.off = headerIn.readInt();
+        if (mapList.off == 0) {
+            throw new DexException("Cannot merge dex files that do not contain a map");
+        }
+        stringIds.size = headerIn.readInt();
+        stringIds.off = headerIn.readInt();
+        typeIds.size = headerIn.readInt();
+        typeIds.off = headerIn.readInt();
+        protoIds.size = headerIn.readInt();
+        protoIds.off = headerIn.readInt();
+        fieldIds.size = headerIn.readInt();
+        fieldIds.off = headerIn.readInt();
+        methodIds.size = headerIn.readInt();
+        methodIds.off = headerIn.readInt();
+        classDefs.size = headerIn.readInt();
+        classDefs.off = headerIn.readInt();
+        dataSize = headerIn.readInt();
+        dataOff = headerIn.readInt();
+    }
+
+    private void readMap(DexBuffer.Section in) throws IOException {
+        int mapSize = in.readInt();
+        Section previous = null;
+        for (int i = 0; i < mapSize; i++) {
+            short type = in.readShort();
+            in.readShort(); // unused
+            Section section = getSection(type);
+            int size = in.readInt();
+            int offset = in.readInt();
+
+            if ((section.size != -1 && section.size != size)
+                    || (section.off != -1 && section.off != offset)) {
+                throw new DexException("Unexpected map value for 0x" + Integer.toHexString(type));
+            }
+
+            section.size = size;
+            section.off = offset;
+
+            if (previous != null && previous.off > section.off) {
+                throw new DexException("Map is unsorted at " + previous + ", " + section);
+            }
+
+            previous = section;
+        }
+        Arrays.sort(sections);
+    }
+
+    public void computeSizesFromOffsets() {
+        int end = dataOff + dataSize;
+        for (int i = sections.length - 1; i >= 0; i--) {
+            Section section = sections[i];
+            if (section.off == -1) {
+                continue;
+            }
+            if (section.off > end) {
+                throw new DexException("Map is unsorted at " + section);
+            }
+            section.byteCount = end - section.off;
+            end = section.off;
+        }
+    }
+
+    private Section getSection(short type) {
+        for (Section section : sections) {
+            if (section.type == type) {
+                return section;
+            }
+        }
+        throw new IllegalArgumentException("No such map item: " + type);
+    }
+
+    public void writeHeader(DexBuffer.Section out) throws IOException {
+        out.write(DexFormat.MAGIC.getBytes("UTF-8"));
+        out.writeInt(checksum);
+        out.write(signature);
+        out.writeInt(fileSize);
+        out.writeInt(SizeOf.HEADER_ITEM);
+        out.writeInt(DexFormat.ENDIAN_TAG);
+        out.writeInt(linkSize);
+        out.writeInt(linkOff);
+        out.writeInt(mapList.off);
+        out.writeInt(stringIds.size);
+        out.writeInt(stringIds.off);
+        out.writeInt(typeIds.size);
+        out.writeInt(typeIds.off);
+        out.writeInt(protoIds.size);
+        out.writeInt(protoIds.off);
+        out.writeInt(fieldIds.size);
+        out.writeInt(fieldIds.off);
+        out.writeInt(methodIds.size);
+        out.writeInt(methodIds.off);
+        out.writeInt(classDefs.size);
+        out.writeInt(classDefs.off);
+        out.writeInt(dataSize);
+        out.writeInt(dataOff);
+    }
+
+    public void writeMap(DexBuffer.Section out) throws IOException {
+        int count = 0;
+        for (Section section : sections) {
+            if (section.size > 0) {
+                count++;
+            }
+        }
+
+        out.writeInt(count);
+        for (Section section : sections) {
+            if (section.size > 0) {
+                out.writeShort(section.type);
+                out.writeShort((short) 0);
+                out.writeInt(section.size);
+                out.writeInt(section.off);
+            }
+        }
+    }
+
+    public static class Section implements Comparable<Section> {
+        public final short type;
+        public int size = -1;
+        public int off = -1;
+        public int byteCount = 0;
+
+        public Section(int type) {
+            this.type = (short) type;
+        }
+
+        public boolean exists() {
+            return size != -1;
+        }
+
+        public int compareTo(Section section) {
+            if (off != section.off) {
+                return off < section.off ? -1 : 1;
+            }
+            return 0;
+        }
+
+        @Override public String toString() {
+            return String.format("Section[type=%#x,off=%#x,size=%#x]", type, off, size);
+        }
+    }
+}
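A minimal usage sketch for the new class, based only on the methods shown above (the file name is illustrative; loadFrom() and readFrom() may throw IOException):

    DexBuffer dex = new DexBuffer();
    dex.loadFrom(new File("classes.dex"));
    TableOfContents toc = new TableOfContents();
    toc.readFrom(dex);                        // parses the header at offset 0, then the map at map_off
    for (TableOfContents.Section s : toc.sections) {
        if (s.exists()) {
            System.out.println(s);            // Section[type=...,off=...,size=...]
        }
    }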
diff --git a/dx/src/com/android/dx/dex/code/ArrayData.java b/dx/src/com/android/dx/dex/code/ArrayData.java
index 145f2c2..6674b75 100644
--- a/dx/src/com/android/dx/dex/code/ArrayData.java
+++ b/dx/src/com/android/dx/dex/code/ArrayData.java
@@ -16,6 +16,7 @@
 
 package com.android.dx.dex.code;
 
+import com.android.dx.io.Opcodes;
 import com.android.dx.rop.code.RegisterSpecList;
 import com.android.dx.rop.code.SourcePosition;
 import com.android.dx.rop.cst.*;
@@ -109,7 +110,7 @@
     public void writeTo(AnnotatedOutput out) {
         int sz = values.size();
 
-        out.writeShort(0x300 | DalvOps.NOP);
+        out.writeShort(Opcodes.FILL_ARRAY_DATA_PAYLOAD);
         out.writeShort(elemWidth);
         out.writeInt(initLength);
 
@@ -183,7 +184,7 @@
         StringBuffer sb = new StringBuffer(100);
         int sz = values.size();
 
-        sb.append("array-data // for fill-array-data @ ");
+        sb.append("fill-array-data-payload // for fill-array-data @ ");
         sb.append(Hex.u2(baseAddress));
 
         for (int i = 0; i < sz; i++) {
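The payload word emitted by writeTo() above does not change in value: given NOP = 0x00, the old expression 0x300 | DalvOps.NOP is 0x0300, so the new constant is assumed to carry the same identifier:

    // fill-array-data-payload is tagged with NOP in the low byte and 0x03 in the high byte
    assert Opcodes.FILL_ARRAY_DATA_PAYLOAD == 0x0300;   // assumption; matches the old 0x300 | NOP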
diff --git a/dx/src/com/android/dx/dex/code/DalvInsn.java b/dx/src/com/android/dx/dex/code/DalvInsn.java
index f203817..d0cf395 100644
--- a/dx/src/com/android/dx/dex/code/DalvInsn.java
+++ b/dx/src/com/android/dx/dex/code/DalvInsn.java
@@ -23,6 +23,8 @@
 import com.android.dx.util.Hex;
 import com.android.dx.util.TwoColumnOutput;
 
+import java.util.BitSet;
+
 /**
  * Base class for Dalvik instructions.
  */
@@ -205,60 +207,85 @@
 
     /**
      * Gets the minimum distinct registers required for this instruction.
+     * Uses the given BitSet to determine which registers require
+     * replacement, and ignores registers that are already compatible.
      * This assumes that the result (if any) can share registers with the
      * sources (if any), that each source register is unique, and that
      * (to be explicit here) category-2 values take up two consecutive
      * registers.
      *
+     * @param compatRegs {@code non-null;} set of compatible registers
      * @return {@code >= 0;} the minimum distinct register requirement
      */
-    public final int getMinimumRegisterRequirement() {
+    public final int getMinimumRegisterRequirement(BitSet compatRegs) {
         boolean hasResult = hasResult();
         int regSz = registers.size();
-        int resultRequirement = hasResult ? registers.get(0).getCategory() : 0;
+        int resultRequirement = 0;
         int sourceRequirement = 0;
 
+        if (hasResult && !compatRegs.get(0)) {
+            resultRequirement = registers.get(0).getCategory();
+        }
+
         for (int i = hasResult ? 1 : 0; i < regSz; i++) {
-            sourceRequirement += registers.get(i).getCategory();
+            if (!compatRegs.get(i)) {
+                sourceRequirement += registers.get(i).getCategory();
+            }
         }
 
         return Math.max(sourceRequirement, resultRequirement);
     }
 
     /**
-     * Gets the instruction prefix required, if any, to use in a high
-     * register transformed version of this instance.
+     * Gets the instruction that is equivalent to this one, except that
+     * it uses sequential registers starting at {@code 0} (storing
+     * the result, if any, in register {@code 0} as well).
      *
-     * @see #hrVersion
+     * @return {@code non-null;} the replacement
+     */
+    public DalvInsn getLowRegVersion() {
+        RegisterSpecList regs =
+            registers.withExpandedRegisters(0, hasResult(), null);
+        return withRegisters(regs);
+    }
+
+    /**
+     * Gets the instruction prefix required, if any, to use in an expanded
+     * version of this instance. Will not generate moves for registers
+     * marked as compatible with the format by the given BitSet.
      *
+     * @see #expandedVersion
+     *
+     * @param compatRegs {@code non-null;} set of compatible registers
      * @return {@code null-ok;} the prefix, if any
      */
-    public DalvInsn hrPrefix() {
+    public DalvInsn expandedPrefix(BitSet compatRegs) {
         RegisterSpecList regs = registers;
-        int sz = regs.size();
+        boolean firstBit = compatRegs.get(0);
 
-        if (hasResult()) {
-            if (sz == 1) {
-                return null;
-            }
-            regs = regs.withoutFirst();
-        } else if (sz == 0) {
-            return null;
-        }
+        if (hasResult()) compatRegs.set(0);
+
+        regs = regs.subset(compatRegs);
+
+        if (hasResult()) compatRegs.set(0, firstBit);
+
+        if (regs.size() == 0) return null;
 
         return new HighRegisterPrefix(position, regs);
     }
 
     /**
-     * Gets the instruction suffix required, if any, to use in a high
-     * register transformed version of this instance.
+     * Gets the instruction suffix required, if any, to use in an expanded
+     * version of this instance. Will not generate a move for a register
+     * marked as compatible with the format by the given BitSet.
      *
-     * @see #hrVersion
+     * @see #expandedVersion
      *
+     * @param compatRegs {@code non-null;} set of compatible registers
      * @return {@code null-ok;} the suffix, if any
      */
-    public DalvInsn hrSuffix() {
-        if (hasResult()) {
+    public DalvInsn expandedSuffix(BitSet compatRegs) {
+        if (hasResult() && !compatRegs.get(0)) {
             RegisterSpec r = registers.get(0);
             return makeMove(position, r, r.withReg(0));
         } else {
@@ -268,20 +295,21 @@
 
     /**
      * Gets the instruction that is equivalent to this one, except that
-     * uses sequential registers starting at {@code 0} (storing
-     * the result, if any, in register {@code 0} as well). The
-     * sequence of instructions from {@link #hrPrefix} and {@link
-     * #hrSuffix} (if non-null) surrounding the result of a call to
-     * this method are the high register transformation of this
-     * instance, and it is guaranteed that the number of low registers
-     * used will be the number returned by {@link
-     * #getMinimumRegisterRequirement}.
+     * it replaces incompatible registers with sequential registers
+     * starting at {@code 0} (storing the result, if any, in register
+     * {@code 0} as well). The sequence of instructions from
+     * {@link #expandedPrefix} and {@link #expandedSuffix} (if non-null)
+     * surrounding the result of a call to this method is the expanded
+     * transformation of this instance, and it is guaranteed that the
+     * number of low registers used will be the number returned by
+     * {@link #getMinimumRegisterRequirement}.
      *
+     * @param compatRegs {@code non-null;} set of compatible registers
      * @return {@code non-null;} the replacement
      */
-    public DalvInsn hrVersion() {
+    public DalvInsn expandedVersion(BitSet compatRegs) {
         RegisterSpecList regs =
-            registers.withSequentialRegisters(0, hasResult());
+            registers.withExpandedRegisters(0, hasResult(), compatRegs);
         return withRegisters(regs);
     }
 
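Taken together, the renamed methods form a three-part expansion; a hedged sketch of the intended call pattern (the compatRegs BitSet is assumed to be supplied by the caller, e.g. derived from the target instruction format):

    DalvInsn prefix = insn.expandedPrefix(compatRegs);   // moves for incompatible sources, or null
    DalvInsn body   = insn.expandedVersion(compatRegs);  // same opcode, low registers substituted
    DalvInsn suffix = insn.expandedSuffix(compatRegs);   // move of the result back to its register, or null
    // Emit prefix (if non-null), then body, then suffix (if non-null); the low registers used
    // are bounded by insn.getMinimumRegisterRequirement(compatRegs).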
diff --git a/dx/src/com/android/dx/dex/code/DalvInsnList.java b/dx/src/com/android/dx/dex/code/DalvInsnList.java
index 0f8c23d..e856cb4 100644
--- a/dx/src/com/android/dx/dex/code/DalvInsnList.java
+++ b/dx/src/com/android/dx/dex/code/DalvInsnList.java
@@ -16,6 +16,7 @@
 
 package com.android.dx.dex.code;
 
+import com.android.dx.io.Opcodes;
 import com.android.dx.rop.cst.Constant;
 import com.android.dx.rop.cst.CstBaseMethodRef;
 import com.android.dx.util.AnnotatedOutput;
@@ -201,7 +202,7 @@
             }
 
             boolean isStatic =
-                (insn.getOpcode().getFamily() == DalvOps.INVOKE_STATIC);
+                (insn.getOpcode().getFamily() == Opcodes.INVOKE_STATIC);
             int count =
                 ((CstBaseMethodRef) cst).getParameterWordCount(isStatic);
 
diff --git a/dx/src/com/android/dx/dex/code/DalvOps.java b/dx/src/com/android/dx/dex/code/DalvOps.java
deleted file mode 100644
index 2a83c70..0000000
--- a/dx/src/com/android/dx/dex/code/DalvOps.java
+++ /dev/null
@@ -1,330 +0,0 @@
-/*
- * Copyright (C) 2007 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.android.dx.dex.code;
-
-/**
- * All the Dalvik opcode value constants. See the related spec
- * document for the meaning and instruction format of each opcode.
- */
-public final class DalvOps {
-    /** pseudo-opcode used for nonstandard format "instructions" */
-    public static final int SPECIAL_FORMAT = -1;
-
-    /** pseudo-opcode used to indicate there is no next opcode */
-    public static final int NO_NEXT = -1;
-
-    /** minimum valid opcode value */
-    public static final int MIN_VALUE = -1;
-
-    /** maximum valid opcode value */
-    public static final int MAX_VALUE = 0xffff;
-
-    // BEGIN(opcodes); GENERATED AUTOMATICALLY BY opcode-gen
-    public static final int NOP = 0x00;
-    public static final int MOVE = 0x01;
-    public static final int MOVE_FROM16 = 0x02;
-    public static final int MOVE_16 = 0x03;
-    public static final int MOVE_WIDE = 0x04;
-    public static final int MOVE_WIDE_FROM16 = 0x05;
-    public static final int MOVE_WIDE_16 = 0x06;
-    public static final int MOVE_OBJECT = 0x07;
-    public static final int MOVE_OBJECT_FROM16 = 0x08;
-    public static final int MOVE_OBJECT_16 = 0x09;
-    public static final int MOVE_RESULT = 0x0a;
-    public static final int MOVE_RESULT_WIDE = 0x0b;
-    public static final int MOVE_RESULT_OBJECT = 0x0c;
-    public static final int MOVE_EXCEPTION = 0x0d;
-    public static final int RETURN_VOID = 0x0e;
-    public static final int RETURN = 0x0f;
-    public static final int RETURN_WIDE = 0x10;
-    public static final int RETURN_OBJECT = 0x11;
-    public static final int CONST_4 = 0x12;
-    public static final int CONST_16 = 0x13;
-    public static final int CONST = 0x14;
-    public static final int CONST_HIGH16 = 0x15;
-    public static final int CONST_WIDE_16 = 0x16;
-    public static final int CONST_WIDE_32 = 0x17;
-    public static final int CONST_WIDE = 0x18;
-    public static final int CONST_WIDE_HIGH16 = 0x19;
-    public static final int CONST_STRING = 0x1a;
-    public static final int CONST_STRING_JUMBO = 0x1b;
-    public static final int CONST_CLASS = 0x1c;
-    public static final int MONITOR_ENTER = 0x1d;
-    public static final int MONITOR_EXIT = 0x1e;
-    public static final int CHECK_CAST = 0x1f;
-    public static final int INSTANCE_OF = 0x20;
-    public static final int ARRAY_LENGTH = 0x21;
-    public static final int NEW_INSTANCE = 0x22;
-    public static final int NEW_ARRAY = 0x23;
-    public static final int FILLED_NEW_ARRAY = 0x24;
-    public static final int FILLED_NEW_ARRAY_RANGE = 0x25;
-    public static final int FILL_ARRAY_DATA = 0x26;
-    public static final int THROW = 0x27;
-    public static final int GOTO = 0x28;
-    public static final int GOTO_16 = 0x29;
-    public static final int GOTO_32 = 0x2a;
-    public static final int PACKED_SWITCH = 0x2b;
-    public static final int SPARSE_SWITCH = 0x2c;
-    public static final int CMPL_FLOAT = 0x2d;
-    public static final int CMPG_FLOAT = 0x2e;
-    public static final int CMPL_DOUBLE = 0x2f;
-    public static final int CMPG_DOUBLE = 0x30;
-    public static final int CMP_LONG = 0x31;
-    public static final int IF_EQ = 0x32;
-    public static final int IF_NE = 0x33;
-    public static final int IF_LT = 0x34;
-    public static final int IF_GE = 0x35;
-    public static final int IF_GT = 0x36;
-    public static final int IF_LE = 0x37;
-    public static final int IF_EQZ = 0x38;
-    public static final int IF_NEZ = 0x39;
-    public static final int IF_LTZ = 0x3a;
-    public static final int IF_GEZ = 0x3b;
-    public static final int IF_GTZ = 0x3c;
-    public static final int IF_LEZ = 0x3d;
-    public static final int AGET = 0x44;
-    public static final int AGET_WIDE = 0x45;
-    public static final int AGET_OBJECT = 0x46;
-    public static final int AGET_BOOLEAN = 0x47;
-    public static final int AGET_BYTE = 0x48;
-    public static final int AGET_CHAR = 0x49;
-    public static final int AGET_SHORT = 0x4a;
-    public static final int APUT = 0x4b;
-    public static final int APUT_WIDE = 0x4c;
-    public static final int APUT_OBJECT = 0x4d;
-    public static final int APUT_BOOLEAN = 0x4e;
-    public static final int APUT_BYTE = 0x4f;
-    public static final int APUT_CHAR = 0x50;
-    public static final int APUT_SHORT = 0x51;
-    public static final int IGET = 0x52;
-    public static final int IGET_WIDE = 0x53;
-    public static final int IGET_OBJECT = 0x54;
-    public static final int IGET_BOOLEAN = 0x55;
-    public static final int IGET_BYTE = 0x56;
-    public static final int IGET_CHAR = 0x57;
-    public static final int IGET_SHORT = 0x58;
-    public static final int IPUT = 0x59;
-    public static final int IPUT_WIDE = 0x5a;
-    public static final int IPUT_OBJECT = 0x5b;
-    public static final int IPUT_BOOLEAN = 0x5c;
-    public static final int IPUT_BYTE = 0x5d;
-    public static final int IPUT_CHAR = 0x5e;
-    public static final int IPUT_SHORT = 0x5f;
-    public static final int SGET = 0x60;
-    public static final int SGET_WIDE = 0x61;
-    public static final int SGET_OBJECT = 0x62;
-    public static final int SGET_BOOLEAN = 0x63;
-    public static final int SGET_BYTE = 0x64;
-    public static final int SGET_CHAR = 0x65;
-    public static final int SGET_SHORT = 0x66;
-    public static final int SPUT = 0x67;
-    public static final int SPUT_WIDE = 0x68;
-    public static final int SPUT_OBJECT = 0x69;
-    public static final int SPUT_BOOLEAN = 0x6a;
-    public static final int SPUT_BYTE = 0x6b;
-    public static final int SPUT_CHAR = 0x6c;
-    public static final int SPUT_SHORT = 0x6d;
-    public static final int INVOKE_VIRTUAL = 0x6e;
-    public static final int INVOKE_SUPER = 0x6f;
-    public static final int INVOKE_DIRECT = 0x70;
-    public static final int INVOKE_STATIC = 0x71;
-    public static final int INVOKE_INTERFACE = 0x72;
-    public static final int INVOKE_VIRTUAL_RANGE = 0x74;
-    public static final int INVOKE_SUPER_RANGE = 0x75;
-    public static final int INVOKE_DIRECT_RANGE = 0x76;
-    public static final int INVOKE_STATIC_RANGE = 0x77;
-    public static final int INVOKE_INTERFACE_RANGE = 0x78;
-    public static final int NEG_INT = 0x7b;
-    public static final int NOT_INT = 0x7c;
-    public static final int NEG_LONG = 0x7d;
-    public static final int NOT_LONG = 0x7e;
-    public static final int NEG_FLOAT = 0x7f;
-    public static final int NEG_DOUBLE = 0x80;
-    public static final int INT_TO_LONG = 0x81;
-    public static final int INT_TO_FLOAT = 0x82;
-    public static final int INT_TO_DOUBLE = 0x83;
-    public static final int LONG_TO_INT = 0x84;
-    public static final int LONG_TO_FLOAT = 0x85;
-    public static final int LONG_TO_DOUBLE = 0x86;
-    public static final int FLOAT_TO_INT = 0x87;
-    public static final int FLOAT_TO_LONG = 0x88;
-    public static final int FLOAT_TO_DOUBLE = 0x89;
-    public static final int DOUBLE_TO_INT = 0x8a;
-    public static final int DOUBLE_TO_LONG = 0x8b;
-    public static final int DOUBLE_TO_FLOAT = 0x8c;
-    public static final int INT_TO_BYTE = 0x8d;
-    public static final int INT_TO_CHAR = 0x8e;
-    public static final int INT_TO_SHORT = 0x8f;
-    public static final int ADD_INT = 0x90;
-    public static final int SUB_INT = 0x91;
-    public static final int MUL_INT = 0x92;
-    public static final int DIV_INT = 0x93;
-    public static final int REM_INT = 0x94;
-    public static final int AND_INT = 0x95;
-    public static final int OR_INT = 0x96;
-    public static final int XOR_INT = 0x97;
-    public static final int SHL_INT = 0x98;
-    public static final int SHR_INT = 0x99;
-    public static final int USHR_INT = 0x9a;
-    public static final int ADD_LONG = 0x9b;
-    public static final int SUB_LONG = 0x9c;
-    public static final int MUL_LONG = 0x9d;
-    public static final int DIV_LONG = 0x9e;
-    public static final int REM_LONG = 0x9f;
-    public static final int AND_LONG = 0xa0;
-    public static final int OR_LONG = 0xa1;
-    public static final int XOR_LONG = 0xa2;
-    public static final int SHL_LONG = 0xa3;
-    public static final int SHR_LONG = 0xa4;
-    public static final int USHR_LONG = 0xa5;
-    public static final int ADD_FLOAT = 0xa6;
-    public static final int SUB_FLOAT = 0xa7;
-    public static final int MUL_FLOAT = 0xa8;
-    public static final int DIV_FLOAT = 0xa9;
-    public static final int REM_FLOAT = 0xaa;
-    public static final int ADD_DOUBLE = 0xab;
-    public static final int SUB_DOUBLE = 0xac;
-    public static final int MUL_DOUBLE = 0xad;
-    public static final int DIV_DOUBLE = 0xae;
-    public static final int REM_DOUBLE = 0xaf;
-    public static final int ADD_INT_2ADDR = 0xb0;
-    public static final int SUB_INT_2ADDR = 0xb1;
-    public static final int MUL_INT_2ADDR = 0xb2;
-    public static final int DIV_INT_2ADDR = 0xb3;
-    public static final int REM_INT_2ADDR = 0xb4;
-    public static final int AND_INT_2ADDR = 0xb5;
-    public static final int OR_INT_2ADDR = 0xb6;
-    public static final int XOR_INT_2ADDR = 0xb7;
-    public static final int SHL_INT_2ADDR = 0xb8;
-    public static final int SHR_INT_2ADDR = 0xb9;
-    public static final int USHR_INT_2ADDR = 0xba;
-    public static final int ADD_LONG_2ADDR = 0xbb;
-    public static final int SUB_LONG_2ADDR = 0xbc;
-    public static final int MUL_LONG_2ADDR = 0xbd;
-    public static final int DIV_LONG_2ADDR = 0xbe;
-    public static final int REM_LONG_2ADDR = 0xbf;
-    public static final int AND_LONG_2ADDR = 0xc0;
-    public static final int OR_LONG_2ADDR = 0xc1;
-    public static final int XOR_LONG_2ADDR = 0xc2;
-    public static final int SHL_LONG_2ADDR = 0xc3;
-    public static final int SHR_LONG_2ADDR = 0xc4;
-    public static final int USHR_LONG_2ADDR = 0xc5;
-    public static final int ADD_FLOAT_2ADDR = 0xc6;
-    public static final int SUB_FLOAT_2ADDR = 0xc7;
-    public static final int MUL_FLOAT_2ADDR = 0xc8;
-    public static final int DIV_FLOAT_2ADDR = 0xc9;
-    public static final int REM_FLOAT_2ADDR = 0xca;
-    public static final int ADD_DOUBLE_2ADDR = 0xcb;
-    public static final int SUB_DOUBLE_2ADDR = 0xcc;
-    public static final int MUL_DOUBLE_2ADDR = 0xcd;
-    public static final int DIV_DOUBLE_2ADDR = 0xce;
-    public static final int REM_DOUBLE_2ADDR = 0xcf;
-    public static final int ADD_INT_LIT16 = 0xd0;
-    public static final int RSUB_INT = 0xd1;
-    public static final int MUL_INT_LIT16 = 0xd2;
-    public static final int DIV_INT_LIT16 = 0xd3;
-    public static final int REM_INT_LIT16 = 0xd4;
-    public static final int AND_INT_LIT16 = 0xd5;
-    public static final int OR_INT_LIT16 = 0xd6;
-    public static final int XOR_INT_LIT16 = 0xd7;
-    public static final int ADD_INT_LIT8 = 0xd8;
-    public static final int RSUB_INT_LIT8 = 0xd9;
-    public static final int MUL_INT_LIT8 = 0xda;
-    public static final int DIV_INT_LIT8 = 0xdb;
-    public static final int REM_INT_LIT8 = 0xdc;
-    public static final int AND_INT_LIT8 = 0xdd;
-    public static final int OR_INT_LIT8 = 0xde;
-    public static final int XOR_INT_LIT8 = 0xdf;
-    public static final int SHL_INT_LIT8 = 0xe0;
-    public static final int SHR_INT_LIT8 = 0xe1;
-    public static final int USHR_INT_LIT8 = 0xe2;
-    public static final int CONST_CLASS_JUMBO = 0x00ff;
-    public static final int CHECK_CAST_JUMBO = 0x01ff;
-    public static final int INSTANCE_OF_JUMBO = 0x02ff;
-    public static final int NEW_INSTANCE_JUMBO = 0x03ff;
-    public static final int NEW_ARRAY_JUMBO = 0x04ff;
-    public static final int FILLED_NEW_ARRAY_JUMBO = 0x05ff;
-    public static final int IGET_JUMBO = 0x06ff;
-    public static final int IGET_WIDE_JUMBO = 0x07ff;
-    public static final int IGET_OBJECT_JUMBO = 0x08ff;
-    public static final int IGET_BOOLEAN_JUMBO = 0x09ff;
-    public static final int IGET_BYTE_JUMBO = 0x0aff;
-    public static final int IGET_CHAR_JUMBO = 0x0bff;
-    public static final int IGET_SHORT_JUMBO = 0x0cff;
-    public static final int IPUT_JUMBO = 0x0dff;
-    public static final int IPUT_WIDE_JUMBO = 0x0eff;
-    public static final int IPUT_OBJECT_JUMBO = 0x0fff;
-    public static final int IPUT_BOOLEAN_JUMBO = 0x10ff;
-    public static final int IPUT_BYTE_JUMBO = 0x11ff;
-    public static final int IPUT_CHAR_JUMBO = 0x12ff;
-    public static final int IPUT_SHORT_JUMBO = 0x13ff;
-    public static final int SGET_JUMBO = 0x14ff;
-    public static final int SGET_WIDE_JUMBO = 0x15ff;
-    public static final int SGET_OBJECT_JUMBO = 0x16ff;
-    public static final int SGET_BOOLEAN_JUMBO = 0x17ff;
-    public static final int SGET_BYTE_JUMBO = 0x18ff;
-    public static final int SGET_CHAR_JUMBO = 0x19ff;
-    public static final int SGET_SHORT_JUMBO = 0x1aff;
-    public static final int SPUT_JUMBO = 0x1bff;
-    public static final int SPUT_WIDE_JUMBO = 0x1cff;
-    public static final int SPUT_OBJECT_JUMBO = 0x1dff;
-    public static final int SPUT_BOOLEAN_JUMBO = 0x1eff;
-    public static final int SPUT_BYTE_JUMBO = 0x1fff;
-    public static final int SPUT_CHAR_JUMBO = 0x20ff;
-    public static final int SPUT_SHORT_JUMBO = 0x21ff;
-    public static final int INVOKE_VIRTUAL_JUMBO = 0x22ff;
-    public static final int INVOKE_SUPER_JUMBO = 0x23ff;
-    public static final int INVOKE_DIRECT_JUMBO = 0x24ff;
-    public static final int INVOKE_STATIC_JUMBO = 0x25ff;
-    public static final int INVOKE_INTERFACE_JUMBO = 0x26ff;
-    // END(opcodes)
-
-    /**
-     * This class is uninstantiable.
-     */
-    private DalvOps() {
-        // This space intentionally left blank.
-    }
-
-    /**
-     * Determines if the given opcode has the right "shape" to be
-     * valid. This includes the range {@code 0x00..0xfe}, the range
-     * {@code 0x00ff..0xffff} where the low-order byte is {@code
-     * 0xff}, and the special opcode values {@code SPECIAL_FORMAT} and
-     * {@code NO_NEXT}. Note that not all of the opcode values that
-     * pass this test are in fact used. This method is meant to
-     * perform a quick check to reject blatantly wrong values (e.g.
-     * when validating arguments).
-     *
-     * @param opcode the opcode value
-     * @return {@code true} iff the value has the right "shape" to be
-     * possibly valid
-     */
-    public static boolean isValidShape(int opcode) {
-        // Note: SPECIAL_FORMAT == NO_NEXT.
-        if ((opcode >= SPECIAL_FORMAT) && (opcode <= 0xff)) {
-            return true;
-        }
-
-        if ((opcode >= 0xff) && (opcode <= 0xffff)
-                && ((opcode & 0xff) == 0xff)) {
-            return true;
-        }
-
-        return false;
-    }
-}
diff --git a/dx/src/com/android/dx/dex/code/Dop.java b/dx/src/com/android/dx/dex/code/Dop.java
index 565d8f9..51d1b51 100644
--- a/dx/src/com/android/dx/dex/code/Dop.java
+++ b/dx/src/com/android/dx/dex/code/Dop.java
@@ -16,20 +16,23 @@
 
 package com.android.dx.dex.code;
 
+import com.android.dx.io.OpcodeInfo;
+import com.android.dx.io.Opcodes;
+
 /**
  * Representation of an opcode.
  */
 public final class Dop {
-    /** {@code DalvOps.isValid();} the opcode value itself */
+    /** {@code Opcodes.isValid();} the opcode value itself */
     private final int opcode;
 
-    /** {@code DalvOps.isValid();} the opcode family */
+    /** {@code Opcodes.isValid();} the opcode family */
     private final int family;
 
     /**
-     * {@code DalvOps.isValid();} what opcode (by number) to try next
+     * {@code Opcodes.isValid();} what opcode (by number) to try next
      * when attempting to match an opcode to particular arguments;
-     * {@code DalvOps.NO_NEXT} to indicate that this is the last
+     * {@code Opcodes.NO_NEXT} to indicate that this is the last
      * opcode to try in a particular chain
      */
     private final int nextOpcode;
@@ -40,35 +43,31 @@
     /** whether this opcode uses a result register */
     private final boolean hasResult;
 
-    /** {@code non-null;} the name */
-    private final String name;
-
     /**
      * Constructs an instance.
      *
-     * @param opcode {@code DalvOps.isValid();} the opcode value
+     * @param opcode {@code Opcodes.isValid();} the opcode value
      * itself
-     * @param family {@code DalvOps.isValid();} the opcode family
-     * @param nextOpcode {@code DalvOps.isValid();} what opcode (by
+     * @param family {@code Opcodes.isValid();} the opcode family
+     * @param nextOpcode {@code Opcodes.isValid();} what opcode (by
      * number) to try next when attempting to match an opcode to
-     * particular arguments; {@code DalvOps.NO_NEXT} to indicate that
+     * particular arguments; {@code Opcodes.NO_NEXT} to indicate that
      * this is the last opcode to try in a particular chain
      * @param format {@code non-null;} the instruction format
      * @param hasResult whether the opcode has a result register; if so it
      * is always the first register
-     * @param name {@code non-null;} the name
      */
     public Dop(int opcode, int family, int nextOpcode, InsnFormat format,
-            boolean hasResult, String name) {
-        if (!DalvOps.isValidShape(opcode)) {
+            boolean hasResult) {
+        if (!Opcodes.isValidShape(opcode)) {
             throw new IllegalArgumentException("bogus opcode");
         }
 
-        if (!DalvOps.isValidShape(family)) {
+        if (!Opcodes.isValidShape(family)) {
             throw new IllegalArgumentException("bogus family");
         }
 
-        if (!DalvOps.isValidShape(nextOpcode)) {
+        if (!Opcodes.isValidShape(nextOpcode)) {
             throw new IllegalArgumentException("bogus nextOpcode");
         }
 
@@ -76,28 +75,23 @@
             throw new NullPointerException("format == null");
         }
 
-        if (name == null) {
-            throw new NullPointerException("name == null");
-        }
-
         this.opcode = opcode;
         this.family = family;
         this.nextOpcode = nextOpcode;
         this.format = format;
         this.hasResult = hasResult;
-        this.name = name;
     }
 
     /** {@inheritDoc} */
     @Override
     public String toString() {
-        return name;
+        return getName();
     }
 
     /**
      * Gets the opcode value.
      *
-     * @return {@code DalvOps.MIN_VALUE..DalvOps.MAX_VALUE;} the opcode value
+     * @return {@code Opcodes.MIN_VALUE..Opcodes.MAX_VALUE;} the opcode value
      */
     public int getOpcode() {
         return opcode;
@@ -107,7 +101,7 @@
      * Gets the opcode family. The opcode family is the unmarked (no
      * "/...") opcode that has equivalent semantics to this one.
      *
-     * @return {@code DalvOps.MIN_VALUE..DalvOps.MAX_VALUE;} the opcode family
+     * @return {@code Opcodes.MIN_VALUE..Opcodes.MAX_VALUE;} the opcode family
      */
     public int getFamily() {
         return family;
@@ -137,16 +131,16 @@
      * @return {@code non-null;} the opcode name
      */
     public String getName() {
-        return name;
+        return OpcodeInfo.getName(opcode);
     }
 
     /**
      * Gets the opcode value to try next when attempting to match an
      * opcode to particular arguments. This returns {@code
-     * DalvOps.NO_NEXT} to indicate that this is the last opcode to
+     * Opcodes.NO_NEXT} to indicate that this is the last opcode to
      * try in a particular chain.
      *
-     * @return {@code DalvOps.MIN_VALUE..DalvOps.MAX_VALUE;} the opcode value
+     * @return {@code Opcodes.MIN_VALUE..Opcodes.MAX_VALUE;} the opcode value
      */
     public int getNextOpcode() {
         return nextOpcode;
@@ -160,18 +154,18 @@
      */
     public Dop getOppositeTest() {
         switch (opcode) {
-            case DalvOps.IF_EQ:  return Dops.IF_NE;
-            case DalvOps.IF_NE:  return Dops.IF_EQ;
-            case DalvOps.IF_LT:  return Dops.IF_GE;
-            case DalvOps.IF_GE:  return Dops.IF_LT;
-            case DalvOps.IF_GT:  return Dops.IF_LE;
-            case DalvOps.IF_LE:  return Dops.IF_GT;
-            case DalvOps.IF_EQZ: return Dops.IF_NEZ;
-            case DalvOps.IF_NEZ: return Dops.IF_EQZ;
-            case DalvOps.IF_LTZ: return Dops.IF_GEZ;
-            case DalvOps.IF_GEZ: return Dops.IF_LTZ;
-            case DalvOps.IF_GTZ: return Dops.IF_LEZ;
-            case DalvOps.IF_LEZ: return Dops.IF_GTZ;
+            case Opcodes.IF_EQ:  return Dops.IF_NE;
+            case Opcodes.IF_NE:  return Dops.IF_EQ;
+            case Opcodes.IF_LT:  return Dops.IF_GE;
+            case Opcodes.IF_GE:  return Dops.IF_LT;
+            case Opcodes.IF_GT:  return Dops.IF_LE;
+            case Opcodes.IF_LE:  return Dops.IF_GT;
+            case Opcodes.IF_EQZ: return Dops.IF_NEZ;
+            case Opcodes.IF_NEZ: return Dops.IF_EQZ;
+            case Opcodes.IF_LTZ: return Dops.IF_GEZ;
+            case Opcodes.IF_GEZ: return Dops.IF_LTZ;
+            case Opcodes.IF_GTZ: return Dops.IF_LEZ;
+            case Opcodes.IF_LEZ: return Dops.IF_GTZ;
         }
 
         throw new IllegalArgumentException("bogus opcode: " + this);
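With the name field gone, an opcode's printable name is now derived from its value; for example (assuming OpcodeInfo carries the standard spec names, as the removed constructor arguments did):

    String name = Dops.CONST_STRING.getName();   // delegates to OpcodeInfo.getName(0x1a)
    // expected: "const-string"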
diff --git a/dx/src/com/android/dx/dex/code/Dops.java b/dx/src/com/android/dx/dex/code/Dops.java
index 053f47b..667b326 100644
--- a/dx/src/com/android/dx/dex/code/Dops.java
+++ b/dx/src/com/android/dx/dex/code/Dops.java
@@ -44,6 +44,7 @@
 import com.android.dx.dex.code.form.Form52c;
 import com.android.dx.dex.code.form.Form5rc;
 import com.android.dx.dex.code.form.SpecialFormat;
+import com.android.dx.io.Opcodes;
 
 /**
  * Standard instances of {@link Dop} and utility methods for getting
@@ -56,1303 +57,1047 @@
     /**
      * pseudo-opcode used for nonstandard formatted "instructions"
      * (which are mostly not actually instructions, though they do
-     * appear in instruction lists)
+     * appear in instruction lists). TODO: Retire the usage of this
+     * constant.
      */
     public static final Dop SPECIAL_FORMAT =
-        new Dop(DalvOps.SPECIAL_FORMAT, DalvOps.SPECIAL_FORMAT,
-                DalvOps.NO_NEXT, SpecialFormat.THE_ONE, false, "<special>");
+        new Dop(Opcodes.SPECIAL_FORMAT, Opcodes.SPECIAL_FORMAT,
+                Opcodes.NO_NEXT, SpecialFormat.THE_ONE, false);
 
     // BEGIN(dops); GENERATED AUTOMATICALLY BY opcode-gen
     public static final Dop NOP =
-        new Dop(DalvOps.NOP, DalvOps.NOP,
-            DalvOps.NO_NEXT, Form10x.THE_ONE, false,
-            "nop");
+        new Dop(Opcodes.NOP, Opcodes.NOP,
+            Opcodes.NO_NEXT, Form10x.THE_ONE, false);
 
     public static final Dop MOVE =
-        new Dop(DalvOps.MOVE, DalvOps.MOVE,
-            DalvOps.MOVE_FROM16, Form12x.THE_ONE, true,
-            "move");
+        new Dop(Opcodes.MOVE, Opcodes.MOVE,
+            Opcodes.MOVE_FROM16, Form12x.THE_ONE, true);
 
     public static final Dop MOVE_FROM16 =
-        new Dop(DalvOps.MOVE_FROM16, DalvOps.MOVE,
-            DalvOps.MOVE_16, Form22x.THE_ONE, true,
-            "move/from16");
+        new Dop(Opcodes.MOVE_FROM16, Opcodes.MOVE,
+            Opcodes.MOVE_16, Form22x.THE_ONE, true);
 
     public static final Dop MOVE_16 =
-        new Dop(DalvOps.MOVE_16, DalvOps.MOVE,
-            DalvOps.NO_NEXT, Form32x.THE_ONE, true,
-            "move/16");
+        new Dop(Opcodes.MOVE_16, Opcodes.MOVE,
+            Opcodes.NO_NEXT, Form32x.THE_ONE, true);
 
     public static final Dop MOVE_WIDE =
-        new Dop(DalvOps.MOVE_WIDE, DalvOps.MOVE_WIDE,
-            DalvOps.MOVE_WIDE_FROM16, Form12x.THE_ONE, true,
-            "move-wide");
+        new Dop(Opcodes.MOVE_WIDE, Opcodes.MOVE_WIDE,
+            Opcodes.MOVE_WIDE_FROM16, Form12x.THE_ONE, true);
 
     public static final Dop MOVE_WIDE_FROM16 =
-        new Dop(DalvOps.MOVE_WIDE_FROM16, DalvOps.MOVE_WIDE,
-            DalvOps.MOVE_WIDE_16, Form22x.THE_ONE, true,
-            "move-wide/from16");
+        new Dop(Opcodes.MOVE_WIDE_FROM16, Opcodes.MOVE_WIDE,
+            Opcodes.MOVE_WIDE_16, Form22x.THE_ONE, true);
 
     public static final Dop MOVE_WIDE_16 =
-        new Dop(DalvOps.MOVE_WIDE_16, DalvOps.MOVE_WIDE,
-            DalvOps.NO_NEXT, Form32x.THE_ONE, true,
-            "move-wide/16");
+        new Dop(Opcodes.MOVE_WIDE_16, Opcodes.MOVE_WIDE,
+            Opcodes.NO_NEXT, Form32x.THE_ONE, true);
 
     public static final Dop MOVE_OBJECT =
-        new Dop(DalvOps.MOVE_OBJECT, DalvOps.MOVE_OBJECT,
-            DalvOps.MOVE_OBJECT_FROM16, Form12x.THE_ONE, true,
-            "move-object");
+        new Dop(Opcodes.MOVE_OBJECT, Opcodes.MOVE_OBJECT,
+            Opcodes.MOVE_OBJECT_FROM16, Form12x.THE_ONE, true);
 
     public static final Dop MOVE_OBJECT_FROM16 =
-        new Dop(DalvOps.MOVE_OBJECT_FROM16, DalvOps.MOVE_OBJECT,
-            DalvOps.MOVE_OBJECT_16, Form22x.THE_ONE, true,
-            "move-object/from16");
+        new Dop(Opcodes.MOVE_OBJECT_FROM16, Opcodes.MOVE_OBJECT,
+            Opcodes.MOVE_OBJECT_16, Form22x.THE_ONE, true);
 
     public static final Dop MOVE_OBJECT_16 =
-        new Dop(DalvOps.MOVE_OBJECT_16, DalvOps.MOVE_OBJECT,
-            DalvOps.NO_NEXT, Form32x.THE_ONE, true,
-            "move-object/16");
+        new Dop(Opcodes.MOVE_OBJECT_16, Opcodes.MOVE_OBJECT,
+            Opcodes.NO_NEXT, Form32x.THE_ONE, true);
 
     public static final Dop MOVE_RESULT =
-        new Dop(DalvOps.MOVE_RESULT, DalvOps.MOVE_RESULT,
-            DalvOps.NO_NEXT, Form11x.THE_ONE, true,
-            "move-result");
+        new Dop(Opcodes.MOVE_RESULT, Opcodes.MOVE_RESULT,
+            Opcodes.NO_NEXT, Form11x.THE_ONE, true);
 
     public static final Dop MOVE_RESULT_WIDE =
-        new Dop(DalvOps.MOVE_RESULT_WIDE, DalvOps.MOVE_RESULT_WIDE,
-            DalvOps.NO_NEXT, Form11x.THE_ONE, true,
-            "move-result-wide");
+        new Dop(Opcodes.MOVE_RESULT_WIDE, Opcodes.MOVE_RESULT_WIDE,
+            Opcodes.NO_NEXT, Form11x.THE_ONE, true);
 
     public static final Dop MOVE_RESULT_OBJECT =
-        new Dop(DalvOps.MOVE_RESULT_OBJECT, DalvOps.MOVE_RESULT_OBJECT,
-            DalvOps.NO_NEXT, Form11x.THE_ONE, true,
-            "move-result-object");
+        new Dop(Opcodes.MOVE_RESULT_OBJECT, Opcodes.MOVE_RESULT_OBJECT,
+            Opcodes.NO_NEXT, Form11x.THE_ONE, true);
 
     public static final Dop MOVE_EXCEPTION =
-        new Dop(DalvOps.MOVE_EXCEPTION, DalvOps.MOVE_EXCEPTION,
-            DalvOps.NO_NEXT, Form11x.THE_ONE, true,
-            "move-exception");
+        new Dop(Opcodes.MOVE_EXCEPTION, Opcodes.MOVE_EXCEPTION,
+            Opcodes.NO_NEXT, Form11x.THE_ONE, true);
 
     public static final Dop RETURN_VOID =
-        new Dop(DalvOps.RETURN_VOID, DalvOps.RETURN_VOID,
-            DalvOps.NO_NEXT, Form10x.THE_ONE, false,
-            "return-void");
+        new Dop(Opcodes.RETURN_VOID, Opcodes.RETURN_VOID,
+            Opcodes.NO_NEXT, Form10x.THE_ONE, false);
 
     public static final Dop RETURN =
-        new Dop(DalvOps.RETURN, DalvOps.RETURN,
-            DalvOps.NO_NEXT, Form11x.THE_ONE, false,
-            "return");
+        new Dop(Opcodes.RETURN, Opcodes.RETURN,
+            Opcodes.NO_NEXT, Form11x.THE_ONE, false);
 
     public static final Dop RETURN_WIDE =
-        new Dop(DalvOps.RETURN_WIDE, DalvOps.RETURN_WIDE,
-            DalvOps.NO_NEXT, Form11x.THE_ONE, false,
-            "return-wide");
+        new Dop(Opcodes.RETURN_WIDE, Opcodes.RETURN_WIDE,
+            Opcodes.NO_NEXT, Form11x.THE_ONE, false);
 
     public static final Dop RETURN_OBJECT =
-        new Dop(DalvOps.RETURN_OBJECT, DalvOps.RETURN_OBJECT,
-            DalvOps.NO_NEXT, Form11x.THE_ONE, false,
-            "return-object");
+        new Dop(Opcodes.RETURN_OBJECT, Opcodes.RETURN_OBJECT,
+            Opcodes.NO_NEXT, Form11x.THE_ONE, false);
 
     public static final Dop CONST_4 =
-        new Dop(DalvOps.CONST_4, DalvOps.CONST,
-            DalvOps.CONST_16, Form11n.THE_ONE, true,
-            "const/4");
+        new Dop(Opcodes.CONST_4, Opcodes.CONST,
+            Opcodes.CONST_16, Form11n.THE_ONE, true);
 
     public static final Dop CONST_16 =
-        new Dop(DalvOps.CONST_16, DalvOps.CONST,
-            DalvOps.CONST_HIGH16, Form21s.THE_ONE, true,
-            "const/16");
+        new Dop(Opcodes.CONST_16, Opcodes.CONST,
+            Opcodes.CONST_HIGH16, Form21s.THE_ONE, true);
 
     public static final Dop CONST =
-        new Dop(DalvOps.CONST, DalvOps.CONST,
-            DalvOps.NO_NEXT, Form31i.THE_ONE, true,
-            "const");
+        new Dop(Opcodes.CONST, Opcodes.CONST,
+            Opcodes.NO_NEXT, Form31i.THE_ONE, true);
 
     public static final Dop CONST_HIGH16 =
-        new Dop(DalvOps.CONST_HIGH16, DalvOps.CONST,
-            DalvOps.CONST, Form21h.THE_ONE, true,
-            "const/high16");
+        new Dop(Opcodes.CONST_HIGH16, Opcodes.CONST,
+            Opcodes.CONST, Form21h.THE_ONE, true);
 
     public static final Dop CONST_WIDE_16 =
-        new Dop(DalvOps.CONST_WIDE_16, DalvOps.CONST_WIDE,
-            DalvOps.CONST_WIDE_HIGH16, Form21s.THE_ONE, true,
-            "const-wide/16");
+        new Dop(Opcodes.CONST_WIDE_16, Opcodes.CONST_WIDE,
+            Opcodes.CONST_WIDE_HIGH16, Form21s.THE_ONE, true);
 
     public static final Dop CONST_WIDE_32 =
-        new Dop(DalvOps.CONST_WIDE_32, DalvOps.CONST_WIDE,
-            DalvOps.CONST_WIDE, Form31i.THE_ONE, true,
-            "const-wide/32");
+        new Dop(Opcodes.CONST_WIDE_32, Opcodes.CONST_WIDE,
+            Opcodes.CONST_WIDE, Form31i.THE_ONE, true);
 
     public static final Dop CONST_WIDE =
-        new Dop(DalvOps.CONST_WIDE, DalvOps.CONST_WIDE,
-            DalvOps.NO_NEXT, Form51l.THE_ONE, true,
-            "const-wide");
+        new Dop(Opcodes.CONST_WIDE, Opcodes.CONST_WIDE,
+            Opcodes.NO_NEXT, Form51l.THE_ONE, true);
 
     public static final Dop CONST_WIDE_HIGH16 =
-        new Dop(DalvOps.CONST_WIDE_HIGH16, DalvOps.CONST_WIDE,
-            DalvOps.CONST_WIDE_32, Form21h.THE_ONE, true,
-            "const-wide/high16");
+        new Dop(Opcodes.CONST_WIDE_HIGH16, Opcodes.CONST_WIDE,
+            Opcodes.CONST_WIDE_32, Form21h.THE_ONE, true);
 
     public static final Dop CONST_STRING =
-        new Dop(DalvOps.CONST_STRING, DalvOps.CONST_STRING,
-            DalvOps.CONST_STRING_JUMBO, Form21c.THE_ONE, true,
-            "const-string");
+        new Dop(Opcodes.CONST_STRING, Opcodes.CONST_STRING,
+            Opcodes.CONST_STRING_JUMBO, Form21c.THE_ONE, true);
 
     public static final Dop CONST_STRING_JUMBO =
-        new Dop(DalvOps.CONST_STRING_JUMBO, DalvOps.CONST_STRING,
-            DalvOps.NO_NEXT, Form31c.THE_ONE, true,
-            "const-string/jumbo");
+        new Dop(Opcodes.CONST_STRING_JUMBO, Opcodes.CONST_STRING,
+            Opcodes.NO_NEXT, Form31c.THE_ONE, true);
 
     public static final Dop CONST_CLASS =
-        new Dop(DalvOps.CONST_CLASS, DalvOps.CONST_CLASS,
-            DalvOps.CONST_CLASS_JUMBO, Form21c.THE_ONE, true,
-            "const-class");
+        new Dop(Opcodes.CONST_CLASS, Opcodes.CONST_CLASS,
+            Opcodes.CONST_CLASS_JUMBO, Form21c.THE_ONE, true);
 
     public static final Dop MONITOR_ENTER =
-        new Dop(DalvOps.MONITOR_ENTER, DalvOps.MONITOR_ENTER,
-            DalvOps.NO_NEXT, Form11x.THE_ONE, false,
-            "monitor-enter");
+        new Dop(Opcodes.MONITOR_ENTER, Opcodes.MONITOR_ENTER,
+            Opcodes.NO_NEXT, Form11x.THE_ONE, false);
 
     public static final Dop MONITOR_EXIT =
-        new Dop(DalvOps.MONITOR_EXIT, DalvOps.MONITOR_EXIT,
-            DalvOps.NO_NEXT, Form11x.THE_ONE, false,
-            "monitor-exit");
+        new Dop(Opcodes.MONITOR_EXIT, Opcodes.MONITOR_EXIT,
+            Opcodes.NO_NEXT, Form11x.THE_ONE, false);
 
     public static final Dop CHECK_CAST =
-        new Dop(DalvOps.CHECK_CAST, DalvOps.CHECK_CAST,
-            DalvOps.CHECK_CAST_JUMBO, Form21c.THE_ONE, true,
-            "check-cast");
+        new Dop(Opcodes.CHECK_CAST, Opcodes.CHECK_CAST,
+            Opcodes.CHECK_CAST_JUMBO, Form21c.THE_ONE, true);
 
     public static final Dop INSTANCE_OF =
-        new Dop(DalvOps.INSTANCE_OF, DalvOps.INSTANCE_OF,
-            DalvOps.INSTANCE_OF_JUMBO, Form22c.THE_ONE, true,
-            "instance-of");
+        new Dop(Opcodes.INSTANCE_OF, Opcodes.INSTANCE_OF,
+            Opcodes.INSTANCE_OF_JUMBO, Form22c.THE_ONE, true);
 
     public static final Dop ARRAY_LENGTH =
-        new Dop(DalvOps.ARRAY_LENGTH, DalvOps.ARRAY_LENGTH,
-            DalvOps.NO_NEXT, Form12x.THE_ONE, true,
-            "array-length");
+        new Dop(Opcodes.ARRAY_LENGTH, Opcodes.ARRAY_LENGTH,
+            Opcodes.NO_NEXT, Form12x.THE_ONE, true);
 
     public static final Dop NEW_INSTANCE =
-        new Dop(DalvOps.NEW_INSTANCE, DalvOps.NEW_INSTANCE,
-            DalvOps.NEW_INSTANCE_JUMBO, Form21c.THE_ONE, true,
-            "new-instance");
+        new Dop(Opcodes.NEW_INSTANCE, Opcodes.NEW_INSTANCE,
+            Opcodes.NEW_INSTANCE_JUMBO, Form21c.THE_ONE, true);
 
     public static final Dop NEW_ARRAY =
-        new Dop(DalvOps.NEW_ARRAY, DalvOps.NEW_ARRAY,
-            DalvOps.NEW_ARRAY_JUMBO, Form22c.THE_ONE, true,
-            "new-array");
+        new Dop(Opcodes.NEW_ARRAY, Opcodes.NEW_ARRAY,
+            Opcodes.NEW_ARRAY_JUMBO, Form22c.THE_ONE, true);
 
     public static final Dop FILLED_NEW_ARRAY =
-        new Dop(DalvOps.FILLED_NEW_ARRAY, DalvOps.FILLED_NEW_ARRAY,
-            DalvOps.FILLED_NEW_ARRAY_RANGE, Form35c.THE_ONE, false,
-            "filled-new-array");
+        new Dop(Opcodes.FILLED_NEW_ARRAY, Opcodes.FILLED_NEW_ARRAY,
+            Opcodes.FILLED_NEW_ARRAY_RANGE, Form35c.THE_ONE, false);
 
     public static final Dop FILLED_NEW_ARRAY_RANGE =
-        new Dop(DalvOps.FILLED_NEW_ARRAY_RANGE, DalvOps.FILLED_NEW_ARRAY,
-            DalvOps.FILLED_NEW_ARRAY_JUMBO, Form3rc.THE_ONE, false,
-            "filled-new-array/range");
+        new Dop(Opcodes.FILLED_NEW_ARRAY_RANGE, Opcodes.FILLED_NEW_ARRAY,
+            Opcodes.FILLED_NEW_ARRAY_JUMBO, Form3rc.THE_ONE, false);
 
     public static final Dop FILL_ARRAY_DATA =
-        new Dop(DalvOps.FILL_ARRAY_DATA, DalvOps.FILL_ARRAY_DATA,
-            DalvOps.NO_NEXT, Form31t.THE_ONE, false,
-            "fill-array-data");
+        new Dop(Opcodes.FILL_ARRAY_DATA, Opcodes.FILL_ARRAY_DATA,
+            Opcodes.NO_NEXT, Form31t.THE_ONE, false);
 
     public static final Dop THROW =
-        new Dop(DalvOps.THROW, DalvOps.THROW,
-            DalvOps.NO_NEXT, Form11x.THE_ONE, false,
-            "throw");
+        new Dop(Opcodes.THROW, Opcodes.THROW,
+            Opcodes.NO_NEXT, Form11x.THE_ONE, false);
 
     public static final Dop GOTO =
-        new Dop(DalvOps.GOTO, DalvOps.GOTO,
-            DalvOps.GOTO_16, Form10t.THE_ONE, false,
-            "goto");
+        new Dop(Opcodes.GOTO, Opcodes.GOTO,
+            Opcodes.GOTO_16, Form10t.THE_ONE, false);
 
     public static final Dop GOTO_16 =
-        new Dop(DalvOps.GOTO_16, DalvOps.GOTO,
-            DalvOps.GOTO_32, Form20t.THE_ONE, false,
-            "goto/16");
+        new Dop(Opcodes.GOTO_16, Opcodes.GOTO,
+            Opcodes.GOTO_32, Form20t.THE_ONE, false);
 
     public static final Dop GOTO_32 =
-        new Dop(DalvOps.GOTO_32, DalvOps.GOTO,
-            DalvOps.NO_NEXT, Form30t.THE_ONE, false,
-            "goto/32");
+        new Dop(Opcodes.GOTO_32, Opcodes.GOTO,
+            Opcodes.NO_NEXT, Form30t.THE_ONE, false);
 
     public static final Dop PACKED_SWITCH =
-        new Dop(DalvOps.PACKED_SWITCH, DalvOps.PACKED_SWITCH,
-            DalvOps.NO_NEXT, Form31t.THE_ONE, false,
-            "packed-switch");
+        new Dop(Opcodes.PACKED_SWITCH, Opcodes.PACKED_SWITCH,
+            Opcodes.NO_NEXT, Form31t.THE_ONE, false);
 
     public static final Dop SPARSE_SWITCH =
-        new Dop(DalvOps.SPARSE_SWITCH, DalvOps.SPARSE_SWITCH,
-            DalvOps.NO_NEXT, Form31t.THE_ONE, false,
-            "sparse-switch");
+        new Dop(Opcodes.SPARSE_SWITCH, Opcodes.SPARSE_SWITCH,
+            Opcodes.NO_NEXT, Form31t.THE_ONE, false);
 
     public static final Dop CMPL_FLOAT =
-        new Dop(DalvOps.CMPL_FLOAT, DalvOps.CMPL_FLOAT,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "cmpl-float");
+        new Dop(Opcodes.CMPL_FLOAT, Opcodes.CMPL_FLOAT,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop CMPG_FLOAT =
-        new Dop(DalvOps.CMPG_FLOAT, DalvOps.CMPG_FLOAT,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "cmpg-float");
+        new Dop(Opcodes.CMPG_FLOAT, Opcodes.CMPG_FLOAT,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop CMPL_DOUBLE =
-        new Dop(DalvOps.CMPL_DOUBLE, DalvOps.CMPL_DOUBLE,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "cmpl-double");
+        new Dop(Opcodes.CMPL_DOUBLE, Opcodes.CMPL_DOUBLE,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop CMPG_DOUBLE =
-        new Dop(DalvOps.CMPG_DOUBLE, DalvOps.CMPG_DOUBLE,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "cmpg-double");
+        new Dop(Opcodes.CMPG_DOUBLE, Opcodes.CMPG_DOUBLE,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop CMP_LONG =
-        new Dop(DalvOps.CMP_LONG, DalvOps.CMP_LONG,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "cmp-long");
+        new Dop(Opcodes.CMP_LONG, Opcodes.CMP_LONG,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop IF_EQ =
-        new Dop(DalvOps.IF_EQ, DalvOps.IF_EQ,
-            DalvOps.NO_NEXT, Form22t.THE_ONE, false,
-            "if-eq");
+        new Dop(Opcodes.IF_EQ, Opcodes.IF_EQ,
+            Opcodes.NO_NEXT, Form22t.THE_ONE, false);
 
     public static final Dop IF_NE =
-        new Dop(DalvOps.IF_NE, DalvOps.IF_NE,
-            DalvOps.NO_NEXT, Form22t.THE_ONE, false,
-            "if-ne");
+        new Dop(Opcodes.IF_NE, Opcodes.IF_NE,
+            Opcodes.NO_NEXT, Form22t.THE_ONE, false);
 
     public static final Dop IF_LT =
-        new Dop(DalvOps.IF_LT, DalvOps.IF_LT,
-            DalvOps.NO_NEXT, Form22t.THE_ONE, false,
-            "if-lt");
+        new Dop(Opcodes.IF_LT, Opcodes.IF_LT,
+            Opcodes.NO_NEXT, Form22t.THE_ONE, false);
 
     public static final Dop IF_GE =
-        new Dop(DalvOps.IF_GE, DalvOps.IF_GE,
-            DalvOps.NO_NEXT, Form22t.THE_ONE, false,
-            "if-ge");
+        new Dop(Opcodes.IF_GE, Opcodes.IF_GE,
+            Opcodes.NO_NEXT, Form22t.THE_ONE, false);
 
     public static final Dop IF_GT =
-        new Dop(DalvOps.IF_GT, DalvOps.IF_GT,
-            DalvOps.NO_NEXT, Form22t.THE_ONE, false,
-            "if-gt");
+        new Dop(Opcodes.IF_GT, Opcodes.IF_GT,
+            Opcodes.NO_NEXT, Form22t.THE_ONE, false);
 
     public static final Dop IF_LE =
-        new Dop(DalvOps.IF_LE, DalvOps.IF_LE,
-            DalvOps.NO_NEXT, Form22t.THE_ONE, false,
-            "if-le");
+        new Dop(Opcodes.IF_LE, Opcodes.IF_LE,
+            Opcodes.NO_NEXT, Form22t.THE_ONE, false);
 
     public static final Dop IF_EQZ =
-        new Dop(DalvOps.IF_EQZ, DalvOps.IF_EQZ,
-            DalvOps.NO_NEXT, Form21t.THE_ONE, false,
-            "if-eqz");
+        new Dop(Opcodes.IF_EQZ, Opcodes.IF_EQZ,
+            Opcodes.NO_NEXT, Form21t.THE_ONE, false);
 
     public static final Dop IF_NEZ =
-        new Dop(DalvOps.IF_NEZ, DalvOps.IF_NEZ,
-            DalvOps.NO_NEXT, Form21t.THE_ONE, false,
-            "if-nez");
+        new Dop(Opcodes.IF_NEZ, Opcodes.IF_NEZ,
+            Opcodes.NO_NEXT, Form21t.THE_ONE, false);
 
     public static final Dop IF_LTZ =
-        new Dop(DalvOps.IF_LTZ, DalvOps.IF_LTZ,
-            DalvOps.NO_NEXT, Form21t.THE_ONE, false,
-            "if-ltz");
+        new Dop(Opcodes.IF_LTZ, Opcodes.IF_LTZ,
+            Opcodes.NO_NEXT, Form21t.THE_ONE, false);
 
     public static final Dop IF_GEZ =
-        new Dop(DalvOps.IF_GEZ, DalvOps.IF_GEZ,
-            DalvOps.NO_NEXT, Form21t.THE_ONE, false,
-            "if-gez");
+        new Dop(Opcodes.IF_GEZ, Opcodes.IF_GEZ,
+            Opcodes.NO_NEXT, Form21t.THE_ONE, false);
 
     public static final Dop IF_GTZ =
-        new Dop(DalvOps.IF_GTZ, DalvOps.IF_GTZ,
-            DalvOps.NO_NEXT, Form21t.THE_ONE, false,
-            "if-gtz");
+        new Dop(Opcodes.IF_GTZ, Opcodes.IF_GTZ,
+            Opcodes.NO_NEXT, Form21t.THE_ONE, false);
 
     public static final Dop IF_LEZ =
-        new Dop(DalvOps.IF_LEZ, DalvOps.IF_LEZ,
-            DalvOps.NO_NEXT, Form21t.THE_ONE, false,
-            "if-lez");
+        new Dop(Opcodes.IF_LEZ, Opcodes.IF_LEZ,
+            Opcodes.NO_NEXT, Form21t.THE_ONE, false);
 
     public static final Dop AGET =
-        new Dop(DalvOps.AGET, DalvOps.AGET,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "aget");
+        new Dop(Opcodes.AGET, Opcodes.AGET,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop AGET_WIDE =
-        new Dop(DalvOps.AGET_WIDE, DalvOps.AGET_WIDE,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "aget-wide");
+        new Dop(Opcodes.AGET_WIDE, Opcodes.AGET_WIDE,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop AGET_OBJECT =
-        new Dop(DalvOps.AGET_OBJECT, DalvOps.AGET_OBJECT,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "aget-object");
+        new Dop(Opcodes.AGET_OBJECT, Opcodes.AGET_OBJECT,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop AGET_BOOLEAN =
-        new Dop(DalvOps.AGET_BOOLEAN, DalvOps.AGET_BOOLEAN,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "aget-boolean");
+        new Dop(Opcodes.AGET_BOOLEAN, Opcodes.AGET_BOOLEAN,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop AGET_BYTE =
-        new Dop(DalvOps.AGET_BYTE, DalvOps.AGET_BYTE,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "aget-byte");
+        new Dop(Opcodes.AGET_BYTE, Opcodes.AGET_BYTE,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop AGET_CHAR =
-        new Dop(DalvOps.AGET_CHAR, DalvOps.AGET_CHAR,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "aget-char");
+        new Dop(Opcodes.AGET_CHAR, Opcodes.AGET_CHAR,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop AGET_SHORT =
-        new Dop(DalvOps.AGET_SHORT, DalvOps.AGET_SHORT,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "aget-short");
+        new Dop(Opcodes.AGET_SHORT, Opcodes.AGET_SHORT,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop APUT =
-        new Dop(DalvOps.APUT, DalvOps.APUT,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, false,
-            "aput");
+        new Dop(Opcodes.APUT, Opcodes.APUT,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, false);
 
     public static final Dop APUT_WIDE =
-        new Dop(DalvOps.APUT_WIDE, DalvOps.APUT_WIDE,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, false,
-            "aput-wide");
+        new Dop(Opcodes.APUT_WIDE, Opcodes.APUT_WIDE,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, false);
 
     public static final Dop APUT_OBJECT =
-        new Dop(DalvOps.APUT_OBJECT, DalvOps.APUT_OBJECT,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, false,
-            "aput-object");
+        new Dop(Opcodes.APUT_OBJECT, Opcodes.APUT_OBJECT,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, false);
 
     public static final Dop APUT_BOOLEAN =
-        new Dop(DalvOps.APUT_BOOLEAN, DalvOps.APUT_BOOLEAN,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, false,
-            "aput-boolean");
+        new Dop(Opcodes.APUT_BOOLEAN, Opcodes.APUT_BOOLEAN,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, false);
 
     public static final Dop APUT_BYTE =
-        new Dop(DalvOps.APUT_BYTE, DalvOps.APUT_BYTE,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, false,
-            "aput-byte");
+        new Dop(Opcodes.APUT_BYTE, Opcodes.APUT_BYTE,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, false);
 
     public static final Dop APUT_CHAR =
-        new Dop(DalvOps.APUT_CHAR, DalvOps.APUT_CHAR,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, false,
-            "aput-char");
+        new Dop(Opcodes.APUT_CHAR, Opcodes.APUT_CHAR,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, false);
 
     public static final Dop APUT_SHORT =
-        new Dop(DalvOps.APUT_SHORT, DalvOps.APUT_SHORT,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, false,
-            "aput-short");
+        new Dop(Opcodes.APUT_SHORT, Opcodes.APUT_SHORT,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, false);
 
     public static final Dop IGET =
-        new Dop(DalvOps.IGET, DalvOps.IGET,
-            DalvOps.IGET_JUMBO, Form22c.THE_ONE, true,
-            "iget");
+        new Dop(Opcodes.IGET, Opcodes.IGET,
+            Opcodes.IGET_JUMBO, Form22c.THE_ONE, true);
 
     public static final Dop IGET_WIDE =
-        new Dop(DalvOps.IGET_WIDE, DalvOps.IGET_WIDE,
-            DalvOps.IGET_WIDE_JUMBO, Form22c.THE_ONE, true,
-            "iget-wide");
+        new Dop(Opcodes.IGET_WIDE, Opcodes.IGET_WIDE,
+            Opcodes.IGET_WIDE_JUMBO, Form22c.THE_ONE, true);
 
     public static final Dop IGET_OBJECT =
-        new Dop(DalvOps.IGET_OBJECT, DalvOps.IGET_OBJECT,
-            DalvOps.IGET_OBJECT_JUMBO, Form22c.THE_ONE, true,
-            "iget-object");
+        new Dop(Opcodes.IGET_OBJECT, Opcodes.IGET_OBJECT,
+            Opcodes.IGET_OBJECT_JUMBO, Form22c.THE_ONE, true);
 
     public static final Dop IGET_BOOLEAN =
-        new Dop(DalvOps.IGET_BOOLEAN, DalvOps.IGET_BOOLEAN,
-            DalvOps.IGET_BOOLEAN_JUMBO, Form22c.THE_ONE, true,
-            "iget-boolean");
+        new Dop(Opcodes.IGET_BOOLEAN, Opcodes.IGET_BOOLEAN,
+            Opcodes.IGET_BOOLEAN_JUMBO, Form22c.THE_ONE, true);
 
     public static final Dop IGET_BYTE =
-        new Dop(DalvOps.IGET_BYTE, DalvOps.IGET_BYTE,
-            DalvOps.IGET_BYTE_JUMBO, Form22c.THE_ONE, true,
-            "iget-byte");
+        new Dop(Opcodes.IGET_BYTE, Opcodes.IGET_BYTE,
+            Opcodes.IGET_BYTE_JUMBO, Form22c.THE_ONE, true);
 
     public static final Dop IGET_CHAR =
-        new Dop(DalvOps.IGET_CHAR, DalvOps.IGET_CHAR,
-            DalvOps.IGET_CHAR_JUMBO, Form22c.THE_ONE, true,
-            "iget-char");
+        new Dop(Opcodes.IGET_CHAR, Opcodes.IGET_CHAR,
+            Opcodes.IGET_CHAR_JUMBO, Form22c.THE_ONE, true);
 
     public static final Dop IGET_SHORT =
-        new Dop(DalvOps.IGET_SHORT, DalvOps.IGET_SHORT,
-            DalvOps.IGET_SHORT_JUMBO, Form22c.THE_ONE, true,
-            "iget-short");
+        new Dop(Opcodes.IGET_SHORT, Opcodes.IGET_SHORT,
+            Opcodes.IGET_SHORT_JUMBO, Form22c.THE_ONE, true);
 
     public static final Dop IPUT =
-        new Dop(DalvOps.IPUT, DalvOps.IPUT,
-            DalvOps.IPUT_JUMBO, Form22c.THE_ONE, false,
-            "iput");
+        new Dop(Opcodes.IPUT, Opcodes.IPUT,
+            Opcodes.IPUT_JUMBO, Form22c.THE_ONE, false);
 
     public static final Dop IPUT_WIDE =
-        new Dop(DalvOps.IPUT_WIDE, DalvOps.IPUT_WIDE,
-            DalvOps.IPUT_WIDE_JUMBO, Form22c.THE_ONE, false,
-            "iput-wide");
+        new Dop(Opcodes.IPUT_WIDE, Opcodes.IPUT_WIDE,
+            Opcodes.IPUT_WIDE_JUMBO, Form22c.THE_ONE, false);
 
     public static final Dop IPUT_OBJECT =
-        new Dop(DalvOps.IPUT_OBJECT, DalvOps.IPUT_OBJECT,
-            DalvOps.IPUT_OBJECT_JUMBO, Form22c.THE_ONE, false,
-            "iput-object");
+        new Dop(Opcodes.IPUT_OBJECT, Opcodes.IPUT_OBJECT,
+            Opcodes.IPUT_OBJECT_JUMBO, Form22c.THE_ONE, false);
 
     public static final Dop IPUT_BOOLEAN =
-        new Dop(DalvOps.IPUT_BOOLEAN, DalvOps.IPUT_BOOLEAN,
-            DalvOps.IPUT_BOOLEAN_JUMBO, Form22c.THE_ONE, false,
-            "iput-boolean");
+        new Dop(Opcodes.IPUT_BOOLEAN, Opcodes.IPUT_BOOLEAN,
+            Opcodes.IPUT_BOOLEAN_JUMBO, Form22c.THE_ONE, false);
 
     public static final Dop IPUT_BYTE =
-        new Dop(DalvOps.IPUT_BYTE, DalvOps.IPUT_BYTE,
-            DalvOps.IPUT_BYTE_JUMBO, Form22c.THE_ONE, false,
-            "iput-byte");
+        new Dop(Opcodes.IPUT_BYTE, Opcodes.IPUT_BYTE,
+            Opcodes.IPUT_BYTE_JUMBO, Form22c.THE_ONE, false);
 
     public static final Dop IPUT_CHAR =
-        new Dop(DalvOps.IPUT_CHAR, DalvOps.IPUT_CHAR,
-            DalvOps.IPUT_CHAR_JUMBO, Form22c.THE_ONE, false,
-            "iput-char");
+        new Dop(Opcodes.IPUT_CHAR, Opcodes.IPUT_CHAR,
+            Opcodes.IPUT_CHAR_JUMBO, Form22c.THE_ONE, false);
 
     public static final Dop IPUT_SHORT =
-        new Dop(DalvOps.IPUT_SHORT, DalvOps.IPUT_SHORT,
-            DalvOps.IPUT_SHORT_JUMBO, Form22c.THE_ONE, false,
-            "iput-short");
+        new Dop(Opcodes.IPUT_SHORT, Opcodes.IPUT_SHORT,
+            Opcodes.IPUT_SHORT_JUMBO, Form22c.THE_ONE, false);
 
     public static final Dop SGET =
-        new Dop(DalvOps.SGET, DalvOps.SGET,
-            DalvOps.SGET_JUMBO, Form21c.THE_ONE, true,
-            "sget");
+        new Dop(Opcodes.SGET, Opcodes.SGET,
+            Opcodes.SGET_JUMBO, Form21c.THE_ONE, true);
 
     public static final Dop SGET_WIDE =
-        new Dop(DalvOps.SGET_WIDE, DalvOps.SGET_WIDE,
-            DalvOps.SGET_WIDE_JUMBO, Form21c.THE_ONE, true,
-            "sget-wide");
+        new Dop(Opcodes.SGET_WIDE, Opcodes.SGET_WIDE,
+            Opcodes.SGET_WIDE_JUMBO, Form21c.THE_ONE, true);
 
     public static final Dop SGET_OBJECT =
-        new Dop(DalvOps.SGET_OBJECT, DalvOps.SGET_OBJECT,
-            DalvOps.SGET_OBJECT_JUMBO, Form21c.THE_ONE, true,
-            "sget-object");
+        new Dop(Opcodes.SGET_OBJECT, Opcodes.SGET_OBJECT,
+            Opcodes.SGET_OBJECT_JUMBO, Form21c.THE_ONE, true);
 
     public static final Dop SGET_BOOLEAN =
-        new Dop(DalvOps.SGET_BOOLEAN, DalvOps.SGET_BOOLEAN,
-            DalvOps.SGET_BOOLEAN_JUMBO, Form21c.THE_ONE, true,
-            "sget-boolean");
+        new Dop(Opcodes.SGET_BOOLEAN, Opcodes.SGET_BOOLEAN,
+            Opcodes.SGET_BOOLEAN_JUMBO, Form21c.THE_ONE, true);
 
     public static final Dop SGET_BYTE =
-        new Dop(DalvOps.SGET_BYTE, DalvOps.SGET_BYTE,
-            DalvOps.SGET_BYTE_JUMBO, Form21c.THE_ONE, true,
-            "sget-byte");
+        new Dop(Opcodes.SGET_BYTE, Opcodes.SGET_BYTE,
+            Opcodes.SGET_BYTE_JUMBO, Form21c.THE_ONE, true);
 
     public static final Dop SGET_CHAR =
-        new Dop(DalvOps.SGET_CHAR, DalvOps.SGET_CHAR,
-            DalvOps.SGET_CHAR_JUMBO, Form21c.THE_ONE, true,
-            "sget-char");
+        new Dop(Opcodes.SGET_CHAR, Opcodes.SGET_CHAR,
+            Opcodes.SGET_CHAR_JUMBO, Form21c.THE_ONE, true);
 
     public static final Dop SGET_SHORT =
-        new Dop(DalvOps.SGET_SHORT, DalvOps.SGET_SHORT,
-            DalvOps.SGET_SHORT_JUMBO, Form21c.THE_ONE, true,
-            "sget-short");
+        new Dop(Opcodes.SGET_SHORT, Opcodes.SGET_SHORT,
+            Opcodes.SGET_SHORT_JUMBO, Form21c.THE_ONE, true);
 
     public static final Dop SPUT =
-        new Dop(DalvOps.SPUT, DalvOps.SPUT,
-            DalvOps.SPUT_JUMBO, Form21c.THE_ONE, false,
-            "sput");
+        new Dop(Opcodes.SPUT, Opcodes.SPUT,
+            Opcodes.SPUT_JUMBO, Form21c.THE_ONE, false);
 
     public static final Dop SPUT_WIDE =
-        new Dop(DalvOps.SPUT_WIDE, DalvOps.SPUT_WIDE,
-            DalvOps.SPUT_WIDE_JUMBO, Form21c.THE_ONE, false,
-            "sput-wide");
+        new Dop(Opcodes.SPUT_WIDE, Opcodes.SPUT_WIDE,
+            Opcodes.SPUT_WIDE_JUMBO, Form21c.THE_ONE, false);
 
     public static final Dop SPUT_OBJECT =
-        new Dop(DalvOps.SPUT_OBJECT, DalvOps.SPUT_OBJECT,
-            DalvOps.SPUT_OBJECT_JUMBO, Form21c.THE_ONE, false,
-            "sput-object");
+        new Dop(Opcodes.SPUT_OBJECT, Opcodes.SPUT_OBJECT,
+            Opcodes.SPUT_OBJECT_JUMBO, Form21c.THE_ONE, false);
 
     public static final Dop SPUT_BOOLEAN =
-        new Dop(DalvOps.SPUT_BOOLEAN, DalvOps.SPUT_BOOLEAN,
-            DalvOps.SPUT_BOOLEAN_JUMBO, Form21c.THE_ONE, false,
-            "sput-boolean");
+        new Dop(Opcodes.SPUT_BOOLEAN, Opcodes.SPUT_BOOLEAN,
+            Opcodes.SPUT_BOOLEAN_JUMBO, Form21c.THE_ONE, false);
 
     public static final Dop SPUT_BYTE =
-        new Dop(DalvOps.SPUT_BYTE, DalvOps.SPUT_BYTE,
-            DalvOps.SPUT_BYTE_JUMBO, Form21c.THE_ONE, false,
-            "sput-byte");
+        new Dop(Opcodes.SPUT_BYTE, Opcodes.SPUT_BYTE,
+            Opcodes.SPUT_BYTE_JUMBO, Form21c.THE_ONE, false);
 
     public static final Dop SPUT_CHAR =
-        new Dop(DalvOps.SPUT_CHAR, DalvOps.SPUT_CHAR,
-            DalvOps.SPUT_CHAR_JUMBO, Form21c.THE_ONE, false,
-            "sput-char");
+        new Dop(Opcodes.SPUT_CHAR, Opcodes.SPUT_CHAR,
+            Opcodes.SPUT_CHAR_JUMBO, Form21c.THE_ONE, false);
 
     public static final Dop SPUT_SHORT =
-        new Dop(DalvOps.SPUT_SHORT, DalvOps.SPUT_SHORT,
-            DalvOps.SPUT_SHORT_JUMBO, Form21c.THE_ONE, false,
-            "sput-short");
+        new Dop(Opcodes.SPUT_SHORT, Opcodes.SPUT_SHORT,
+            Opcodes.SPUT_SHORT_JUMBO, Form21c.THE_ONE, false);
 
     public static final Dop INVOKE_VIRTUAL =
-        new Dop(DalvOps.INVOKE_VIRTUAL, DalvOps.INVOKE_VIRTUAL,
-            DalvOps.INVOKE_VIRTUAL_RANGE, Form35c.THE_ONE, false,
-            "invoke-virtual");
+        new Dop(Opcodes.INVOKE_VIRTUAL, Opcodes.INVOKE_VIRTUAL,
+            Opcodes.INVOKE_VIRTUAL_RANGE, Form35c.THE_ONE, false);
 
     public static final Dop INVOKE_SUPER =
-        new Dop(DalvOps.INVOKE_SUPER, DalvOps.INVOKE_SUPER,
-            DalvOps.INVOKE_SUPER_RANGE, Form35c.THE_ONE, false,
-            "invoke-super");
+        new Dop(Opcodes.INVOKE_SUPER, Opcodes.INVOKE_SUPER,
+            Opcodes.INVOKE_SUPER_RANGE, Form35c.THE_ONE, false);
 
     public static final Dop INVOKE_DIRECT =
-        new Dop(DalvOps.INVOKE_DIRECT, DalvOps.INVOKE_DIRECT,
-            DalvOps.INVOKE_DIRECT_RANGE, Form35c.THE_ONE, false,
-            "invoke-direct");
+        new Dop(Opcodes.INVOKE_DIRECT, Opcodes.INVOKE_DIRECT,
+            Opcodes.INVOKE_DIRECT_RANGE, Form35c.THE_ONE, false);
 
     public static final Dop INVOKE_STATIC =
-        new Dop(DalvOps.INVOKE_STATIC, DalvOps.INVOKE_STATIC,
-            DalvOps.INVOKE_STATIC_RANGE, Form35c.THE_ONE, false,
-            "invoke-static");
+        new Dop(Opcodes.INVOKE_STATIC, Opcodes.INVOKE_STATIC,
+            Opcodes.INVOKE_STATIC_RANGE, Form35c.THE_ONE, false);
 
     public static final Dop INVOKE_INTERFACE =
-        new Dop(DalvOps.INVOKE_INTERFACE, DalvOps.INVOKE_INTERFACE,
-            DalvOps.INVOKE_INTERFACE_RANGE, Form35c.THE_ONE, false,
-            "invoke-interface");
+        new Dop(Opcodes.INVOKE_INTERFACE, Opcodes.INVOKE_INTERFACE,
+            Opcodes.INVOKE_INTERFACE_RANGE, Form35c.THE_ONE, false);
 
     public static final Dop INVOKE_VIRTUAL_RANGE =
-        new Dop(DalvOps.INVOKE_VIRTUAL_RANGE, DalvOps.INVOKE_VIRTUAL,
-            DalvOps.INVOKE_VIRTUAL_JUMBO, Form3rc.THE_ONE, false,
-            "invoke-virtual/range");
+        new Dop(Opcodes.INVOKE_VIRTUAL_RANGE, Opcodes.INVOKE_VIRTUAL,
+            Opcodes.INVOKE_VIRTUAL_JUMBO, Form3rc.THE_ONE, false);
 
     public static final Dop INVOKE_SUPER_RANGE =
-        new Dop(DalvOps.INVOKE_SUPER_RANGE, DalvOps.INVOKE_SUPER,
-            DalvOps.INVOKE_SUPER_JUMBO, Form3rc.THE_ONE, false,
-            "invoke-super/range");
+        new Dop(Opcodes.INVOKE_SUPER_RANGE, Opcodes.INVOKE_SUPER,
+            Opcodes.INVOKE_SUPER_JUMBO, Form3rc.THE_ONE, false);
 
     public static final Dop INVOKE_DIRECT_RANGE =
-        new Dop(DalvOps.INVOKE_DIRECT_RANGE, DalvOps.INVOKE_DIRECT,
-            DalvOps.INVOKE_DIRECT_JUMBO, Form3rc.THE_ONE, false,
-            "invoke-direct/range");
+        new Dop(Opcodes.INVOKE_DIRECT_RANGE, Opcodes.INVOKE_DIRECT,
+            Opcodes.INVOKE_DIRECT_JUMBO, Form3rc.THE_ONE, false);
 
     public static final Dop INVOKE_STATIC_RANGE =
-        new Dop(DalvOps.INVOKE_STATIC_RANGE, DalvOps.INVOKE_STATIC,
-            DalvOps.INVOKE_STATIC_JUMBO, Form3rc.THE_ONE, false,
-            "invoke-static/range");
+        new Dop(Opcodes.INVOKE_STATIC_RANGE, Opcodes.INVOKE_STATIC,
+            Opcodes.INVOKE_STATIC_JUMBO, Form3rc.THE_ONE, false);
 
     public static final Dop INVOKE_INTERFACE_RANGE =
-        new Dop(DalvOps.INVOKE_INTERFACE_RANGE, DalvOps.INVOKE_INTERFACE,
-            DalvOps.INVOKE_INTERFACE_JUMBO, Form3rc.THE_ONE, false,
-            "invoke-interface/range");
+        new Dop(Opcodes.INVOKE_INTERFACE_RANGE, Opcodes.INVOKE_INTERFACE,
+            Opcodes.INVOKE_INTERFACE_JUMBO, Form3rc.THE_ONE, false);
 
     public static final Dop NEG_INT =
-        new Dop(DalvOps.NEG_INT, DalvOps.NEG_INT,
-            DalvOps.NO_NEXT, Form12x.THE_ONE, true,
-            "neg-int");
+        new Dop(Opcodes.NEG_INT, Opcodes.NEG_INT,
+            Opcodes.NO_NEXT, Form12x.THE_ONE, true);
 
     public static final Dop NOT_INT =
-        new Dop(DalvOps.NOT_INT, DalvOps.NOT_INT,
-            DalvOps.NO_NEXT, Form12x.THE_ONE, true,
-            "not-int");
+        new Dop(Opcodes.NOT_INT, Opcodes.NOT_INT,
+            Opcodes.NO_NEXT, Form12x.THE_ONE, true);
 
     public static final Dop NEG_LONG =
-        new Dop(DalvOps.NEG_LONG, DalvOps.NEG_LONG,
-            DalvOps.NO_NEXT, Form12x.THE_ONE, true,
-            "neg-long");
+        new Dop(Opcodes.NEG_LONG, Opcodes.NEG_LONG,
+            Opcodes.NO_NEXT, Form12x.THE_ONE, true);
 
     public static final Dop NOT_LONG =
-        new Dop(DalvOps.NOT_LONG, DalvOps.NOT_LONG,
-            DalvOps.NO_NEXT, Form12x.THE_ONE, true,
-            "not-long");
+        new Dop(Opcodes.NOT_LONG, Opcodes.NOT_LONG,
+            Opcodes.NO_NEXT, Form12x.THE_ONE, true);
 
     public static final Dop NEG_FLOAT =
-        new Dop(DalvOps.NEG_FLOAT, DalvOps.NEG_FLOAT,
-            DalvOps.NO_NEXT, Form12x.THE_ONE, true,
-            "neg-float");
+        new Dop(Opcodes.NEG_FLOAT, Opcodes.NEG_FLOAT,
+            Opcodes.NO_NEXT, Form12x.THE_ONE, true);
 
     public static final Dop NEG_DOUBLE =
-        new Dop(DalvOps.NEG_DOUBLE, DalvOps.NEG_DOUBLE,
-            DalvOps.NO_NEXT, Form12x.THE_ONE, true,
-            "neg-double");
+        new Dop(Opcodes.NEG_DOUBLE, Opcodes.NEG_DOUBLE,
+            Opcodes.NO_NEXT, Form12x.THE_ONE, true);
 
     public static final Dop INT_TO_LONG =
-        new Dop(DalvOps.INT_TO_LONG, DalvOps.INT_TO_LONG,
-            DalvOps.NO_NEXT, Form12x.THE_ONE, true,
-            "int-to-long");
+        new Dop(Opcodes.INT_TO_LONG, Opcodes.INT_TO_LONG,
+            Opcodes.NO_NEXT, Form12x.THE_ONE, true);
 
     public static final Dop INT_TO_FLOAT =
-        new Dop(DalvOps.INT_TO_FLOAT, DalvOps.INT_TO_FLOAT,
-            DalvOps.NO_NEXT, Form12x.THE_ONE, true,
-            "int-to-float");
+        new Dop(Opcodes.INT_TO_FLOAT, Opcodes.INT_TO_FLOAT,
+            Opcodes.NO_NEXT, Form12x.THE_ONE, true);
 
     public static final Dop INT_TO_DOUBLE =
-        new Dop(DalvOps.INT_TO_DOUBLE, DalvOps.INT_TO_DOUBLE,
-            DalvOps.NO_NEXT, Form12x.THE_ONE, true,
-            "int-to-double");
+        new Dop(Opcodes.INT_TO_DOUBLE, Opcodes.INT_TO_DOUBLE,
+            Opcodes.NO_NEXT, Form12x.THE_ONE, true);
 
     public static final Dop LONG_TO_INT =
-        new Dop(DalvOps.LONG_TO_INT, DalvOps.LONG_TO_INT,
-            DalvOps.NO_NEXT, Form12x.THE_ONE, true,
-            "long-to-int");
+        new Dop(Opcodes.LONG_TO_INT, Opcodes.LONG_TO_INT,
+            Opcodes.NO_NEXT, Form12x.THE_ONE, true);
 
     public static final Dop LONG_TO_FLOAT =
-        new Dop(DalvOps.LONG_TO_FLOAT, DalvOps.LONG_TO_FLOAT,
-            DalvOps.NO_NEXT, Form12x.THE_ONE, true,
-            "long-to-float");
+        new Dop(Opcodes.LONG_TO_FLOAT, Opcodes.LONG_TO_FLOAT,
+            Opcodes.NO_NEXT, Form12x.THE_ONE, true);
 
     public static final Dop LONG_TO_DOUBLE =
-        new Dop(DalvOps.LONG_TO_DOUBLE, DalvOps.LONG_TO_DOUBLE,
-            DalvOps.NO_NEXT, Form12x.THE_ONE, true,
-            "long-to-double");
+        new Dop(Opcodes.LONG_TO_DOUBLE, Opcodes.LONG_TO_DOUBLE,
+            Opcodes.NO_NEXT, Form12x.THE_ONE, true);
 
     public static final Dop FLOAT_TO_INT =
-        new Dop(DalvOps.FLOAT_TO_INT, DalvOps.FLOAT_TO_INT,
-            DalvOps.NO_NEXT, Form12x.THE_ONE, true,
-            "float-to-int");
+        new Dop(Opcodes.FLOAT_TO_INT, Opcodes.FLOAT_TO_INT,
+            Opcodes.NO_NEXT, Form12x.THE_ONE, true);
 
     public static final Dop FLOAT_TO_LONG =
-        new Dop(DalvOps.FLOAT_TO_LONG, DalvOps.FLOAT_TO_LONG,
-            DalvOps.NO_NEXT, Form12x.THE_ONE, true,
-            "float-to-long");
+        new Dop(Opcodes.FLOAT_TO_LONG, Opcodes.FLOAT_TO_LONG,
+            Opcodes.NO_NEXT, Form12x.THE_ONE, true);
 
     public static final Dop FLOAT_TO_DOUBLE =
-        new Dop(DalvOps.FLOAT_TO_DOUBLE, DalvOps.FLOAT_TO_DOUBLE,
-            DalvOps.NO_NEXT, Form12x.THE_ONE, true,
-            "float-to-double");
+        new Dop(Opcodes.FLOAT_TO_DOUBLE, Opcodes.FLOAT_TO_DOUBLE,
+            Opcodes.NO_NEXT, Form12x.THE_ONE, true);
 
     public static final Dop DOUBLE_TO_INT =
-        new Dop(DalvOps.DOUBLE_TO_INT, DalvOps.DOUBLE_TO_INT,
-            DalvOps.NO_NEXT, Form12x.THE_ONE, true,
-            "double-to-int");
+        new Dop(Opcodes.DOUBLE_TO_INT, Opcodes.DOUBLE_TO_INT,
+            Opcodes.NO_NEXT, Form12x.THE_ONE, true);
 
     public static final Dop DOUBLE_TO_LONG =
-        new Dop(DalvOps.DOUBLE_TO_LONG, DalvOps.DOUBLE_TO_LONG,
-            DalvOps.NO_NEXT, Form12x.THE_ONE, true,
-            "double-to-long");
+        new Dop(Opcodes.DOUBLE_TO_LONG, Opcodes.DOUBLE_TO_LONG,
+            Opcodes.NO_NEXT, Form12x.THE_ONE, true);
 
     public static final Dop DOUBLE_TO_FLOAT =
-        new Dop(DalvOps.DOUBLE_TO_FLOAT, DalvOps.DOUBLE_TO_FLOAT,
-            DalvOps.NO_NEXT, Form12x.THE_ONE, true,
-            "double-to-float");
+        new Dop(Opcodes.DOUBLE_TO_FLOAT, Opcodes.DOUBLE_TO_FLOAT,
+            Opcodes.NO_NEXT, Form12x.THE_ONE, true);
 
     public static final Dop INT_TO_BYTE =
-        new Dop(DalvOps.INT_TO_BYTE, DalvOps.INT_TO_BYTE,
-            DalvOps.NO_NEXT, Form12x.THE_ONE, true,
-            "int-to-byte");
+        new Dop(Opcodes.INT_TO_BYTE, Opcodes.INT_TO_BYTE,
+            Opcodes.NO_NEXT, Form12x.THE_ONE, true);
 
     public static final Dop INT_TO_CHAR =
-        new Dop(DalvOps.INT_TO_CHAR, DalvOps.INT_TO_CHAR,
-            DalvOps.NO_NEXT, Form12x.THE_ONE, true,
-            "int-to-char");
+        new Dop(Opcodes.INT_TO_CHAR, Opcodes.INT_TO_CHAR,
+            Opcodes.NO_NEXT, Form12x.THE_ONE, true);
 
     public static final Dop INT_TO_SHORT =
-        new Dop(DalvOps.INT_TO_SHORT, DalvOps.INT_TO_SHORT,
-            DalvOps.NO_NEXT, Form12x.THE_ONE, true,
-            "int-to-short");
+        new Dop(Opcodes.INT_TO_SHORT, Opcodes.INT_TO_SHORT,
+            Opcodes.NO_NEXT, Form12x.THE_ONE, true);
 
     public static final Dop ADD_INT =
-        new Dop(DalvOps.ADD_INT, DalvOps.ADD_INT,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "add-int");
+        new Dop(Opcodes.ADD_INT, Opcodes.ADD_INT,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop SUB_INT =
-        new Dop(DalvOps.SUB_INT, DalvOps.SUB_INT,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "sub-int");
+        new Dop(Opcodes.SUB_INT, Opcodes.SUB_INT,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop MUL_INT =
-        new Dop(DalvOps.MUL_INT, DalvOps.MUL_INT,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "mul-int");
+        new Dop(Opcodes.MUL_INT, Opcodes.MUL_INT,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop DIV_INT =
-        new Dop(DalvOps.DIV_INT, DalvOps.DIV_INT,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "div-int");
+        new Dop(Opcodes.DIV_INT, Opcodes.DIV_INT,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop REM_INT =
-        new Dop(DalvOps.REM_INT, DalvOps.REM_INT,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "rem-int");
+        new Dop(Opcodes.REM_INT, Opcodes.REM_INT,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop AND_INT =
-        new Dop(DalvOps.AND_INT, DalvOps.AND_INT,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "and-int");
+        new Dop(Opcodes.AND_INT, Opcodes.AND_INT,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop OR_INT =
-        new Dop(DalvOps.OR_INT, DalvOps.OR_INT,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "or-int");
+        new Dop(Opcodes.OR_INT, Opcodes.OR_INT,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop XOR_INT =
-        new Dop(DalvOps.XOR_INT, DalvOps.XOR_INT,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "xor-int");
+        new Dop(Opcodes.XOR_INT, Opcodes.XOR_INT,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop SHL_INT =
-        new Dop(DalvOps.SHL_INT, DalvOps.SHL_INT,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "shl-int");
+        new Dop(Opcodes.SHL_INT, Opcodes.SHL_INT,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop SHR_INT =
-        new Dop(DalvOps.SHR_INT, DalvOps.SHR_INT,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "shr-int");
+        new Dop(Opcodes.SHR_INT, Opcodes.SHR_INT,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop USHR_INT =
-        new Dop(DalvOps.USHR_INT, DalvOps.USHR_INT,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "ushr-int");
+        new Dop(Opcodes.USHR_INT, Opcodes.USHR_INT,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop ADD_LONG =
-        new Dop(DalvOps.ADD_LONG, DalvOps.ADD_LONG,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "add-long");
+        new Dop(Opcodes.ADD_LONG, Opcodes.ADD_LONG,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop SUB_LONG =
-        new Dop(DalvOps.SUB_LONG, DalvOps.SUB_LONG,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "sub-long");
+        new Dop(Opcodes.SUB_LONG, Opcodes.SUB_LONG,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop MUL_LONG =
-        new Dop(DalvOps.MUL_LONG, DalvOps.MUL_LONG,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "mul-long");
+        new Dop(Opcodes.MUL_LONG, Opcodes.MUL_LONG,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop DIV_LONG =
-        new Dop(DalvOps.DIV_LONG, DalvOps.DIV_LONG,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "div-long");
+        new Dop(Opcodes.DIV_LONG, Opcodes.DIV_LONG,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop REM_LONG =
-        new Dop(DalvOps.REM_LONG, DalvOps.REM_LONG,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "rem-long");
+        new Dop(Opcodes.REM_LONG, Opcodes.REM_LONG,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop AND_LONG =
-        new Dop(DalvOps.AND_LONG, DalvOps.AND_LONG,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "and-long");
+        new Dop(Opcodes.AND_LONG, Opcodes.AND_LONG,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop OR_LONG =
-        new Dop(DalvOps.OR_LONG, DalvOps.OR_LONG,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "or-long");
+        new Dop(Opcodes.OR_LONG, Opcodes.OR_LONG,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop XOR_LONG =
-        new Dop(DalvOps.XOR_LONG, DalvOps.XOR_LONG,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "xor-long");
+        new Dop(Opcodes.XOR_LONG, Opcodes.XOR_LONG,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop SHL_LONG =
-        new Dop(DalvOps.SHL_LONG, DalvOps.SHL_LONG,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "shl-long");
+        new Dop(Opcodes.SHL_LONG, Opcodes.SHL_LONG,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop SHR_LONG =
-        new Dop(DalvOps.SHR_LONG, DalvOps.SHR_LONG,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "shr-long");
+        new Dop(Opcodes.SHR_LONG, Opcodes.SHR_LONG,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop USHR_LONG =
-        new Dop(DalvOps.USHR_LONG, DalvOps.USHR_LONG,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "ushr-long");
+        new Dop(Opcodes.USHR_LONG, Opcodes.USHR_LONG,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop ADD_FLOAT =
-        new Dop(DalvOps.ADD_FLOAT, DalvOps.ADD_FLOAT,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "add-float");
+        new Dop(Opcodes.ADD_FLOAT, Opcodes.ADD_FLOAT,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop SUB_FLOAT =
-        new Dop(DalvOps.SUB_FLOAT, DalvOps.SUB_FLOAT,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "sub-float");
+        new Dop(Opcodes.SUB_FLOAT, Opcodes.SUB_FLOAT,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop MUL_FLOAT =
-        new Dop(DalvOps.MUL_FLOAT, DalvOps.MUL_FLOAT,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "mul-float");
+        new Dop(Opcodes.MUL_FLOAT, Opcodes.MUL_FLOAT,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop DIV_FLOAT =
-        new Dop(DalvOps.DIV_FLOAT, DalvOps.DIV_FLOAT,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "div-float");
+        new Dop(Opcodes.DIV_FLOAT, Opcodes.DIV_FLOAT,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop REM_FLOAT =
-        new Dop(DalvOps.REM_FLOAT, DalvOps.REM_FLOAT,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "rem-float");
+        new Dop(Opcodes.REM_FLOAT, Opcodes.REM_FLOAT,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop ADD_DOUBLE =
-        new Dop(DalvOps.ADD_DOUBLE, DalvOps.ADD_DOUBLE,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "add-double");
+        new Dop(Opcodes.ADD_DOUBLE, Opcodes.ADD_DOUBLE,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop SUB_DOUBLE =
-        new Dop(DalvOps.SUB_DOUBLE, DalvOps.SUB_DOUBLE,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "sub-double");
+        new Dop(Opcodes.SUB_DOUBLE, Opcodes.SUB_DOUBLE,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop MUL_DOUBLE =
-        new Dop(DalvOps.MUL_DOUBLE, DalvOps.MUL_DOUBLE,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "mul-double");
+        new Dop(Opcodes.MUL_DOUBLE, Opcodes.MUL_DOUBLE,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop DIV_DOUBLE =
-        new Dop(DalvOps.DIV_DOUBLE, DalvOps.DIV_DOUBLE,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "div-double");
+        new Dop(Opcodes.DIV_DOUBLE, Opcodes.DIV_DOUBLE,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop REM_DOUBLE =
-        new Dop(DalvOps.REM_DOUBLE, DalvOps.REM_DOUBLE,
-            DalvOps.NO_NEXT, Form23x.THE_ONE, true,
-            "rem-double");
+        new Dop(Opcodes.REM_DOUBLE, Opcodes.REM_DOUBLE,
+            Opcodes.NO_NEXT, Form23x.THE_ONE, true);
 
     public static final Dop ADD_INT_2ADDR =
-        new Dop(DalvOps.ADD_INT_2ADDR, DalvOps.ADD_INT,
-            DalvOps.ADD_INT, Form12x.THE_ONE, true,
-            "add-int/2addr");
+        new Dop(Opcodes.ADD_INT_2ADDR, Opcodes.ADD_INT,
+            Opcodes.ADD_INT, Form12x.THE_ONE, true);
 
     public static final Dop SUB_INT_2ADDR =
-        new Dop(DalvOps.SUB_INT_2ADDR, DalvOps.SUB_INT,
-            DalvOps.SUB_INT, Form12x.THE_ONE, true,
-            "sub-int/2addr");
+        new Dop(Opcodes.SUB_INT_2ADDR, Opcodes.SUB_INT,
+            Opcodes.SUB_INT, Form12x.THE_ONE, true);
 
     public static final Dop MUL_INT_2ADDR =
-        new Dop(DalvOps.MUL_INT_2ADDR, DalvOps.MUL_INT,
-            DalvOps.MUL_INT, Form12x.THE_ONE, true,
-            "mul-int/2addr");
+        new Dop(Opcodes.MUL_INT_2ADDR, Opcodes.MUL_INT,
+            Opcodes.MUL_INT, Form12x.THE_ONE, true);
 
     public static final Dop DIV_INT_2ADDR =
-        new Dop(DalvOps.DIV_INT_2ADDR, DalvOps.DIV_INT,
-            DalvOps.DIV_INT, Form12x.THE_ONE, true,
-            "div-int/2addr");
+        new Dop(Opcodes.DIV_INT_2ADDR, Opcodes.DIV_INT,
+            Opcodes.DIV_INT, Form12x.THE_ONE, true);
 
     public static final Dop REM_INT_2ADDR =
-        new Dop(DalvOps.REM_INT_2ADDR, DalvOps.REM_INT,
-            DalvOps.REM_INT, Form12x.THE_ONE, true,
-            "rem-int/2addr");
+        new Dop(Opcodes.REM_INT_2ADDR, Opcodes.REM_INT,
+            Opcodes.REM_INT, Form12x.THE_ONE, true);
 
     public static final Dop AND_INT_2ADDR =
-        new Dop(DalvOps.AND_INT_2ADDR, DalvOps.AND_INT,
-            DalvOps.AND_INT, Form12x.THE_ONE, true,
-            "and-int/2addr");
+        new Dop(Opcodes.AND_INT_2ADDR, Opcodes.AND_INT,
+            Opcodes.AND_INT, Form12x.THE_ONE, true);
 
     public static final Dop OR_INT_2ADDR =
-        new Dop(DalvOps.OR_INT_2ADDR, DalvOps.OR_INT,
-            DalvOps.OR_INT, Form12x.THE_ONE, true,
-            "or-int/2addr");
+        new Dop(Opcodes.OR_INT_2ADDR, Opcodes.OR_INT,
+            Opcodes.OR_INT, Form12x.THE_ONE, true);
 
     public static final Dop XOR_INT_2ADDR =
-        new Dop(DalvOps.XOR_INT_2ADDR, DalvOps.XOR_INT,
-            DalvOps.XOR_INT, Form12x.THE_ONE, true,
-            "xor-int/2addr");
+        new Dop(Opcodes.XOR_INT_2ADDR, Opcodes.XOR_INT,
+            Opcodes.XOR_INT, Form12x.THE_ONE, true);
 
     public static final Dop SHL_INT_2ADDR =
-        new Dop(DalvOps.SHL_INT_2ADDR, DalvOps.SHL_INT,
-            DalvOps.SHL_INT, Form12x.THE_ONE, true,
-            "shl-int/2addr");
+        new Dop(Opcodes.SHL_INT_2ADDR, Opcodes.SHL_INT,
+            Opcodes.SHL_INT, Form12x.THE_ONE, true);
 
     public static final Dop SHR_INT_2ADDR =
-        new Dop(DalvOps.SHR_INT_2ADDR, DalvOps.SHR_INT,
-            DalvOps.SHR_INT, Form12x.THE_ONE, true,
-            "shr-int/2addr");
+        new Dop(Opcodes.SHR_INT_2ADDR, Opcodes.SHR_INT,
+            Opcodes.SHR_INT, Form12x.THE_ONE, true);
 
     public static final Dop USHR_INT_2ADDR =
-        new Dop(DalvOps.USHR_INT_2ADDR, DalvOps.USHR_INT,
-            DalvOps.USHR_INT, Form12x.THE_ONE, true,
-            "ushr-int/2addr");
+        new Dop(Opcodes.USHR_INT_2ADDR, Opcodes.USHR_INT,
+            Opcodes.USHR_INT, Form12x.THE_ONE, true);
 
     public static final Dop ADD_LONG_2ADDR =
-        new Dop(DalvOps.ADD_LONG_2ADDR, DalvOps.ADD_LONG,
-            DalvOps.ADD_LONG, Form12x.THE_ONE, true,
-            "add-long/2addr");
+        new Dop(Opcodes.ADD_LONG_2ADDR, Opcodes.ADD_LONG,
+            Opcodes.ADD_LONG, Form12x.THE_ONE, true);
 
     public static final Dop SUB_LONG_2ADDR =
-        new Dop(DalvOps.SUB_LONG_2ADDR, DalvOps.SUB_LONG,
-            DalvOps.SUB_LONG, Form12x.THE_ONE, true,
-            "sub-long/2addr");
+        new Dop(Opcodes.SUB_LONG_2ADDR, Opcodes.SUB_LONG,
+            Opcodes.SUB_LONG, Form12x.THE_ONE, true);
 
     public static final Dop MUL_LONG_2ADDR =
-        new Dop(DalvOps.MUL_LONG_2ADDR, DalvOps.MUL_LONG,
-            DalvOps.MUL_LONG, Form12x.THE_ONE, true,
-            "mul-long/2addr");
+        new Dop(Opcodes.MUL_LONG_2ADDR, Opcodes.MUL_LONG,
+            Opcodes.MUL_LONG, Form12x.THE_ONE, true);
 
     public static final Dop DIV_LONG_2ADDR =
-        new Dop(DalvOps.DIV_LONG_2ADDR, DalvOps.DIV_LONG,
-            DalvOps.DIV_LONG, Form12x.THE_ONE, true,
-            "div-long/2addr");
+        new Dop(Opcodes.DIV_LONG_2ADDR, Opcodes.DIV_LONG,
+            Opcodes.DIV_LONG, Form12x.THE_ONE, true);
 
     public static final Dop REM_LONG_2ADDR =
-        new Dop(DalvOps.REM_LONG_2ADDR, DalvOps.REM_LONG,
-            DalvOps.REM_LONG, Form12x.THE_ONE, true,
-            "rem-long/2addr");
+        new Dop(Opcodes.REM_LONG_2ADDR, Opcodes.REM_LONG,
+            Opcodes.REM_LONG, Form12x.THE_ONE, true);
 
     public static final Dop AND_LONG_2ADDR =
-        new Dop(DalvOps.AND_LONG_2ADDR, DalvOps.AND_LONG,
-            DalvOps.AND_LONG, Form12x.THE_ONE, true,
-            "and-long/2addr");
+        new Dop(Opcodes.AND_LONG_2ADDR, Opcodes.AND_LONG,
+            Opcodes.AND_LONG, Form12x.THE_ONE, true);
 
     public static final Dop OR_LONG_2ADDR =
-        new Dop(DalvOps.OR_LONG_2ADDR, DalvOps.OR_LONG,
-            DalvOps.OR_LONG, Form12x.THE_ONE, true,
-            "or-long/2addr");
+        new Dop(Opcodes.OR_LONG_2ADDR, Opcodes.OR_LONG,
+            Opcodes.OR_LONG, Form12x.THE_ONE, true);
 
     public static final Dop XOR_LONG_2ADDR =
-        new Dop(DalvOps.XOR_LONG_2ADDR, DalvOps.XOR_LONG,
-            DalvOps.XOR_LONG, Form12x.THE_ONE, true,
-            "xor-long/2addr");
+        new Dop(Opcodes.XOR_LONG_2ADDR, Opcodes.XOR_LONG,
+            Opcodes.XOR_LONG, Form12x.THE_ONE, true);
 
     public static final Dop SHL_LONG_2ADDR =
-        new Dop(DalvOps.SHL_LONG_2ADDR, DalvOps.SHL_LONG,
-            DalvOps.SHL_LONG, Form12x.THE_ONE, true,
-            "shl-long/2addr");
+        new Dop(Opcodes.SHL_LONG_2ADDR, Opcodes.SHL_LONG,
+            Opcodes.SHL_LONG, Form12x.THE_ONE, true);
 
     public static final Dop SHR_LONG_2ADDR =
-        new Dop(DalvOps.SHR_LONG_2ADDR, DalvOps.SHR_LONG,
-            DalvOps.SHR_LONG, Form12x.THE_ONE, true,
-            "shr-long/2addr");
+        new Dop(Opcodes.SHR_LONG_2ADDR, Opcodes.SHR_LONG,
+            Opcodes.SHR_LONG, Form12x.THE_ONE, true);
 
     public static final Dop USHR_LONG_2ADDR =
-        new Dop(DalvOps.USHR_LONG_2ADDR, DalvOps.USHR_LONG,
-            DalvOps.USHR_LONG, Form12x.THE_ONE, true,
-            "ushr-long/2addr");
+        new Dop(Opcodes.USHR_LONG_2ADDR, Opcodes.USHR_LONG,
+            Opcodes.USHR_LONG, Form12x.THE_ONE, true);
 
     public static final Dop ADD_FLOAT_2ADDR =
-        new Dop(DalvOps.ADD_FLOAT_2ADDR, DalvOps.ADD_FLOAT,
-            DalvOps.ADD_FLOAT, Form12x.THE_ONE, true,
-            "add-float/2addr");
+        new Dop(Opcodes.ADD_FLOAT_2ADDR, Opcodes.ADD_FLOAT,
+            Opcodes.ADD_FLOAT, Form12x.THE_ONE, true);
 
     public static final Dop SUB_FLOAT_2ADDR =
-        new Dop(DalvOps.SUB_FLOAT_2ADDR, DalvOps.SUB_FLOAT,
-            DalvOps.SUB_FLOAT, Form12x.THE_ONE, true,
-            "sub-float/2addr");
+        new Dop(Opcodes.SUB_FLOAT_2ADDR, Opcodes.SUB_FLOAT,
+            Opcodes.SUB_FLOAT, Form12x.THE_ONE, true);
 
     public static final Dop MUL_FLOAT_2ADDR =
-        new Dop(DalvOps.MUL_FLOAT_2ADDR, DalvOps.MUL_FLOAT,
-            DalvOps.MUL_FLOAT, Form12x.THE_ONE, true,
-            "mul-float/2addr");
+        new Dop(Opcodes.MUL_FLOAT_2ADDR, Opcodes.MUL_FLOAT,
+            Opcodes.MUL_FLOAT, Form12x.THE_ONE, true);
 
     public static final Dop DIV_FLOAT_2ADDR =
-        new Dop(DalvOps.DIV_FLOAT_2ADDR, DalvOps.DIV_FLOAT,
-            DalvOps.DIV_FLOAT, Form12x.THE_ONE, true,
-            "div-float/2addr");
+        new Dop(Opcodes.DIV_FLOAT_2ADDR, Opcodes.DIV_FLOAT,
+            Opcodes.DIV_FLOAT, Form12x.THE_ONE, true);
 
     public static final Dop REM_FLOAT_2ADDR =
-        new Dop(DalvOps.REM_FLOAT_2ADDR, DalvOps.REM_FLOAT,
-            DalvOps.REM_FLOAT, Form12x.THE_ONE, true,
-            "rem-float/2addr");
+        new Dop(Opcodes.REM_FLOAT_2ADDR, Opcodes.REM_FLOAT,
+            Opcodes.REM_FLOAT, Form12x.THE_ONE, true);
 
     public static final Dop ADD_DOUBLE_2ADDR =
-        new Dop(DalvOps.ADD_DOUBLE_2ADDR, DalvOps.ADD_DOUBLE,
-            DalvOps.ADD_DOUBLE, Form12x.THE_ONE, true,
-            "add-double/2addr");
+        new Dop(Opcodes.ADD_DOUBLE_2ADDR, Opcodes.ADD_DOUBLE,
+            Opcodes.ADD_DOUBLE, Form12x.THE_ONE, true);
 
     public static final Dop SUB_DOUBLE_2ADDR =
-        new Dop(DalvOps.SUB_DOUBLE_2ADDR, DalvOps.SUB_DOUBLE,
-            DalvOps.SUB_DOUBLE, Form12x.THE_ONE, true,
-            "sub-double/2addr");
+        new Dop(Opcodes.SUB_DOUBLE_2ADDR, Opcodes.SUB_DOUBLE,
+            Opcodes.SUB_DOUBLE, Form12x.THE_ONE, true);
 
     public static final Dop MUL_DOUBLE_2ADDR =
-        new Dop(DalvOps.MUL_DOUBLE_2ADDR, DalvOps.MUL_DOUBLE,
-            DalvOps.MUL_DOUBLE, Form12x.THE_ONE, true,
-            "mul-double/2addr");
+        new Dop(Opcodes.MUL_DOUBLE_2ADDR, Opcodes.MUL_DOUBLE,
+            Opcodes.MUL_DOUBLE, Form12x.THE_ONE, true);
 
     public static final Dop DIV_DOUBLE_2ADDR =
-        new Dop(DalvOps.DIV_DOUBLE_2ADDR, DalvOps.DIV_DOUBLE,
-            DalvOps.DIV_DOUBLE, Form12x.THE_ONE, true,
-            "div-double/2addr");
+        new Dop(Opcodes.DIV_DOUBLE_2ADDR, Opcodes.DIV_DOUBLE,
+            Opcodes.DIV_DOUBLE, Form12x.THE_ONE, true);
 
     public static final Dop REM_DOUBLE_2ADDR =
-        new Dop(DalvOps.REM_DOUBLE_2ADDR, DalvOps.REM_DOUBLE,
-            DalvOps.REM_DOUBLE, Form12x.THE_ONE, true,
-            "rem-double/2addr");
+        new Dop(Opcodes.REM_DOUBLE_2ADDR, Opcodes.REM_DOUBLE,
+            Opcodes.REM_DOUBLE, Form12x.THE_ONE, true);
 
     public static final Dop ADD_INT_LIT16 =
-        new Dop(DalvOps.ADD_INT_LIT16, DalvOps.ADD_INT,
-            DalvOps.NO_NEXT, Form22s.THE_ONE, true,
-            "add-int/lit16");
+        new Dop(Opcodes.ADD_INT_LIT16, Opcodes.ADD_INT,
+            Opcodes.NO_NEXT, Form22s.THE_ONE, true);
 
     public static final Dop RSUB_INT =
-        new Dop(DalvOps.RSUB_INT, DalvOps.RSUB_INT,
-            DalvOps.NO_NEXT, Form22s.THE_ONE, true,
-            "rsub-int");
+        new Dop(Opcodes.RSUB_INT, Opcodes.RSUB_INT,
+            Opcodes.NO_NEXT, Form22s.THE_ONE, true);
 
     public static final Dop MUL_INT_LIT16 =
-        new Dop(DalvOps.MUL_INT_LIT16, DalvOps.MUL_INT,
-            DalvOps.NO_NEXT, Form22s.THE_ONE, true,
-            "mul-int/lit16");
+        new Dop(Opcodes.MUL_INT_LIT16, Opcodes.MUL_INT,
+            Opcodes.NO_NEXT, Form22s.THE_ONE, true);
 
     public static final Dop DIV_INT_LIT16 =
-        new Dop(DalvOps.DIV_INT_LIT16, DalvOps.DIV_INT,
-            DalvOps.NO_NEXT, Form22s.THE_ONE, true,
-            "div-int/lit16");
+        new Dop(Opcodes.DIV_INT_LIT16, Opcodes.DIV_INT,
+            Opcodes.NO_NEXT, Form22s.THE_ONE, true);
 
     public static final Dop REM_INT_LIT16 =
-        new Dop(DalvOps.REM_INT_LIT16, DalvOps.REM_INT,
-            DalvOps.NO_NEXT, Form22s.THE_ONE, true,
-            "rem-int/lit16");
+        new Dop(Opcodes.REM_INT_LIT16, Opcodes.REM_INT,
+            Opcodes.NO_NEXT, Form22s.THE_ONE, true);
 
     public static final Dop AND_INT_LIT16 =
-        new Dop(DalvOps.AND_INT_LIT16, DalvOps.AND_INT,
-            DalvOps.NO_NEXT, Form22s.THE_ONE, true,
-            "and-int/lit16");
+        new Dop(Opcodes.AND_INT_LIT16, Opcodes.AND_INT,
+            Opcodes.NO_NEXT, Form22s.THE_ONE, true);
 
     public static final Dop OR_INT_LIT16 =
-        new Dop(DalvOps.OR_INT_LIT16, DalvOps.OR_INT,
-            DalvOps.NO_NEXT, Form22s.THE_ONE, true,
-            "or-int/lit16");
+        new Dop(Opcodes.OR_INT_LIT16, Opcodes.OR_INT,
+            Opcodes.NO_NEXT, Form22s.THE_ONE, true);
 
     public static final Dop XOR_INT_LIT16 =
-        new Dop(DalvOps.XOR_INT_LIT16, DalvOps.XOR_INT,
-            DalvOps.NO_NEXT, Form22s.THE_ONE, true,
-            "xor-int/lit16");
+        new Dop(Opcodes.XOR_INT_LIT16, Opcodes.XOR_INT,
+            Opcodes.NO_NEXT, Form22s.THE_ONE, true);
 
     public static final Dop ADD_INT_LIT8 =
-        new Dop(DalvOps.ADD_INT_LIT8, DalvOps.ADD_INT,
-            DalvOps.ADD_INT_LIT16, Form22b.THE_ONE, true,
-            "add-int/lit8");
+        new Dop(Opcodes.ADD_INT_LIT8, Opcodes.ADD_INT,
+            Opcodes.ADD_INT_LIT16, Form22b.THE_ONE, true);
 
     public static final Dop RSUB_INT_LIT8 =
-        new Dop(DalvOps.RSUB_INT_LIT8, DalvOps.RSUB_INT,
-            DalvOps.RSUB_INT, Form22b.THE_ONE, true,
-            "rsub-int/lit8");
+        new Dop(Opcodes.RSUB_INT_LIT8, Opcodes.RSUB_INT,
+            Opcodes.RSUB_INT, Form22b.THE_ONE, true);
 
     public static final Dop MUL_INT_LIT8 =
-        new Dop(DalvOps.MUL_INT_LIT8, DalvOps.MUL_INT,
-            DalvOps.MUL_INT_LIT16, Form22b.THE_ONE, true,
-            "mul-int/lit8");
+        new Dop(Opcodes.MUL_INT_LIT8, Opcodes.MUL_INT,
+            Opcodes.MUL_INT_LIT16, Form22b.THE_ONE, true);
 
     public static final Dop DIV_INT_LIT8 =
-        new Dop(DalvOps.DIV_INT_LIT8, DalvOps.DIV_INT,
-            DalvOps.DIV_INT_LIT16, Form22b.THE_ONE, true,
-            "div-int/lit8");
+        new Dop(Opcodes.DIV_INT_LIT8, Opcodes.DIV_INT,
+            Opcodes.DIV_INT_LIT16, Form22b.THE_ONE, true);
 
     public static final Dop REM_INT_LIT8 =
-        new Dop(DalvOps.REM_INT_LIT8, DalvOps.REM_INT,
-            DalvOps.REM_INT_LIT16, Form22b.THE_ONE, true,
-            "rem-int/lit8");
+        new Dop(Opcodes.REM_INT_LIT8, Opcodes.REM_INT,
+            Opcodes.REM_INT_LIT16, Form22b.THE_ONE, true);
 
     public static final Dop AND_INT_LIT8 =
-        new Dop(DalvOps.AND_INT_LIT8, DalvOps.AND_INT,
-            DalvOps.AND_INT_LIT16, Form22b.THE_ONE, true,
-            "and-int/lit8");
+        new Dop(Opcodes.AND_INT_LIT8, Opcodes.AND_INT,
+            Opcodes.AND_INT_LIT16, Form22b.THE_ONE, true);
 
     public static final Dop OR_INT_LIT8 =
-        new Dop(DalvOps.OR_INT_LIT8, DalvOps.OR_INT,
-            DalvOps.OR_INT_LIT16, Form22b.THE_ONE, true,
-            "or-int/lit8");
+        new Dop(Opcodes.OR_INT_LIT8, Opcodes.OR_INT,
+            Opcodes.OR_INT_LIT16, Form22b.THE_ONE, true);
 
     public static final Dop XOR_INT_LIT8 =
-        new Dop(DalvOps.XOR_INT_LIT8, DalvOps.XOR_INT,
-            DalvOps.XOR_INT_LIT16, Form22b.THE_ONE, true,
-            "xor-int/lit8");
+        new Dop(Opcodes.XOR_INT_LIT8, Opcodes.XOR_INT,
+            Opcodes.XOR_INT_LIT16, Form22b.THE_ONE, true);
 
     public static final Dop SHL_INT_LIT8 =
-        new Dop(DalvOps.SHL_INT_LIT8, DalvOps.SHL_INT,
-            DalvOps.NO_NEXT, Form22b.THE_ONE, true,
-            "shl-int/lit8");
+        new Dop(Opcodes.SHL_INT_LIT8, Opcodes.SHL_INT,
+            Opcodes.NO_NEXT, Form22b.THE_ONE, true);
 
     public static final Dop SHR_INT_LIT8 =
-        new Dop(DalvOps.SHR_INT_LIT8, DalvOps.SHR_INT,
-            DalvOps.NO_NEXT, Form22b.THE_ONE, true,
-            "shr-int/lit8");
+        new Dop(Opcodes.SHR_INT_LIT8, Opcodes.SHR_INT,
+            Opcodes.NO_NEXT, Form22b.THE_ONE, true);
 
     public static final Dop USHR_INT_LIT8 =
-        new Dop(DalvOps.USHR_INT_LIT8, DalvOps.USHR_INT,
-            DalvOps.NO_NEXT, Form22b.THE_ONE, true,
-            "ushr-int/lit8");
+        new Dop(Opcodes.USHR_INT_LIT8, Opcodes.USHR_INT,
+            Opcodes.NO_NEXT, Form22b.THE_ONE, true);
 
     public static final Dop CONST_CLASS_JUMBO =
-        new Dop(DalvOps.CONST_CLASS_JUMBO, DalvOps.CONST_CLASS,
-            DalvOps.NO_NEXT, Form41c.THE_ONE, true,
-            "const-class/jumbo");
+        new Dop(Opcodes.CONST_CLASS_JUMBO, Opcodes.CONST_CLASS,
+            Opcodes.NO_NEXT, Form41c.THE_ONE, true);
 
     public static final Dop CHECK_CAST_JUMBO =
-        new Dop(DalvOps.CHECK_CAST_JUMBO, DalvOps.CHECK_CAST,
-            DalvOps.NO_NEXT, Form41c.THE_ONE, false,
-            "check-cast/jumbo");
+        new Dop(Opcodes.CHECK_CAST_JUMBO, Opcodes.CHECK_CAST,
+            Opcodes.NO_NEXT, Form41c.THE_ONE, false);
 
     public static final Dop INSTANCE_OF_JUMBO =
-        new Dop(DalvOps.INSTANCE_OF_JUMBO, DalvOps.INSTANCE_OF,
-            DalvOps.NO_NEXT, Form52c.THE_ONE, true,
-            "instance-of/jumbo");
+        new Dop(Opcodes.INSTANCE_OF_JUMBO, Opcodes.INSTANCE_OF,
+            Opcodes.NO_NEXT, Form52c.THE_ONE, true);
 
     public static final Dop NEW_INSTANCE_JUMBO =
-        new Dop(DalvOps.NEW_INSTANCE_JUMBO, DalvOps.NEW_INSTANCE,
-            DalvOps.NO_NEXT, Form41c.THE_ONE, true,
-            "new-instance/jumbo");
+        new Dop(Opcodes.NEW_INSTANCE_JUMBO, Opcodes.NEW_INSTANCE,
+            Opcodes.NO_NEXT, Form41c.THE_ONE, true);
 
     public static final Dop NEW_ARRAY_JUMBO =
-        new Dop(DalvOps.NEW_ARRAY_JUMBO, DalvOps.NEW_ARRAY,
-            DalvOps.NO_NEXT, Form52c.THE_ONE, true,
-            "new-array/jumbo");
+        new Dop(Opcodes.NEW_ARRAY_JUMBO, Opcodes.NEW_ARRAY,
+            Opcodes.NO_NEXT, Form52c.THE_ONE, true);
 
     public static final Dop FILLED_NEW_ARRAY_JUMBO =
-        new Dop(DalvOps.FILLED_NEW_ARRAY_JUMBO, DalvOps.FILLED_NEW_ARRAY,
-            DalvOps.NO_NEXT, Form5rc.THE_ONE, false,
-            "filled-new-array/jumbo");
+        new Dop(Opcodes.FILLED_NEW_ARRAY_JUMBO, Opcodes.FILLED_NEW_ARRAY,
+            Opcodes.NO_NEXT, Form5rc.THE_ONE, false);
 
     public static final Dop IGET_JUMBO =
-        new Dop(DalvOps.IGET_JUMBO, DalvOps.IGET,
-            DalvOps.NO_NEXT, Form52c.THE_ONE, true,
-            "iget/jumbo");
+        new Dop(Opcodes.IGET_JUMBO, Opcodes.IGET,
+            Opcodes.NO_NEXT, Form52c.THE_ONE, true);
 
     public static final Dop IGET_WIDE_JUMBO =
-        new Dop(DalvOps.IGET_WIDE_JUMBO, DalvOps.IGET_WIDE,
-            DalvOps.NO_NEXT, Form52c.THE_ONE, true,
-            "iget-wide/jumbo");
+        new Dop(Opcodes.IGET_WIDE_JUMBO, Opcodes.IGET_WIDE,
+            Opcodes.NO_NEXT, Form52c.THE_ONE, true);
 
     public static final Dop IGET_OBJECT_JUMBO =
-        new Dop(DalvOps.IGET_OBJECT_JUMBO, DalvOps.IGET_OBJECT,
-            DalvOps.NO_NEXT, Form52c.THE_ONE, true,
-            "iget-object/jumbo");
+        new Dop(Opcodes.IGET_OBJECT_JUMBO, Opcodes.IGET_OBJECT,
+            Opcodes.NO_NEXT, Form52c.THE_ONE, true);
 
     public static final Dop IGET_BOOLEAN_JUMBO =
-        new Dop(DalvOps.IGET_BOOLEAN_JUMBO, DalvOps.IGET_BOOLEAN,
-            DalvOps.NO_NEXT, Form52c.THE_ONE, true,
-            "iget-boolean/jumbo");
+        new Dop(Opcodes.IGET_BOOLEAN_JUMBO, Opcodes.IGET_BOOLEAN,
+            Opcodes.NO_NEXT, Form52c.THE_ONE, true);
 
     public static final Dop IGET_BYTE_JUMBO =
-        new Dop(DalvOps.IGET_BYTE_JUMBO, DalvOps.IGET_BYTE,
-            DalvOps.NO_NEXT, Form52c.THE_ONE, true,
-            "iget-byte/jumbo");
+        new Dop(Opcodes.IGET_BYTE_JUMBO, Opcodes.IGET_BYTE,
+            Opcodes.NO_NEXT, Form52c.THE_ONE, true);
 
     public static final Dop IGET_CHAR_JUMBO =
-        new Dop(DalvOps.IGET_CHAR_JUMBO, DalvOps.IGET_CHAR,
-            DalvOps.NO_NEXT, Form52c.THE_ONE, true,
-            "iget-char/jumbo");
+        new Dop(Opcodes.IGET_CHAR_JUMBO, Opcodes.IGET_CHAR,
+            Opcodes.NO_NEXT, Form52c.THE_ONE, true);
 
     public static final Dop IGET_SHORT_JUMBO =
-        new Dop(DalvOps.IGET_SHORT_JUMBO, DalvOps.IGET_SHORT,
-            DalvOps.NO_NEXT, Form52c.THE_ONE, true,
-            "iget-short/jumbo");
+        new Dop(Opcodes.IGET_SHORT_JUMBO, Opcodes.IGET_SHORT,
+            Opcodes.NO_NEXT, Form52c.THE_ONE, true);
 
     public static final Dop IPUT_JUMBO =
-        new Dop(DalvOps.IPUT_JUMBO, DalvOps.IPUT,
-            DalvOps.NO_NEXT, Form52c.THE_ONE, false,
-            "iput/jumbo");
+        new Dop(Opcodes.IPUT_JUMBO, Opcodes.IPUT,
+            Opcodes.NO_NEXT, Form52c.THE_ONE, false);
 
     public static final Dop IPUT_WIDE_JUMBO =
-        new Dop(DalvOps.IPUT_WIDE_JUMBO, DalvOps.IPUT_WIDE,
-            DalvOps.NO_NEXT, Form52c.THE_ONE, false,
-            "iput-wide/jumbo");
+        new Dop(Opcodes.IPUT_WIDE_JUMBO, Opcodes.IPUT_WIDE,
+            Opcodes.NO_NEXT, Form52c.THE_ONE, false);
 
     public static final Dop IPUT_OBJECT_JUMBO =
-        new Dop(DalvOps.IPUT_OBJECT_JUMBO, DalvOps.IPUT_OBJECT,
-            DalvOps.NO_NEXT, Form52c.THE_ONE, false,
-            "iput-object/jumbo");
+        new Dop(Opcodes.IPUT_OBJECT_JUMBO, Opcodes.IPUT_OBJECT,
+            Opcodes.NO_NEXT, Form52c.THE_ONE, false);
 
     public static final Dop IPUT_BOOLEAN_JUMBO =
-        new Dop(DalvOps.IPUT_BOOLEAN_JUMBO, DalvOps.IPUT_BOOLEAN,
-            DalvOps.NO_NEXT, Form52c.THE_ONE, false,
-            "iput-boolean/jumbo");
+        new Dop(Opcodes.IPUT_BOOLEAN_JUMBO, Opcodes.IPUT_BOOLEAN,
+            Opcodes.NO_NEXT, Form52c.THE_ONE, false);
 
     public static final Dop IPUT_BYTE_JUMBO =
-        new Dop(DalvOps.IPUT_BYTE_JUMBO, DalvOps.IPUT_BYTE,
-            DalvOps.NO_NEXT, Form52c.THE_ONE, false,
-            "iput-byte/jumbo");
+        new Dop(Opcodes.IPUT_BYTE_JUMBO, Opcodes.IPUT_BYTE,
+            Opcodes.NO_NEXT, Form52c.THE_ONE, false);
 
     public static final Dop IPUT_CHAR_JUMBO =
-        new Dop(DalvOps.IPUT_CHAR_JUMBO, DalvOps.IPUT_CHAR,
-            DalvOps.NO_NEXT, Form52c.THE_ONE, false,
-            "iput-char/jumbo");
+        new Dop(Opcodes.IPUT_CHAR_JUMBO, Opcodes.IPUT_CHAR,
+            Opcodes.NO_NEXT, Form52c.THE_ONE, false);
 
     public static final Dop IPUT_SHORT_JUMBO =
-        new Dop(DalvOps.IPUT_SHORT_JUMBO, DalvOps.IPUT_SHORT,
-            DalvOps.NO_NEXT, Form52c.THE_ONE, false,
-            "iput-short/jumbo");
+        new Dop(Opcodes.IPUT_SHORT_JUMBO, Opcodes.IPUT_SHORT,
+            Opcodes.NO_NEXT, Form52c.THE_ONE, false);
 
     public static final Dop SGET_JUMBO =
-        new Dop(DalvOps.SGET_JUMBO, DalvOps.SGET,
-            DalvOps.NO_NEXT, Form41c.THE_ONE, true,
-            "sget/jumbo");
+        new Dop(Opcodes.SGET_JUMBO, Opcodes.SGET,
+            Opcodes.NO_NEXT, Form41c.THE_ONE, true);
 
     public static final Dop SGET_WIDE_JUMBO =
-        new Dop(DalvOps.SGET_WIDE_JUMBO, DalvOps.SGET_WIDE,
-            DalvOps.NO_NEXT, Form41c.THE_ONE, true,
-            "sget-wide/jumbo");
+        new Dop(Opcodes.SGET_WIDE_JUMBO, Opcodes.SGET_WIDE,
+            Opcodes.NO_NEXT, Form41c.THE_ONE, true);
 
     public static final Dop SGET_OBJECT_JUMBO =
-        new Dop(DalvOps.SGET_OBJECT_JUMBO, DalvOps.SGET_OBJECT,
-            DalvOps.NO_NEXT, Form41c.THE_ONE, true,
-            "sget-object/jumbo");
+        new Dop(Opcodes.SGET_OBJECT_JUMBO, Opcodes.SGET_OBJECT,
+            Opcodes.NO_NEXT, Form41c.THE_ONE, true);
 
     public static final Dop SGET_BOOLEAN_JUMBO =
-        new Dop(DalvOps.SGET_BOOLEAN_JUMBO, DalvOps.SGET_BOOLEAN,
-            DalvOps.NO_NEXT, Form41c.THE_ONE, true,
-            "sget-boolean/jumbo");
+        new Dop(Opcodes.SGET_BOOLEAN_JUMBO, Opcodes.SGET_BOOLEAN,
+            Opcodes.NO_NEXT, Form41c.THE_ONE, true);
 
     public static final Dop SGET_BYTE_JUMBO =
-        new Dop(DalvOps.SGET_BYTE_JUMBO, DalvOps.SGET_BYTE,
-            DalvOps.NO_NEXT, Form41c.THE_ONE, true,
-            "sget-byte/jumbo");
+        new Dop(Opcodes.SGET_BYTE_JUMBO, Opcodes.SGET_BYTE,
+            Opcodes.NO_NEXT, Form41c.THE_ONE, true);
 
     public static final Dop SGET_CHAR_JUMBO =
-        new Dop(DalvOps.SGET_CHAR_JUMBO, DalvOps.SGET_CHAR,
-            DalvOps.NO_NEXT, Form41c.THE_ONE, true,
-            "sget-char/jumbo");
+        new Dop(Opcodes.SGET_CHAR_JUMBO, Opcodes.SGET_CHAR,
+            Opcodes.NO_NEXT, Form41c.THE_ONE, true);
 
     public static final Dop SGET_SHORT_JUMBO =
-        new Dop(DalvOps.SGET_SHORT_JUMBO, DalvOps.SGET_SHORT,
-            DalvOps.NO_NEXT, Form41c.THE_ONE, true,
-            "sget-short/jumbo");
+        new Dop(Opcodes.SGET_SHORT_JUMBO, Opcodes.SGET_SHORT,
+            Opcodes.NO_NEXT, Form41c.THE_ONE, true);
 
     public static final Dop SPUT_JUMBO =
-        new Dop(DalvOps.SPUT_JUMBO, DalvOps.SPUT,
-            DalvOps.NO_NEXT, Form41c.THE_ONE, false,
-            "sput/jumbo");
+        new Dop(Opcodes.SPUT_JUMBO, Opcodes.SPUT,
+            Opcodes.NO_NEXT, Form41c.THE_ONE, false);
 
     public static final Dop SPUT_WIDE_JUMBO =
-        new Dop(DalvOps.SPUT_WIDE_JUMBO, DalvOps.SPUT_WIDE,
-            DalvOps.NO_NEXT, Form41c.THE_ONE, false,
-            "sput-wide/jumbo");
+        new Dop(Opcodes.SPUT_WIDE_JUMBO, Opcodes.SPUT_WIDE,
+            Opcodes.NO_NEXT, Form41c.THE_ONE, false);
 
     public static final Dop SPUT_OBJECT_JUMBO =
-        new Dop(DalvOps.SPUT_OBJECT_JUMBO, DalvOps.SPUT_OBJECT,
-            DalvOps.NO_NEXT, Form41c.THE_ONE, false,
-            "sput-object/jumbo");
+        new Dop(Opcodes.SPUT_OBJECT_JUMBO, Opcodes.SPUT_OBJECT,
+            Opcodes.NO_NEXT, Form41c.THE_ONE, false);
 
     public static final Dop SPUT_BOOLEAN_JUMBO =
-        new Dop(DalvOps.SPUT_BOOLEAN_JUMBO, DalvOps.SPUT_BOOLEAN,
-            DalvOps.NO_NEXT, Form41c.THE_ONE, false,
-            "sput-boolean/jumbo");
+        new Dop(Opcodes.SPUT_BOOLEAN_JUMBO, Opcodes.SPUT_BOOLEAN,
+            Opcodes.NO_NEXT, Form41c.THE_ONE, false);
 
     public static final Dop SPUT_BYTE_JUMBO =
-        new Dop(DalvOps.SPUT_BYTE_JUMBO, DalvOps.SPUT_BYTE,
-            DalvOps.NO_NEXT, Form41c.THE_ONE, false,
-            "sput-byte/jumbo");
+        new Dop(Opcodes.SPUT_BYTE_JUMBO, Opcodes.SPUT_BYTE,
+            Opcodes.NO_NEXT, Form41c.THE_ONE, false);
 
     public static final Dop SPUT_CHAR_JUMBO =
-        new Dop(DalvOps.SPUT_CHAR_JUMBO, DalvOps.SPUT_CHAR,
-            DalvOps.NO_NEXT, Form41c.THE_ONE, false,
-            "sput-char/jumbo");
+        new Dop(Opcodes.SPUT_CHAR_JUMBO, Opcodes.SPUT_CHAR,
+            Opcodes.NO_NEXT, Form41c.THE_ONE, false);
 
     public static final Dop SPUT_SHORT_JUMBO =
-        new Dop(DalvOps.SPUT_SHORT_JUMBO, DalvOps.SPUT_SHORT,
-            DalvOps.NO_NEXT, Form41c.THE_ONE, false,
-            "sput-short/jumbo");
+        new Dop(Opcodes.SPUT_SHORT_JUMBO, Opcodes.SPUT_SHORT,
+            Opcodes.NO_NEXT, Form41c.THE_ONE, false);
 
     public static final Dop INVOKE_VIRTUAL_JUMBO =
-        new Dop(DalvOps.INVOKE_VIRTUAL_JUMBO, DalvOps.INVOKE_VIRTUAL,
-            DalvOps.NO_NEXT, Form5rc.THE_ONE, false,
-            "invoke-virtual/jumbo");
+        new Dop(Opcodes.INVOKE_VIRTUAL_JUMBO, Opcodes.INVOKE_VIRTUAL,
+            Opcodes.NO_NEXT, Form5rc.THE_ONE, false);
 
     public static final Dop INVOKE_SUPER_JUMBO =
-        new Dop(DalvOps.INVOKE_SUPER_JUMBO, DalvOps.INVOKE_SUPER,
-            DalvOps.NO_NEXT, Form5rc.THE_ONE, false,
-            "invoke-super/jumbo");
+        new Dop(Opcodes.INVOKE_SUPER_JUMBO, Opcodes.INVOKE_SUPER,
+            Opcodes.NO_NEXT, Form5rc.THE_ONE, false);
 
     public static final Dop INVOKE_DIRECT_JUMBO =
-        new Dop(DalvOps.INVOKE_DIRECT_JUMBO, DalvOps.INVOKE_DIRECT,
-            DalvOps.NO_NEXT, Form5rc.THE_ONE, false,
-            "invoke-direct/jumbo");
+        new Dop(Opcodes.INVOKE_DIRECT_JUMBO, Opcodes.INVOKE_DIRECT,
+            Opcodes.NO_NEXT, Form5rc.THE_ONE, false);
 
     public static final Dop INVOKE_STATIC_JUMBO =
-        new Dop(DalvOps.INVOKE_STATIC_JUMBO, DalvOps.INVOKE_STATIC,
-            DalvOps.NO_NEXT, Form5rc.THE_ONE, false,
-            "invoke-static/jumbo");
+        new Dop(Opcodes.INVOKE_STATIC_JUMBO, Opcodes.INVOKE_STATIC,
+            Opcodes.NO_NEXT, Form5rc.THE_ONE, false);
 
     public static final Dop INVOKE_INTERFACE_JUMBO =
-        new Dop(DalvOps.INVOKE_INTERFACE_JUMBO, DalvOps.INVOKE_INTERFACE,
-            DalvOps.NO_NEXT, Form5rc.THE_ONE, false,
-            "invoke-interface/jumbo");
+        new Dop(Opcodes.INVOKE_INTERFACE_JUMBO, Opcodes.INVOKE_INTERFACE,
+            Opcodes.NO_NEXT, Form5rc.THE_ONE, false);
 
     // END(dops)
 
     // Static initialization.
     static {
-        DOPS = new Dop[DalvOps.MAX_VALUE - DalvOps.MIN_VALUE + 1];
+        DOPS = new Dop[Opcodes.MAX_VALUE - Opcodes.MIN_VALUE + 1];
 
         set(SPECIAL_FORMAT);
 
@@ -1627,12 +1372,12 @@
     /**
      * Gets the {@link Dop} for the given opcode value.
      *
-     * @param opcode {@code DalvOps.MIN_VALUE..DalvOps.MAX_VALUE;} the
+     * @param opcode {@code Opcodes.MIN_VALUE..Opcodes.MAX_VALUE;} the
      * opcode value
      * @return {@code non-null;} the associated opcode instance
      */
     public static Dop get(int opcode) {
-        int idx = opcode - DalvOps.MIN_VALUE;
+        int idx = opcode - Opcodes.MIN_VALUE;
 
         try {
             Dop result = DOPS[idx];
@@ -1658,7 +1403,7 @@
     public static Dop getNextOrNull(Dop opcode) {
         int nextOpcode = opcode.getNextOpcode();
 
-        if (nextOpcode == DalvOps.NO_NEXT) {
+        if (nextOpcode == Opcodes.NO_NEXT) {
             return null;
         }
 
@@ -1671,7 +1416,7 @@
      * @param opcode {@code non-null;} the opcode
      */
     private static void set(Dop opcode) {
-        int idx = opcode.getOpcode() - DalvOps.MIN_VALUE;
+        int idx = opcode.getOpcode() - Opcodes.MIN_VALUE;
         DOPS[idx] = opcode;
     }
 }
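
The DOPS table above is addressed by (opcode - Opcodes.MIN_VALUE), so the whole MIN_VALUE..MAX_VALUE range maps onto 0-based array slots, as get() and set() show. A small standalone sketch of just that indexing; DopTableSketch and its MIN_VALUE/MAX_VALUE constants are hypothetical stand-ins, not the real Opcodes values.

    // Sketch of the DOPS indexing: the table is addressed by
    // (opcode - MIN_VALUE), mapping the whole MIN_VALUE..MAX_VALUE range
    // onto 0-based slots. MIN_VALUE/MAX_VALUE are illustrative values only.
    public class DopTableSketch {
        static final int MIN_VALUE = -1;
        static final int MAX_VALUE = 0xffff;
        static final String[] DOPS = new String[MAX_VALUE - MIN_VALUE + 1];

        static void set(int opcode, String name) {
            DOPS[opcode - MIN_VALUE] = name;
        }

        static String get(int opcode) {
            String result = DOPS[opcode - MIN_VALUE];
            if (result == null) {
                throw new IllegalArgumentException("bogus opcode: " + opcode);
            }
            return result;
        }

        public static void main(String[] args) {
            set(0x00, "nop");
            System.out.println(get(0x00)); // nop
        }
    }
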
diff --git a/dx/src/com/android/dx/dex/code/InsnFormat.java b/dx/src/com/android/dx/dex/code/InsnFormat.java
index 4dfb6e2..a86a003 100644
--- a/dx/src/com/android/dx/dex/code/InsnFormat.java
+++ b/dx/src/com/android/dx/dex/code/InsnFormat.java
@@ -26,6 +26,8 @@
 import com.android.dx.util.AnnotatedOutput;
 import com.android.dx.util.Hex;
 
+import java.util.BitSet;
+
 /**
  * Base class for all instruction format handlers. Instruction format
  * handlers know how to translate {@link DalvInsn} instances into
@@ -38,7 +40,7 @@
      * temporary measure until VM support for the salient opcodes is
      * added. TODO: Remove this declaration when the VM can deal.
      */
-    public static boolean ALLOW_EXTENDED_OPCODES = false;
+    public static boolean ALLOW_EXTENDED_OPCODES = true;
 
     /**
      * Returns the string form, suitable for inclusion in a listing
@@ -127,6 +129,22 @@
     public abstract boolean isCompatible(DalvInsn insn);
 
     /**
+     * Returns which of a given instruction's registers will fit in
+     * this instance's format.
+     *
+     * <p>The default implementation of this method always returns
+     * an empty BitSet. Subclasses must override this method if they
+     * have registers.</p>
+     *
+     * @param insn {@code non-null;} the instruction to check
+     * @return {@code non-null;} a BitSet flagging registers in the
+     * register list that are compatible with this format
+     */
+    public BitSet compatibleRegs(DalvInsn insn) {
+        return new BitSet();
+    }
+
+    /**
      * Returns whether or not the given instruction's branch offset will
      * fit in this instance's format. This always returns {@code false}
      * for formats that don't include a branch offset.
@@ -488,8 +506,9 @@
     protected static short opcodeUnit(DalvInsn insn) {
         int opcode = insn.getOpcode().getOpcode();
 
-        if ((opcode < 0x100) || (opcode > 0xffff)) {
-            throw new IllegalArgumentException("opcode out of range 0..65535");
+        if ((opcode < 0xff) || (opcode > 0xffff)) {
+            throw new IllegalArgumentException(
+                "extended opcode out of range 255..65535");
         }
 
         return (short) opcode;
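
The new compatibleRegs() hook defaults to an empty BitSet; each format class below overrides it to flag which operands already fit that format's register fields, so that only the remaining operands need to be copied into reserved low registers. A minimal standalone sketch of the per-operand fit test, using hypothetical names (fitsInNibble stands in for InsnFormat's unsignedFitsInNibble):

    import java.util.BitSet;

    // Standalone sketch of a compatibleRegs()-style check for a format
    // whose register fields are 4 bits wide (e.g. format 12x).
    public class CompatibleRegsSketch {
        static boolean fitsInNibble(int reg) { return (reg & ~0xf) == 0; }

        static BitSet compatibleRegs12x(int[] regs) {
            BitSet bits = new BitSet(regs.length);
            for (int i = 0; i < regs.length; i++) {
                bits.set(i, fitsInNibble(regs[i]));
            }
            return bits;
        }

        public static void main(String[] args) {
            // v3 fits in a nibble, v200 does not, so only bit 0 ends up set
            System.out.println(compatibleRegs12x(new int[] { 3, 200 })); // {0}
        }
    }
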
diff --git a/dx/src/com/android/dx/dex/code/OddSpacer.java b/dx/src/com/android/dx/dex/code/OddSpacer.java
index 756a0e2..f44f9cc 100644
--- a/dx/src/com/android/dx/dex/code/OddSpacer.java
+++ b/dx/src/com/android/dx/dex/code/OddSpacer.java
@@ -16,6 +16,7 @@
 
 package com.android.dx.dex.code;
 
+import com.android.dx.io.Opcodes;
 import com.android.dx.rop.code.RegisterSpecList;
 import com.android.dx.rop.code.SourcePosition;
 import com.android.dx.util.AnnotatedOutput;
@@ -47,7 +48,7 @@
     @Override
     public void writeTo(AnnotatedOutput out) {
         if (codeSize() != 0) {
-            out.writeShort(InsnFormat.codeUnit(DalvOps.NOP, 0));
+            out.writeShort(InsnFormat.codeUnit(Opcodes.NOP, 0));
         }
     }
 
diff --git a/dx/src/com/android/dx/dex/code/OutputFinisher.java b/dx/src/com/android/dx/dex/code/OutputFinisher.java
index 9d6fec7..118d184 100644
--- a/dx/src/com/android/dx/dex/code/OutputFinisher.java
+++ b/dx/src/com/android/dx/dex/code/OutputFinisher.java
@@ -16,6 +16,7 @@
 
 package com.android.dx.dex.code;
 
+import com.android.dx.io.Opcodes;
 import com.android.dx.rop.code.LocalItem;
 import com.android.dx.rop.code.RegisterSpec;
 import com.android.dx.rop.code.RegisterSpecList;
@@ -28,6 +29,7 @@
 import com.android.dx.rop.type.Type;
 
 import java.util.ArrayList;
+import java.util.BitSet;
 import java.util.HashSet;
 
 /**
@@ -450,19 +452,19 @@
             Dop originalOpcode = opcodes[i];
             Dop newOpcode = findOpcodeForInsn(insn, originalOpcode);
 
-            if (originalOpcode == newOpcode) {
-                continue;
-            }
-
             if (newOpcode == null) {
                 /*
-                 * The instruction will need to be expanded, so reserve
-                 * registers for it.
+                 * The instruction will need to be expanded, so find the
+                 * expanded opcode and reserve registers for it.
                  */
-                int reserve = insn.getMinimumRegisterRequirement();
+                Dop expandedOp = findExpandedOpcodeForInsn(insn);
+                BitSet compatRegs = expandedOp.getFormat().compatibleRegs(insn);
+                int reserve = insn.getMinimumRegisterRequirement(compatRegs);
                 if (reserve > newReservedCount) {
                     newReservedCount = reserve;
                 }
+            } else if (originalOpcode == newOpcode) {
+                continue;
             }
 
             opcodes[i] = newOpcode;
@@ -505,6 +507,17 @@
     }
 
     /**
+     * Finds the proper opcode for the given instruction, ignoring
+     * register constraints.
+     *
+     * @param insn {@code non-null;} the instruction in question
+     * @return {@code non-null;} the opcode that fits
+     */
+    private Dop findExpandedOpcodeForInsn(DalvInsn insn) {
+        return findOpcodeForInsn(insn.getLowRegVersion(), insn.getOpcode());
+    }
+
+    /**
      * Helper for {@link #finishProcessingAndGetList}, which goes
      * through each instruction in the output, making sure its opcode
     * can accommodate its arguments. In cases where the opcode is
@@ -583,16 +596,14 @@
                 suffix = null;
             } else {
                 // Expansion is required.
-                prefix = insn.hrPrefix();
-                suffix = insn.hrSuffix();
+                currentOpcode = findExpandedOpcodeForInsn(insn);
+                BitSet compatRegs =
+                    currentOpcode.getFormat().compatibleRegs(insn);
+                prefix = insn.expandedPrefix(compatRegs);
+                suffix = insn.expandedSuffix(compatRegs);
 
-                /*
-                 * Get the initial guess as to the hr version, but then
-                 * let findOpcodeForInsn() pick a better format, if any.
-                 */
-                insn = insn.hrVersion();
-                originalOpcode = insn.getOpcode();
-                currentOpcode = findOpcodeForInsn(insn, originalOpcode);
+                // Expand necessary registers to fit the new format
+                insn = insn.expandedVersion(compatRegs);
             }
 
             if (prefix != null) {
@@ -671,7 +682,7 @@
                 continue;
             }
 
-            if (opcode.getFamily() == DalvOps.GOTO) {
+            if (opcode.getFamily() == Opcodes.GOTO) {
                 // It is a goto; widen it if possible.
                 opcode = findOpcodeForInsn(insn, opcode);
                 if (opcode == null) {
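
The reworked expansion path reserves registers based on the expanded opcode's compatibleRegs() result instead of unconditionally. getMinimumRegisterRequirement(BitSet) itself is not shown in this patch, so the sketch below only assumes the obvious reading: each operand not flagged compatible needs low registers equal to its width in words. Class and method names here are illustrative, not the dx API:

    import java.util.BitSet;

    // Rough sketch of the reservation step: operands the expanded format
    // cannot encode in place (bits NOT set in compatRegs) each need low
    // registers equal to their width in words (1, or 2 for long/double).
    public class ReserveSketch {
        static int minimumRegisterRequirement(BitSet compatRegs, int[] regWidths) {
            int reserve = 0;
            for (int i = 0; i < regWidths.length; i++) {
                if (!compatRegs.get(i)) {
                    reserve += regWidths[i];
                }
            }
            return reserve;
        }

        public static void main(String[] args) {
            BitSet compat = new BitSet();
            compat.set(0);                 // operand 0 already fits the format
            int[] widths = { 1, 2 };       // operand 1 is a wide value
            System.out.println(minimumRegisterRequirement(compat, widths)); // 2
        }
    }
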
diff --git a/dx/src/com/android/dx/dex/code/RopToDop.java b/dx/src/com/android/dx/dex/code/RopToDop.java
index 856386b..5292d3c 100644
--- a/dx/src/com/android/dx/dex/code/RopToDop.java
+++ b/dx/src/com/android/dx/dex/code/RopToDop.java
@@ -57,163 +57,163 @@
      */
 
     // BEGIN(first-opcodes); GENERATED AUTOMATICALLY BY opcode-gen
-    //     DalvOps.NOP
-    //     DalvOps.MOVE
-    //     DalvOps.MOVE_WIDE
-    //     DalvOps.MOVE_OBJECT
-    //     DalvOps.MOVE_RESULT
-    //     DalvOps.MOVE_RESULT_WIDE
-    //     DalvOps.MOVE_RESULT_OBJECT
-    //     DalvOps.MOVE_EXCEPTION
-    //     DalvOps.RETURN_VOID
-    //     DalvOps.RETURN
-    //     DalvOps.RETURN_WIDE
-    //     DalvOps.RETURN_OBJECT
-    //     DalvOps.CONST_4
-    //     DalvOps.CONST_WIDE_16
-    //     DalvOps.CONST_STRING
-    //     DalvOps.CONST_CLASS
-    //     DalvOps.MONITOR_ENTER
-    //     DalvOps.MONITOR_EXIT
-    //     DalvOps.CHECK_CAST
-    //     DalvOps.INSTANCE_OF
-    //     DalvOps.ARRAY_LENGTH
-    //     DalvOps.NEW_INSTANCE
-    //     DalvOps.NEW_ARRAY
-    //     DalvOps.FILLED_NEW_ARRAY
-    //     DalvOps.FILL_ARRAY_DATA
-    //     DalvOps.THROW
-    //     DalvOps.GOTO
-    //     DalvOps.PACKED_SWITCH
-    //     DalvOps.SPARSE_SWITCH
-    //     DalvOps.CMPL_FLOAT
-    //     DalvOps.CMPG_FLOAT
-    //     DalvOps.CMPL_DOUBLE
-    //     DalvOps.CMPG_DOUBLE
-    //     DalvOps.CMP_LONG
-    //     DalvOps.IF_EQ
-    //     DalvOps.IF_NE
-    //     DalvOps.IF_LT
-    //     DalvOps.IF_GE
-    //     DalvOps.IF_GT
-    //     DalvOps.IF_LE
-    //     DalvOps.IF_EQZ
-    //     DalvOps.IF_NEZ
-    //     DalvOps.IF_LTZ
-    //     DalvOps.IF_GEZ
-    //     DalvOps.IF_GTZ
-    //     DalvOps.IF_LEZ
-    //     DalvOps.AGET
-    //     DalvOps.AGET_WIDE
-    //     DalvOps.AGET_OBJECT
-    //     DalvOps.AGET_BOOLEAN
-    //     DalvOps.AGET_BYTE
-    //     DalvOps.AGET_CHAR
-    //     DalvOps.AGET_SHORT
-    //     DalvOps.APUT
-    //     DalvOps.APUT_WIDE
-    //     DalvOps.APUT_OBJECT
-    //     DalvOps.APUT_BOOLEAN
-    //     DalvOps.APUT_BYTE
-    //     DalvOps.APUT_CHAR
-    //     DalvOps.APUT_SHORT
-    //     DalvOps.IGET
-    //     DalvOps.IGET_WIDE
-    //     DalvOps.IGET_OBJECT
-    //     DalvOps.IGET_BOOLEAN
-    //     DalvOps.IGET_BYTE
-    //     DalvOps.IGET_CHAR
-    //     DalvOps.IGET_SHORT
-    //     DalvOps.IPUT
-    //     DalvOps.IPUT_WIDE
-    //     DalvOps.IPUT_OBJECT
-    //     DalvOps.IPUT_BOOLEAN
-    //     DalvOps.IPUT_BYTE
-    //     DalvOps.IPUT_CHAR
-    //     DalvOps.IPUT_SHORT
-    //     DalvOps.SGET
-    //     DalvOps.SGET_WIDE
-    //     DalvOps.SGET_OBJECT
-    //     DalvOps.SGET_BOOLEAN
-    //     DalvOps.SGET_BYTE
-    //     DalvOps.SGET_CHAR
-    //     DalvOps.SGET_SHORT
-    //     DalvOps.SPUT
-    //     DalvOps.SPUT_WIDE
-    //     DalvOps.SPUT_OBJECT
-    //     DalvOps.SPUT_BOOLEAN
-    //     DalvOps.SPUT_BYTE
-    //     DalvOps.SPUT_CHAR
-    //     DalvOps.SPUT_SHORT
-    //     DalvOps.INVOKE_VIRTUAL
-    //     DalvOps.INVOKE_SUPER
-    //     DalvOps.INVOKE_DIRECT
-    //     DalvOps.INVOKE_STATIC
-    //     DalvOps.INVOKE_INTERFACE
-    //     DalvOps.NEG_INT
-    //     DalvOps.NOT_INT
-    //     DalvOps.NEG_LONG
-    //     DalvOps.NOT_LONG
-    //     DalvOps.NEG_FLOAT
-    //     DalvOps.NEG_DOUBLE
-    //     DalvOps.INT_TO_LONG
-    //     DalvOps.INT_TO_FLOAT
-    //     DalvOps.INT_TO_DOUBLE
-    //     DalvOps.LONG_TO_INT
-    //     DalvOps.LONG_TO_FLOAT
-    //     DalvOps.LONG_TO_DOUBLE
-    //     DalvOps.FLOAT_TO_INT
-    //     DalvOps.FLOAT_TO_LONG
-    //     DalvOps.FLOAT_TO_DOUBLE
-    //     DalvOps.DOUBLE_TO_INT
-    //     DalvOps.DOUBLE_TO_LONG
-    //     DalvOps.DOUBLE_TO_FLOAT
-    //     DalvOps.INT_TO_BYTE
-    //     DalvOps.INT_TO_CHAR
-    //     DalvOps.INT_TO_SHORT
-    //     DalvOps.ADD_INT_2ADDR
-    //     DalvOps.SUB_INT_2ADDR
-    //     DalvOps.MUL_INT_2ADDR
-    //     DalvOps.DIV_INT_2ADDR
-    //     DalvOps.REM_INT_2ADDR
-    //     DalvOps.AND_INT_2ADDR
-    //     DalvOps.OR_INT_2ADDR
-    //     DalvOps.XOR_INT_2ADDR
-    //     DalvOps.SHL_INT_2ADDR
-    //     DalvOps.SHR_INT_2ADDR
-    //     DalvOps.USHR_INT_2ADDR
-    //     DalvOps.ADD_LONG_2ADDR
-    //     DalvOps.SUB_LONG_2ADDR
-    //     DalvOps.MUL_LONG_2ADDR
-    //     DalvOps.DIV_LONG_2ADDR
-    //     DalvOps.REM_LONG_2ADDR
-    //     DalvOps.AND_LONG_2ADDR
-    //     DalvOps.OR_LONG_2ADDR
-    //     DalvOps.XOR_LONG_2ADDR
-    //     DalvOps.SHL_LONG_2ADDR
-    //     DalvOps.SHR_LONG_2ADDR
-    //     DalvOps.USHR_LONG_2ADDR
-    //     DalvOps.ADD_FLOAT_2ADDR
-    //     DalvOps.SUB_FLOAT_2ADDR
-    //     DalvOps.MUL_FLOAT_2ADDR
-    //     DalvOps.DIV_FLOAT_2ADDR
-    //     DalvOps.REM_FLOAT_2ADDR
-    //     DalvOps.ADD_DOUBLE_2ADDR
-    //     DalvOps.SUB_DOUBLE_2ADDR
-    //     DalvOps.MUL_DOUBLE_2ADDR
-    //     DalvOps.DIV_DOUBLE_2ADDR
-    //     DalvOps.REM_DOUBLE_2ADDR
-    //     DalvOps.ADD_INT_LIT8
-    //     DalvOps.RSUB_INT_LIT8
-    //     DalvOps.MUL_INT_LIT8
-    //     DalvOps.DIV_INT_LIT8
-    //     DalvOps.REM_INT_LIT8
-    //     DalvOps.AND_INT_LIT8
-    //     DalvOps.OR_INT_LIT8
-    //     DalvOps.XOR_INT_LIT8
-    //     DalvOps.SHL_INT_LIT8
-    //     DalvOps.SHR_INT_LIT8
-    //     DalvOps.USHR_INT_LIT8
+    //     Opcodes.NOP
+    //     Opcodes.MOVE
+    //     Opcodes.MOVE_WIDE
+    //     Opcodes.MOVE_OBJECT
+    //     Opcodes.MOVE_RESULT
+    //     Opcodes.MOVE_RESULT_WIDE
+    //     Opcodes.MOVE_RESULT_OBJECT
+    //     Opcodes.MOVE_EXCEPTION
+    //     Opcodes.RETURN_VOID
+    //     Opcodes.RETURN
+    //     Opcodes.RETURN_WIDE
+    //     Opcodes.RETURN_OBJECT
+    //     Opcodes.CONST_4
+    //     Opcodes.CONST_WIDE_16
+    //     Opcodes.CONST_STRING
+    //     Opcodes.CONST_CLASS
+    //     Opcodes.MONITOR_ENTER
+    //     Opcodes.MONITOR_EXIT
+    //     Opcodes.CHECK_CAST
+    //     Opcodes.INSTANCE_OF
+    //     Opcodes.ARRAY_LENGTH
+    //     Opcodes.NEW_INSTANCE
+    //     Opcodes.NEW_ARRAY
+    //     Opcodes.FILLED_NEW_ARRAY
+    //     Opcodes.FILL_ARRAY_DATA
+    //     Opcodes.THROW
+    //     Opcodes.GOTO
+    //     Opcodes.PACKED_SWITCH
+    //     Opcodes.SPARSE_SWITCH
+    //     Opcodes.CMPL_FLOAT
+    //     Opcodes.CMPG_FLOAT
+    //     Opcodes.CMPL_DOUBLE
+    //     Opcodes.CMPG_DOUBLE
+    //     Opcodes.CMP_LONG
+    //     Opcodes.IF_EQ
+    //     Opcodes.IF_NE
+    //     Opcodes.IF_LT
+    //     Opcodes.IF_GE
+    //     Opcodes.IF_GT
+    //     Opcodes.IF_LE
+    //     Opcodes.IF_EQZ
+    //     Opcodes.IF_NEZ
+    //     Opcodes.IF_LTZ
+    //     Opcodes.IF_GEZ
+    //     Opcodes.IF_GTZ
+    //     Opcodes.IF_LEZ
+    //     Opcodes.AGET
+    //     Opcodes.AGET_WIDE
+    //     Opcodes.AGET_OBJECT
+    //     Opcodes.AGET_BOOLEAN
+    //     Opcodes.AGET_BYTE
+    //     Opcodes.AGET_CHAR
+    //     Opcodes.AGET_SHORT
+    //     Opcodes.APUT
+    //     Opcodes.APUT_WIDE
+    //     Opcodes.APUT_OBJECT
+    //     Opcodes.APUT_BOOLEAN
+    //     Opcodes.APUT_BYTE
+    //     Opcodes.APUT_CHAR
+    //     Opcodes.APUT_SHORT
+    //     Opcodes.IGET
+    //     Opcodes.IGET_WIDE
+    //     Opcodes.IGET_OBJECT
+    //     Opcodes.IGET_BOOLEAN
+    //     Opcodes.IGET_BYTE
+    //     Opcodes.IGET_CHAR
+    //     Opcodes.IGET_SHORT
+    //     Opcodes.IPUT
+    //     Opcodes.IPUT_WIDE
+    //     Opcodes.IPUT_OBJECT
+    //     Opcodes.IPUT_BOOLEAN
+    //     Opcodes.IPUT_BYTE
+    //     Opcodes.IPUT_CHAR
+    //     Opcodes.IPUT_SHORT
+    //     Opcodes.SGET
+    //     Opcodes.SGET_WIDE
+    //     Opcodes.SGET_OBJECT
+    //     Opcodes.SGET_BOOLEAN
+    //     Opcodes.SGET_BYTE
+    //     Opcodes.SGET_CHAR
+    //     Opcodes.SGET_SHORT
+    //     Opcodes.SPUT
+    //     Opcodes.SPUT_WIDE
+    //     Opcodes.SPUT_OBJECT
+    //     Opcodes.SPUT_BOOLEAN
+    //     Opcodes.SPUT_BYTE
+    //     Opcodes.SPUT_CHAR
+    //     Opcodes.SPUT_SHORT
+    //     Opcodes.INVOKE_VIRTUAL
+    //     Opcodes.INVOKE_SUPER
+    //     Opcodes.INVOKE_DIRECT
+    //     Opcodes.INVOKE_STATIC
+    //     Opcodes.INVOKE_INTERFACE
+    //     Opcodes.NEG_INT
+    //     Opcodes.NOT_INT
+    //     Opcodes.NEG_LONG
+    //     Opcodes.NOT_LONG
+    //     Opcodes.NEG_FLOAT
+    //     Opcodes.NEG_DOUBLE
+    //     Opcodes.INT_TO_LONG
+    //     Opcodes.INT_TO_FLOAT
+    //     Opcodes.INT_TO_DOUBLE
+    //     Opcodes.LONG_TO_INT
+    //     Opcodes.LONG_TO_FLOAT
+    //     Opcodes.LONG_TO_DOUBLE
+    //     Opcodes.FLOAT_TO_INT
+    //     Opcodes.FLOAT_TO_LONG
+    //     Opcodes.FLOAT_TO_DOUBLE
+    //     Opcodes.DOUBLE_TO_INT
+    //     Opcodes.DOUBLE_TO_LONG
+    //     Opcodes.DOUBLE_TO_FLOAT
+    //     Opcodes.INT_TO_BYTE
+    //     Opcodes.INT_TO_CHAR
+    //     Opcodes.INT_TO_SHORT
+    //     Opcodes.ADD_INT_2ADDR
+    //     Opcodes.SUB_INT_2ADDR
+    //     Opcodes.MUL_INT_2ADDR
+    //     Opcodes.DIV_INT_2ADDR
+    //     Opcodes.REM_INT_2ADDR
+    //     Opcodes.AND_INT_2ADDR
+    //     Opcodes.OR_INT_2ADDR
+    //     Opcodes.XOR_INT_2ADDR
+    //     Opcodes.SHL_INT_2ADDR
+    //     Opcodes.SHR_INT_2ADDR
+    //     Opcodes.USHR_INT_2ADDR
+    //     Opcodes.ADD_LONG_2ADDR
+    //     Opcodes.SUB_LONG_2ADDR
+    //     Opcodes.MUL_LONG_2ADDR
+    //     Opcodes.DIV_LONG_2ADDR
+    //     Opcodes.REM_LONG_2ADDR
+    //     Opcodes.AND_LONG_2ADDR
+    //     Opcodes.OR_LONG_2ADDR
+    //     Opcodes.XOR_LONG_2ADDR
+    //     Opcodes.SHL_LONG_2ADDR
+    //     Opcodes.SHR_LONG_2ADDR
+    //     Opcodes.USHR_LONG_2ADDR
+    //     Opcodes.ADD_FLOAT_2ADDR
+    //     Opcodes.SUB_FLOAT_2ADDR
+    //     Opcodes.MUL_FLOAT_2ADDR
+    //     Opcodes.DIV_FLOAT_2ADDR
+    //     Opcodes.REM_FLOAT_2ADDR
+    //     Opcodes.ADD_DOUBLE_2ADDR
+    //     Opcodes.SUB_DOUBLE_2ADDR
+    //     Opcodes.MUL_DOUBLE_2ADDR
+    //     Opcodes.DIV_DOUBLE_2ADDR
+    //     Opcodes.REM_DOUBLE_2ADDR
+    //     Opcodes.ADD_INT_LIT8
+    //     Opcodes.RSUB_INT_LIT8
+    //     Opcodes.MUL_INT_LIT8
+    //     Opcodes.DIV_INT_LIT8
+    //     Opcodes.REM_INT_LIT8
+    //     Opcodes.AND_INT_LIT8
+    //     Opcodes.OR_INT_LIT8
+    //     Opcodes.XOR_INT_LIT8
+    //     Opcodes.SHL_INT_LIT8
+    //     Opcodes.SHR_INT_LIT8
+    //     Opcodes.USHR_INT_LIT8
     // END(first-opcodes)
 
     static {
@@ -318,10 +318,10 @@
         MAP.put(Rops.ADD_CONST_INT,        Dops.ADD_INT_LIT8);
         // Note: No dalvik ops for other types of add_const.
 
+        MAP.put(Rops.SUB_CONST_INT,        Dops.RSUB_INT_LIT8);
         /*
-         * Note: No dalvik ops for any type of sub_const; there's a
-         * *reverse* sub (constant - reg) for ints, though, but that
-         * should end up getting handled at optimization time.
+         * Note: No dalvik ops for other types of sub_const; instead
+         * there's a *reverse* sub (constant - reg) for ints only.
          */
 
         MAP.put(Rops.MUL_CONST_INT,        Dops.MUL_INT_LIT8);
diff --git a/dx/src/com/android/dx/dex/code/RopTranslator.java b/dx/src/com/android/dx/dex/code/RopTranslator.java
index a38ea11..9899c43 100644
--- a/dx/src/com/android/dx/dex/code/RopTranslator.java
+++ b/dx/src/com/android/dx/dex/code/RopTranslator.java
@@ -16,6 +16,7 @@
 
 package com.android.dx.dex.code;
 
+import com.android.dx.io.Opcodes;
 import com.android.dx.rop.code.BasicBlock;
 import com.android.dx.rop.code.BasicBlockList;
 import com.android.dx.rop.code.FillArrayDataInsn;
@@ -35,7 +36,6 @@
 import com.android.dx.rop.code.ThrowingInsn;
 import com.android.dx.rop.cst.Constant;
 import com.android.dx.rop.cst.CstInteger;
-import com.android.dx.rop.type.Type;
 import com.android.dx.util.Bits;
 import com.android.dx.util.IntList;
 
@@ -181,6 +181,7 @@
          * subsequent block in the case of synchronized methods.
          */
         method.getBlocks().forEachInsn(new Insn.BaseVisitor() {
+            @Override
             public void visitPlainCstInsn(PlainCstInsn insn) {
                if (insn.getOpcode().getOpcode() == RegOps.MOVE_PARAM) {
                     int param =
@@ -709,7 +710,7 @@
                 }
 
                 if ((rop.getOpcode() == RegOps.NEW_ARRAY) &&
-                    (opcode.getOpcode() != DalvOps.NEW_ARRAY)) {
+                    (opcode.getOpcode() != Opcodes.NEW_ARRAY)) {
                     /*
                      * It's a type-specific new-array-<primitive>, and
                      * so it should be turned into a SimpleInsn (no
diff --git a/dx/src/com/android/dx/dex/code/SwitchData.java b/dx/src/com/android/dx/dex/code/SwitchData.java
index 27a6342..8fc80b1 100644
--- a/dx/src/com/android/dx/dex/code/SwitchData.java
+++ b/dx/src/com/android/dx/dex/code/SwitchData.java
@@ -16,6 +16,7 @@
 
 package com.android.dx.dex.code;
 
+import com.android.dx.io.Opcodes;
 import com.android.dx.rop.code.RegisterSpecList;
 import com.android.dx.rop.code.SourcePosition;
 import com.android.dx.util.AnnotatedOutput;
@@ -108,7 +109,7 @@
             int lastCase = (sz == 0) ? 0 : cases.get(sz - 1);
             int outSz = lastCase - firstCase + 1;
 
-            out.writeShort(0x100 | DalvOps.NOP);
+            out.writeShort(Opcodes.PACKED_SWITCH_PAYLOAD);
             out.writeShort(outSz);
             out.writeInt(firstCase);
 
@@ -128,7 +129,7 @@
                 out.writeInt(relTarget);
             }
         } else {
-            out.writeShort(0x200 | DalvOps.NOP);
+            out.writeShort(Opcodes.SPARSE_SWITCH_PAYLOAD);
             out.writeShort(sz);
 
             for (int i = 0; i < sz; i++) {
@@ -181,7 +182,7 @@
         int sz = targets.length;
 
         sb.append(packed ? "packed" : "sparse");
-        sb.append("-switch-data // for switch @ ");
+        sb.append("-switch-payload // for switch @ ");
         sb.append(Hex.u2(baseAddress));
 
         for (int i = 0; i < sz; i++) {
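
The literal units being replaced, 0x100 | NOP and 0x200 | NOP, are exactly the packed-switch-payload and sparse-switch-payload ident values defined by the bytecode format, so the rewrite above changes names only, not output. A tiny standalone check of that equivalence; the constant names here are local stand-ins for the Opcodes fields:

    // The payload idents: high-order byte 0x01 or 0x02 over a NOP low byte.
    public class PayloadIdentSketch {
        static final int NOP = 0x00;
        static final int PACKED_SWITCH_PAYLOAD = 0x0100;
        static final int SPARSE_SWITCH_PAYLOAD = 0x0200;

        public static void main(String[] args) {
            System.out.println((0x100 | NOP) == PACKED_SWITCH_PAYLOAD); // true
            System.out.println((0x200 | NOP) == SPARSE_SWITCH_PAYLOAD); // true
        }
    }
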
diff --git a/dx/src/com/android/dx/dex/code/form/Form11n.java b/dx/src/com/android/dx/dex/code/form/Form11n.java
index 904aa6b..479af6e 100644
--- a/dx/src/com/android/dx/dex/code/form/Form11n.java
+++ b/dx/src/com/android/dx/dex/code/form/Form11n.java
@@ -24,6 +24,8 @@
 import com.android.dx.rop.cst.CstLiteralBits;
 import com.android.dx.util.AnnotatedOutput;
 
+import java.util.BitSet;
+
 /**
  * Instruction format {@code 11n}. See the instruction format spec
  * for details.
@@ -87,6 +89,16 @@
 
     /** {@inheritDoc} */
     @Override
+    public BitSet compatibleRegs(DalvInsn insn) {
+        RegisterSpecList regs = insn.getRegisters();
+        BitSet bits = new BitSet(1);
+
+        bits.set(0, unsignedFitsInNibble(regs.get(0).getReg()));
+        return bits;
+    }
+
+    /** {@inheritDoc} */
+    @Override
     public void writeTo(AnnotatedOutput out, DalvInsn insn) {
         RegisterSpecList regs = insn.getRegisters();
         int value =
diff --git a/dx/src/com/android/dx/dex/code/form/Form11x.java b/dx/src/com/android/dx/dex/code/form/Form11x.java
index 739e7d3..82dda65 100644
--- a/dx/src/com/android/dx/dex/code/form/Form11x.java
+++ b/dx/src/com/android/dx/dex/code/form/Form11x.java
@@ -22,6 +22,8 @@
 import com.android.dx.rop.code.RegisterSpecList;
 import com.android.dx.util.AnnotatedOutput;
 
+import java.util.BitSet;
+
 /**
  * Instruction format {@code 11x}. See the instruction format spec
  * for details.
@@ -69,6 +71,16 @@
 
     /** {@inheritDoc} */
     @Override
+    public BitSet compatibleRegs(DalvInsn insn) {
+        RegisterSpecList regs = insn.getRegisters();
+        BitSet bits = new BitSet(1);
+
+        bits.set(0, unsignedFitsInByte(regs.get(0).getReg()));
+        return bits;
+    }
+
+    /** {@inheritDoc} */
+    @Override
     public void writeTo(AnnotatedOutput out, DalvInsn insn) {
         RegisterSpecList regs = insn.getRegisters();
         write(out, opcodeUnit(insn, regs.get(0).getReg()));
diff --git a/dx/src/com/android/dx/dex/code/form/Form12x.java b/dx/src/com/android/dx/dex/code/form/Form12x.java
index 28c926c..aabab8a 100644
--- a/dx/src/com/android/dx/dex/code/form/Form12x.java
+++ b/dx/src/com/android/dx/dex/code/form/Form12x.java
@@ -17,13 +17,14 @@
 package com.android.dx.dex.code.form;
 
 import com.android.dx.dex.code.DalvInsn;
-import com.android.dx.dex.code.HighRegisterPrefix;
 import com.android.dx.dex.code.InsnFormat;
 import com.android.dx.dex.code.SimpleInsn;
 import com.android.dx.rop.code.RegisterSpec;
 import com.android.dx.rop.code.RegisterSpecList;
 import com.android.dx.util.AnnotatedOutput;
 
+import java.util.BitSet;
+
 /**
  * Instruction format {@code 12x}. See the instruction format spec
  * for details.
@@ -109,6 +110,17 @@
 
     /** {@inheritDoc} */
     @Override
+    public BitSet compatibleRegs(DalvInsn insn) {
+        RegisterSpecList regs = insn.getRegisters();
+        BitSet bits = new BitSet(2);
+
+        bits.set(0, unsignedFitsInNibble(regs.get(0).getReg()));
+        bits.set(1, unsignedFitsInNibble(regs.get(1).getReg()));
+        return bits;
+    }
+
+    /** {@inheritDoc} */
+    @Override
     public void writeTo(AnnotatedOutput out, DalvInsn insn) {
         RegisterSpecList regs = insn.getRegisters();
         int sz = regs.size();
diff --git a/dx/src/com/android/dx/dex/code/form/Form21c.java b/dx/src/com/android/dx/dex/code/form/Form21c.java
index 5a7ee45..0335dc7 100644
--- a/dx/src/com/android/dx/dex/code/form/Form21c.java
+++ b/dx/src/com/android/dx/dex/code/form/Form21c.java
@@ -27,6 +27,8 @@
 import com.android.dx.rop.cst.CstType;
 import com.android.dx.util.AnnotatedOutput;
 
+import java.util.BitSet;
+
 /**
  * Instruction format {@code 21c}. See the instruction format spec
  * for details.
@@ -116,6 +118,26 @@
 
     /** {@inheritDoc} */
     @Override
+    public BitSet compatibleRegs(DalvInsn insn) {
+        RegisterSpecList regs = insn.getRegisters();
+        int sz = regs.size();
+        BitSet bits = new BitSet(sz);
+        boolean compat = unsignedFitsInByte(regs.get(0).getReg());
+
+        if (sz == 1) {
+            bits.set(0, compat);
+        } else {
+            if (regs.get(0).getReg() == regs.get(1).getReg()) {
+                bits.set(0, compat);
+                bits.set(1, compat);
+            }
+        }
+
+        return bits;
+    }
+
+    /** {@inheritDoc} */
+    @Override
     public void writeTo(AnnotatedOutput out, DalvInsn insn) {
         RegisterSpecList regs = insn.getRegisters();
         int cpi = ((CstInsn) insn).getIndex();
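
Format 21c encodes only one 8-bit register, so when an instruction carries two register specs (a result and a source that must end up in the same register), both are flagged compatible only if they name the same register and that register fits in a byte; otherwise neither is, and expansion will have to copy. A small standalone sketch of that condition with concrete inputs (class and helper names are illustrative):

    import java.util.BitSet;

    // Sketch of the two-operand case in Form21c.compatibleRegs(): both specs
    // must name the same register, and it must fit in the single 8-bit field.
    public class Form21cRegsSketch {
        static BitSet compatibleRegs(int[] regs) {
            BitSet bits = new BitSet(regs.length);
            boolean compat = regs[0] <= 0xff;
            if (regs.length == 1) {
                bits.set(0, compat);
            } else if (regs[0] == regs[1]) {
                bits.set(0, compat);
                bits.set(1, compat);
            }
            return bits;
        }

        public static void main(String[] args) {
            System.out.println(compatibleRegs(new int[] { 5, 5 })); // {0, 1}
            System.out.println(compatibleRegs(new int[] { 5, 6 })); // {}
        }
    }
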
diff --git a/dx/src/com/android/dx/dex/code/form/Form21h.java b/dx/src/com/android/dx/dex/code/form/Form21h.java
index 03ebc29..02cc0fd 100644
--- a/dx/src/com/android/dx/dex/code/form/Form21h.java
+++ b/dx/src/com/android/dx/dex/code/form/Form21h.java
@@ -24,6 +24,8 @@
 import com.android.dx.rop.cst.CstLiteralBits;
 import com.android.dx.util.AnnotatedOutput;
 
+import java.util.BitSet;
+
 /**
  * Instruction format {@code 21h}. See the instruction format spec
  * for details.
@@ -97,6 +99,16 @@
 
     /** {@inheritDoc} */
     @Override
+    public BitSet compatibleRegs(DalvInsn insn) {
+        RegisterSpecList regs = insn.getRegisters();
+        BitSet bits = new BitSet(1);
+
+        bits.set(0, unsignedFitsInByte(regs.get(0).getReg()));
+        return bits;
+    }
+
+    /** {@inheritDoc} */
+    @Override
     public void writeTo(AnnotatedOutput out, DalvInsn insn) {
         RegisterSpecList regs = insn.getRegisters();
         CstLiteralBits cb = (CstLiteralBits) ((CstInsn) insn).getConstant();
diff --git a/dx/src/com/android/dx/dex/code/form/Form21s.java b/dx/src/com/android/dx/dex/code/form/Form21s.java
index b7a3f72..9264ec0 100644
--- a/dx/src/com/android/dx/dex/code/form/Form21s.java
+++ b/dx/src/com/android/dx/dex/code/form/Form21s.java
@@ -24,6 +24,8 @@
 import com.android.dx.rop.cst.CstLiteralBits;
 import com.android.dx.util.AnnotatedOutput;
 
+import java.util.BitSet;
+
 /**
  * Instruction format {@code 21s}. See the instruction format spec
  * for details.
@@ -86,6 +88,16 @@
 
     /** {@inheritDoc} */
     @Override
+    public BitSet compatibleRegs(DalvInsn insn) {
+        RegisterSpecList regs = insn.getRegisters();
+        BitSet bits = new BitSet(1);
+
+        bits.set(0, unsignedFitsInByte(regs.get(0).getReg()));
+        return bits;
+    }
+
+    /** {@inheritDoc} */
+    @Override
     public void writeTo(AnnotatedOutput out, DalvInsn insn) {
         RegisterSpecList regs = insn.getRegisters();
         int value =
diff --git a/dx/src/com/android/dx/dex/code/form/Form21t.java b/dx/src/com/android/dx/dex/code/form/Form21t.java
index d269097..8adb668 100644
--- a/dx/src/com/android/dx/dex/code/form/Form21t.java
+++ b/dx/src/com/android/dx/dex/code/form/Form21t.java
@@ -22,6 +22,8 @@
 import com.android.dx.rop.code.RegisterSpecList;
 import com.android.dx.util.AnnotatedOutput;
 
+import java.util.BitSet;
+
 /**
  * Instruction format {@code 21t}. See the instruction format spec
  * for details.
@@ -74,6 +76,16 @@
 
     /** {@inheritDoc} */
     @Override
+    public BitSet compatibleRegs(DalvInsn insn) {
+        RegisterSpecList regs = insn.getRegisters();
+        BitSet bits = new BitSet(1);
+
+        bits.set(0, unsignedFitsInByte(regs.get(0).getReg()));
+        return bits;
+    }
+
+    /** {@inheritDoc} */
+    @Override
     public boolean branchFits(TargetInsn insn) {
         int offset = insn.getTargetOffset();
 
diff --git a/dx/src/com/android/dx/dex/code/form/Form22b.java b/dx/src/com/android/dx/dex/code/form/Form22b.java
index eb3b884..e5a8b5d 100644
--- a/dx/src/com/android/dx/dex/code/form/Form22b.java
+++ b/dx/src/com/android/dx/dex/code/form/Form22b.java
@@ -24,6 +24,8 @@
 import com.android.dx.rop.cst.CstLiteralBits;
 import com.android.dx.util.AnnotatedOutput;
 
+import java.util.BitSet;
+
 /**
  * Instruction format {@code 22b}. See the instruction format spec
  * for details.
@@ -88,6 +90,17 @@
 
     /** {@inheritDoc} */
     @Override
+    public BitSet compatibleRegs(DalvInsn insn) {
+        RegisterSpecList regs = insn.getRegisters();
+        BitSet bits = new BitSet(2);
+
+        bits.set(0, unsignedFitsInByte(regs.get(0).getReg()));
+        bits.set(1, unsignedFitsInByte(regs.get(1).getReg()));
+        return bits;
+    }
+
+    /** {@inheritDoc} */
+    @Override
     public void writeTo(AnnotatedOutput out, DalvInsn insn) {
         RegisterSpecList regs = insn.getRegisters();
         int value =
diff --git a/dx/src/com/android/dx/dex/code/form/Form22c.java b/dx/src/com/android/dx/dex/code/form/Form22c.java
index f2a3555..5ffdb86 100644
--- a/dx/src/com/android/dx/dex/code/form/Form22c.java
+++ b/dx/src/com/android/dx/dex/code/form/Form22c.java
@@ -22,10 +22,11 @@
 import com.android.dx.rop.code.RegisterSpecList;
 import com.android.dx.rop.cst.Constant;
 import com.android.dx.rop.cst.CstFieldRef;
-import com.android.dx.rop.cst.CstString;
 import com.android.dx.rop.cst.CstType;
 import com.android.dx.util.AnnotatedOutput;
 
+import java.util.BitSet;
+
 /**
  * Instruction format {@code 22c}. See the instruction format spec
  * for details.
@@ -91,6 +92,17 @@
 
     /** {@inheritDoc} */
     @Override
+    public BitSet compatibleRegs(DalvInsn insn) {
+        RegisterSpecList regs = insn.getRegisters();
+        BitSet bits = new BitSet(2);
+
+        bits.set(0, unsignedFitsInNibble(regs.get(0).getReg()));
+        bits.set(1, unsignedFitsInNibble(regs.get(1).getReg()));
+        return bits;
+    }
+
+    /** {@inheritDoc} */
+    @Override
     public void writeTo(AnnotatedOutput out, DalvInsn insn) {
         RegisterSpecList regs = insn.getRegisters();
         int cpi = ((CstInsn) insn).getIndex();
diff --git a/dx/src/com/android/dx/dex/code/form/Form22s.java b/dx/src/com/android/dx/dex/code/form/Form22s.java
index 4b67071..03d180a 100644
--- a/dx/src/com/android/dx/dex/code/form/Form22s.java
+++ b/dx/src/com/android/dx/dex/code/form/Form22s.java
@@ -24,6 +24,8 @@
 import com.android.dx.rop.cst.CstLiteralBits;
 import com.android.dx.util.AnnotatedOutput;
 
+import java.util.BitSet;
+
 /**
  * Instruction format {@code 22s}. See the instruction format spec
  * for details.
@@ -88,6 +90,17 @@
 
     /** {@inheritDoc} */
     @Override
+    public BitSet compatibleRegs(DalvInsn insn) {
+        RegisterSpecList regs = insn.getRegisters();
+        BitSet bits = new BitSet(2);
+
+        bits.set(0, unsignedFitsInNibble(regs.get(0).getReg()));
+        bits.set(1, unsignedFitsInNibble(regs.get(1).getReg()));
+        return bits;
+    }
+
+    /** {@inheritDoc} */
+    @Override
     public void writeTo(AnnotatedOutput out, DalvInsn insn) {
         RegisterSpecList regs = insn.getRegisters();
         int value =
diff --git a/dx/src/com/android/dx/dex/code/form/Form22t.java b/dx/src/com/android/dx/dex/code/form/Form22t.java
index d216ccc..15ce0f8 100644
--- a/dx/src/com/android/dx/dex/code/form/Form22t.java
+++ b/dx/src/com/android/dx/dex/code/form/Form22t.java
@@ -22,6 +22,8 @@
 import com.android.dx.rop.code.RegisterSpecList;
 import com.android.dx.util.AnnotatedOutput;
 
+import java.util.BitSet;
+
 /**
  * Instruction format {@code 22t}. See the instruction format spec
  * for details.
@@ -76,6 +78,17 @@
 
     /** {@inheritDoc} */
     @Override
+    public BitSet compatibleRegs(DalvInsn insn) {
+        RegisterSpecList regs = insn.getRegisters();
+        BitSet bits = new BitSet(2);
+
+        bits.set(0, unsignedFitsInNibble(regs.get(0).getReg()));
+        bits.set(1, unsignedFitsInNibble(regs.get(1).getReg()));
+        return bits;
+    }
+
+    /** {@inheritDoc} */
+    @Override
     public boolean branchFits(TargetInsn insn) {
         int offset = insn.getTargetOffset();
 
diff --git a/dx/src/com/android/dx/dex/code/form/Form22x.java b/dx/src/com/android/dx/dex/code/form/Form22x.java
index daf1da2..01eec0b 100644
--- a/dx/src/com/android/dx/dex/code/form/Form22x.java
+++ b/dx/src/com/android/dx/dex/code/form/Form22x.java
@@ -22,6 +22,8 @@
 import com.android.dx.rop.code.RegisterSpecList;
 import com.android.dx.util.AnnotatedOutput;
 
+import java.util.BitSet;
+
 /**
  * Instruction format {@code 22x}. See the instruction format spec
  * for details.
@@ -71,6 +73,17 @@
 
     /** {@inheritDoc} */
     @Override
+    public BitSet compatibleRegs(DalvInsn insn) {
+        RegisterSpecList regs = insn.getRegisters();
+        BitSet bits = new BitSet(2);
+
+        bits.set(0, unsignedFitsInByte(regs.get(0).getReg()));
+        bits.set(1, unsignedFitsInShort(regs.get(1).getReg()));
+        return bits;
+    }
+
+    /** {@inheritDoc} */
+    @Override
     public void writeTo(AnnotatedOutput out, DalvInsn insn) {
         RegisterSpecList regs = insn.getRegisters();
         write(out,
diff --git a/dx/src/com/android/dx/dex/code/form/Form23x.java b/dx/src/com/android/dx/dex/code/form/Form23x.java
index 849e8be..9164482 100644
--- a/dx/src/com/android/dx/dex/code/form/Form23x.java
+++ b/dx/src/com/android/dx/dex/code/form/Form23x.java
@@ -22,6 +22,8 @@
 import com.android.dx.rop.code.RegisterSpecList;
 import com.android.dx.util.AnnotatedOutput;
 
+import java.util.BitSet;
+
 /**
  * Instruction format {@code 23x}. See the instruction format spec
  * for details.
@@ -73,6 +75,18 @@
 
     /** {@inheritDoc} */
     @Override
+    public BitSet compatibleRegs(DalvInsn insn) {
+        RegisterSpecList regs = insn.getRegisters();
+        BitSet bits = new BitSet(3);
+
+        bits.set(0, unsignedFitsInByte(regs.get(0).getReg()));
+        bits.set(1, unsignedFitsInByte(regs.get(1).getReg()));
+        bits.set(2, unsignedFitsInByte(regs.get(2).getReg()));
+        return bits;
+    }
+
+    /** {@inheritDoc} */
+    @Override
     public void writeTo(AnnotatedOutput out, DalvInsn insn) {
         RegisterSpecList regs = insn.getRegisters();
         write(out,
diff --git a/dx/src/com/android/dx/dex/code/form/Form31c.java b/dx/src/com/android/dx/dex/code/form/Form31c.java
index 7383651..3295fda 100644
--- a/dx/src/com/android/dx/dex/code/form/Form31c.java
+++ b/dx/src/com/android/dx/dex/code/form/Form31c.java
@@ -27,6 +27,8 @@
 import com.android.dx.rop.cst.CstType;
 import com.android.dx.util.AnnotatedOutput;
 
+import java.util.BitSet;
+
 /**
  * Instruction format {@code 31c}. See the instruction format spec
  * for details.
@@ -111,6 +113,26 @@
 
     /** {@inheritDoc} */
     @Override
+    public BitSet compatibleRegs(DalvInsn insn) {
+        RegisterSpecList regs = insn.getRegisters();
+        int sz = regs.size();
+        BitSet bits = new BitSet(sz);
+        boolean compat = unsignedFitsInByte(regs.get(0).getReg());
+
+        if (sz == 1) {
+            bits.set(0, compat);
+        } else {
+            if (regs.get(0).getReg() == regs.get(1).getReg()) {
+                bits.set(0, compat);
+                bits.set(1, compat);
+            }
+        }
+
+        return bits;
+    }
+
+    /** {@inheritDoc} */
+    @Override
     public void writeTo(AnnotatedOutput out, DalvInsn insn) {
         RegisterSpecList regs = insn.getRegisters();
         int cpi = ((CstInsn) insn).getIndex();
diff --git a/dx/src/com/android/dx/dex/code/form/Form31i.java b/dx/src/com/android/dx/dex/code/form/Form31i.java
index 7e2583d..b52341d 100644
--- a/dx/src/com/android/dx/dex/code/form/Form31i.java
+++ b/dx/src/com/android/dx/dex/code/form/Form31i.java
@@ -24,6 +24,8 @@
 import com.android.dx.rop.cst.CstLiteralBits;
 import com.android.dx.util.AnnotatedOutput;
 
+import java.util.BitSet;
+
 /**
  * Instruction format {@code 31i}. See the instruction format spec
  * for details.
@@ -84,6 +86,16 @@
 
     /** {@inheritDoc} */
     @Override
+    public BitSet compatibleRegs(DalvInsn insn) {
+        RegisterSpecList regs = insn.getRegisters();
+        BitSet bits = new BitSet(1);
+
+        bits.set(0, unsignedFitsInByte(regs.get(0).getReg()));
+        return bits;
+    }
+
+    /** {@inheritDoc} */
+    @Override
     public void writeTo(AnnotatedOutput out, DalvInsn insn) {
         RegisterSpecList regs = insn.getRegisters();
         int value =
diff --git a/dx/src/com/android/dx/dex/code/form/Form31t.java b/dx/src/com/android/dx/dex/code/form/Form31t.java
index c7ead27..1999bba 100644
--- a/dx/src/com/android/dx/dex/code/form/Form31t.java
+++ b/dx/src/com/android/dx/dex/code/form/Form31t.java
@@ -22,6 +22,8 @@
 import com.android.dx.rop.code.RegisterSpecList;
 import com.android.dx.util.AnnotatedOutput;
 
+import java.util.BitSet;
+
 /**
  * Instruction format {@code 31t}. See the instruction format spec
  * for details.
@@ -73,6 +75,16 @@
 
     /** {@inheritDoc} */
     @Override
+    public BitSet compatibleRegs(DalvInsn insn) {
+        RegisterSpecList regs = insn.getRegisters();
+        BitSet bits = new BitSet(1);
+
+        bits.set(0, unsignedFitsInByte(regs.get(0).getReg()));
+        return bits;
+    }
+
+    /** {@inheritDoc} */
+    @Override
     public boolean branchFits(TargetInsn insn) {
         return true;
     }
diff --git a/dx/src/com/android/dx/dex/code/form/Form32s.java b/dx/src/com/android/dx/dex/code/form/Form32s.java
index d9118a3..e081470 100644
--- a/dx/src/com/android/dx/dex/code/form/Form32s.java
+++ b/dx/src/com/android/dx/dex/code/form/Form32s.java
@@ -24,6 +24,8 @@
 import com.android.dx.rop.cst.CstLiteralBits;
 import com.android.dx.util.AnnotatedOutput;
 
+import java.util.BitSet;
+
 /**
  * Instruction format {@code 32s}. See the instruction format spec
  * for details.
@@ -92,6 +94,17 @@
 
     /** {@inheritDoc} */
     @Override
+    public BitSet compatibleRegs(DalvInsn insn) {
+        RegisterSpecList regs = insn.getRegisters();
+        BitSet bits = new BitSet(2);
+
+        bits.set(0, unsignedFitsInByte(regs.get(0).getReg()));
+        bits.set(1, unsignedFitsInByte(regs.get(1).getReg()));
+        return bits;
+    }
+
+    /** {@inheritDoc} */
+    @Override
     public void writeTo(AnnotatedOutput out, DalvInsn insn) {
         RegisterSpecList regs = insn.getRegisters();
         int value =
diff --git a/dx/src/com/android/dx/dex/code/form/Form32x.java b/dx/src/com/android/dx/dex/code/form/Form32x.java
index 87ed6ba..abed0e9 100644
--- a/dx/src/com/android/dx/dex/code/form/Form32x.java
+++ b/dx/src/com/android/dx/dex/code/form/Form32x.java
@@ -22,6 +22,8 @@
 import com.android.dx.rop.code.RegisterSpecList;
 import com.android.dx.util.AnnotatedOutput;
 
+import java.util.BitSet;
+
 /**
  * Instruction format {@code 32x}. See the instruction format spec
  * for details.
@@ -70,6 +72,17 @@
 
     /** {@inheritDoc} */
     @Override
+    public BitSet compatibleRegs(DalvInsn insn) {
+        RegisterSpecList regs = insn.getRegisters();
+        BitSet bits = new BitSet(2);
+
+        bits.set(0, unsignedFitsInShort(regs.get(0).getReg()));
+        bits.set(1, unsignedFitsInShort(regs.get(1).getReg()));
+        return bits;
+    }
+
+    /** {@inheritDoc} */
+    @Override
     public void writeTo(AnnotatedOutput out, DalvInsn insn) {
         RegisterSpecList regs = insn.getRegisters();
 
diff --git a/dx/src/com/android/dx/dex/code/form/Form33x.java b/dx/src/com/android/dx/dex/code/form/Form33x.java
index fb143a4..9a569a0 100644
--- a/dx/src/com/android/dx/dex/code/form/Form33x.java
+++ b/dx/src/com/android/dx/dex/code/form/Form33x.java
@@ -22,6 +22,8 @@
 import com.android.dx.rop.code.RegisterSpecList;
 import com.android.dx.util.AnnotatedOutput;
 
+import java.util.BitSet;
+
 /**
  * Instruction format {@code 33x}. See the instruction format spec
  * for details.
@@ -77,6 +79,18 @@
 
     /** {@inheritDoc} */
     @Override
+    public BitSet compatibleRegs(DalvInsn insn) {
+        RegisterSpecList regs = insn.getRegisters();
+        BitSet bits = new BitSet(3);
+
+        bits.set(0, unsignedFitsInByte(regs.get(0).getReg()));
+        bits.set(1, unsignedFitsInByte(regs.get(1).getReg()));
+        bits.set(2, unsignedFitsInShort(regs.get(2).getReg()));
+        return bits;
+    }
+
+    /** {@inheritDoc} */
+    @Override
     public void writeTo(AnnotatedOutput out, DalvInsn insn) {
         RegisterSpecList regs = insn.getRegisters();
         write(out,
diff --git a/dx/src/com/android/dx/dex/code/form/Form35c.java b/dx/src/com/android/dx/dex/code/form/Form35c.java
index a8b6f44..b9c12c6 100644
--- a/dx/src/com/android/dx/dex/code/form/Form35c.java
+++ b/dx/src/com/android/dx/dex/code/form/Form35c.java
@@ -27,6 +27,8 @@
 import com.android.dx.rop.type.Type;
 import com.android.dx.util.AnnotatedOutput;
 
+import java.util.BitSet;
+
 /**
  * Instruction format {@code 35c}. See the instruction format spec
  * for details.
@@ -95,6 +97,28 @@
 
     /** {@inheritDoc} */
     @Override
+    public BitSet compatibleRegs(DalvInsn insn) {
+        RegisterSpecList regs = insn.getRegisters();
+        int sz = regs.size();
+        BitSet bits = new BitSet(sz);
+
+        for (int i = 0; i < sz; i++) {
+            RegisterSpec reg = regs.get(i);
+            /*
+             * The check below adds (category - 1) to the register, to
+             * account for the fact that the second half of a
+             * category-2 register has to be represented explicitly in
+             * the result.
+             */
+            bits.set(i, unsignedFitsInNibble(reg.getReg() +
+                                             reg.getCategory() - 1));
+        }
+
+        return bits;
+    }
+
+    /** {@inheritDoc} */
+    @Override
     public void writeTo(AnnotatedOutput out, DalvInsn insn) {
         int cpi = ((CstInsn) insn).getIndex();
         RegisterSpecList regs = explicitize(insn.getRegisters());
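
The (category - 1) adjustment reflects that a category-2 value starting at vN also occupies v(N+1), and that last register must still be nameable in the 4-bit fields of format 35c. A quick worked check (standalone, illustrative names):

    // Worked check of the nibble-fit test with the (category - 1) adjustment.
    public class NibbleFitSketch {
        static boolean fitsInNibble(int reg) { return reg >= 0 && reg <= 0xf; }

        static boolean operandFits(int reg, int category) {
            return fitsInNibble(reg + category - 1);
        }

        public static void main(String[] args) {
            System.out.println(operandFits(14, 2)); // true: uses v14 and v15
            System.out.println(operandFits(15, 2)); // false: would need v16
            System.out.println(operandFits(15, 1)); // true: v15 alone is fine
        }
    }
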
diff --git a/dx/src/com/android/dx/dex/code/form/Form3rc.java b/dx/src/com/android/dx/dex/code/form/Form3rc.java
index fde7744..1727af5 100644
--- a/dx/src/com/android/dx/dex/code/form/Form3rc.java
+++ b/dx/src/com/android/dx/dex/code/form/Form3rc.java
@@ -19,7 +19,6 @@
 import com.android.dx.dex.code.CstInsn;
 import com.android.dx.dex.code.DalvInsn;
 import com.android.dx.dex.code.InsnFormat;
-import com.android.dx.rop.code.RegisterSpec;
 import com.android.dx.rop.code.RegisterSpecList;
 import com.android.dx.rop.cst.Constant;
 import com.android.dx.rop.cst.CstMethodRef;
diff --git a/dx/src/com/android/dx/dex/code/form/Form41c.java b/dx/src/com/android/dx/dex/code/form/Form41c.java
index 65b4bec..24067bc 100644
--- a/dx/src/com/android/dx/dex/code/form/Form41c.java
+++ b/dx/src/com/android/dx/dex/code/form/Form41c.java
@@ -23,10 +23,11 @@
 import com.android.dx.rop.code.RegisterSpecList;
 import com.android.dx.rop.cst.Constant;
 import com.android.dx.rop.cst.CstFieldRef;
-import com.android.dx.rop.cst.CstString;
 import com.android.dx.rop.cst.CstType;
 import com.android.dx.util.AnnotatedOutput;
 
+import java.util.BitSet;
+
 /**
  * Instruction format {@code 41c}. See the instruction format spec
  * for details.
@@ -114,6 +115,26 @@
 
     /** {@inheritDoc} */
     @Override
+    public BitSet compatibleRegs(DalvInsn insn) {
+        RegisterSpecList regs = insn.getRegisters();
+        int sz = regs.size();
+        BitSet bits = new BitSet(sz);
+        boolean compat = unsignedFitsInByte(regs.get(0).getReg());
+
+        if (sz == 1) {
+            bits.set(0, compat);
+        } else {
+            if (regs.get(0).getReg() == regs.get(1).getReg()) {
+                bits.set(0, compat);
+                bits.set(1, compat);
+            }
+        }
+
+        return bits;
+    }
+
+    /** {@inheritDoc} */
+    @Override
     public void writeTo(AnnotatedOutput out, DalvInsn insn) {
         RegisterSpecList regs = insn.getRegisters();
         int cpi = ((CstInsn) insn).getIndex();
diff --git a/dx/src/com/android/dx/dex/code/form/Form51l.java b/dx/src/com/android/dx/dex/code/form/Form51l.java
index 447351d..4dc7bcd 100644
--- a/dx/src/com/android/dx/dex/code/form/Form51l.java
+++ b/dx/src/com/android/dx/dex/code/form/Form51l.java
@@ -25,6 +25,8 @@
 import com.android.dx.rop.cst.CstLiteralBits;
 import com.android.dx.util.AnnotatedOutput;
 
+import java.util.BitSet;
+
 /**
  * Instruction format {@code 51l}. See the instruction format spec
  * for details.
@@ -81,6 +83,16 @@
 
     /** {@inheritDoc} */
     @Override
+    public BitSet compatibleRegs(DalvInsn insn) {
+        RegisterSpecList regs = insn.getRegisters();
+        BitSet bits = new BitSet(1);
+
+        bits.set(0, unsignedFitsInByte(regs.get(0).getReg()));
+        return bits;
+    }
+
+    /** {@inheritDoc} */
+    @Override
     public void writeTo(AnnotatedOutput out, DalvInsn insn) {
         RegisterSpecList regs = insn.getRegisters();
         long value =
diff --git a/dx/src/com/android/dx/dex/code/form/Form52c.java b/dx/src/com/android/dx/dex/code/form/Form52c.java
index 7ebeb85..acd2124 100644
--- a/dx/src/com/android/dx/dex/code/form/Form52c.java
+++ b/dx/src/com/android/dx/dex/code/form/Form52c.java
@@ -22,10 +22,11 @@
 import com.android.dx.rop.code.RegisterSpecList;
 import com.android.dx.rop.cst.Constant;
 import com.android.dx.rop.cst.CstFieldRef;
-import com.android.dx.rop.cst.CstString;
 import com.android.dx.rop.cst.CstType;
 import com.android.dx.util.AnnotatedOutput;
 
+import java.util.BitSet;
+
 /**
  * Instruction format {@code 52c}. See the instruction format spec
  * for details.
@@ -90,6 +91,17 @@
 
     /** {@inheritDoc} */
     @Override
+    public BitSet compatibleRegs(DalvInsn insn) {
+        RegisterSpecList regs = insn.getRegisters();
+        BitSet bits = new BitSet(2);
+
+        bits.set(0, unsignedFitsInShort(regs.get(0).getReg()));
+        bits.set(1, unsignedFitsInShort(regs.get(1).getReg()));
+        return bits;
+    }
+
+    /** {@inheritDoc} */
+    @Override
     public void writeTo(AnnotatedOutput out, DalvInsn insn) {
         RegisterSpecList regs = insn.getRegisters();
         int cpi = ((CstInsn) insn).getIndex();
diff --git a/dx/src/com/android/dx/dex/code/form/Form5rc.java b/dx/src/com/android/dx/dex/code/form/Form5rc.java
index b05acd6..0c54702 100644
--- a/dx/src/com/android/dx/dex/code/form/Form5rc.java
+++ b/dx/src/com/android/dx/dex/code/form/Form5rc.java
@@ -19,7 +19,6 @@
 import com.android.dx.dex.code.CstInsn;
 import com.android.dx.dex.code.DalvInsn;
 import com.android.dx.dex.code.InsnFormat;
-import com.android.dx.rop.code.RegisterSpec;
 import com.android.dx.rop.code.RegisterSpecList;
 import com.android.dx.rop.cst.Constant;
 import com.android.dx.rop.cst.CstMethodRef;
@@ -101,6 +100,6 @@
         int firstReg = (regs.size() == 0) ? 0 : regs.get(0).getReg();
         int count = regs.getWordCount();
 
-        write(out, opcodeUnit(insn), cpi, (short) firstReg, (short) count);
+        write(out, opcodeUnit(insn), cpi, (short) count, (short) firstReg);
     }
 }
diff --git a/dx/src/com/android/dx/dex/code/form/SpecialFormat.java b/dx/src/com/android/dx/dex/code/form/SpecialFormat.java
index 2d04964..87091b5 100644
--- a/dx/src/com/android/dx/dex/code/form/SpecialFormat.java
+++ b/dx/src/com/android/dx/dex/code/form/SpecialFormat.java
@@ -17,7 +17,6 @@
 package com.android.dx.dex.code.form;
 
 import com.android.dx.dex.code.DalvInsn;
-import com.android.dx.dex.code.DalvOps;
 import com.android.dx.dex.code.InsnFormat;
 import com.android.dx.util.AnnotatedOutput;
 
diff --git a/dx/src/com/android/dx/dex/file/CatchStructs.java b/dx/src/com/android/dx/dex/file/CatchStructs.java
index e07ec29..8b0f1bd 100644
--- a/dx/src/com/android/dx/dex/file/CatchStructs.java
+++ b/dx/src/com/android/dx/dex/file/CatchStructs.java
@@ -19,8 +19,6 @@
 import com.android.dx.dex.code.CatchHandlerList;
 import com.android.dx.dex.code.CatchTable;
 import com.android.dx.dex.code.DalvCode;
-import com.android.dx.rop.cst.CstType;
-import com.android.dx.rop.type.Type;
 import com.android.dx.util.AnnotatedOutput;
 import com.android.dx.util.ByteArrayAnnotatedOutput;
 import com.android.dx.util.Hex;
@@ -141,7 +139,7 @@
 
         // Write out the handlers "header" consisting of its size in entries.
         encodedHandlerHeaderSize =
-            out.writeUnsignedLeb128(handlerOffsets.size());
+            out.writeUleb128(handlerOffsets.size());
 
         // Now write the lists out in order, noting the offset of each.
         for (Map.Entry<CatchHandlerList, Integer> mapping :
@@ -155,21 +153,21 @@
 
             if (catchesAll) {
                 // A size <= 0 means that the list ends with a catch-all.
-                out.writeSignedLeb128(-(listSize - 1));
+                out.writeSleb128(-(listSize - 1));
                 listSize--;
             } else {
-                out.writeSignedLeb128(listSize);
+                out.writeSleb128(listSize);
             }
 
             for (int i = 0; i < listSize; i++) {
                 CatchHandlerList.Entry entry = list.get(i);
-                out.writeUnsignedLeb128(
+                out.writeUleb128(
                         typeIds.indexOf(entry.getExceptionType()));
-                out.writeUnsignedLeb128(entry.getHandler());
+                out.writeUleb128(entry.getHandler());
             }
 
             if (catchesAll) {
-                out.writeUnsignedLeb128(list.get(listSize).getHandler());
+                out.writeUleb128(list.get(listSize).getHandler());
             }
         }
 
diff --git a/dx/src/com/android/dx/dex/file/ClassDataItem.java b/dx/src/com/android/dx/dex/file/ClassDataItem.java
index 275ae99..e9ae18b 100644
--- a/dx/src/com/android/dx/dex/file/ClassDataItem.java
+++ b/dx/src/com/android/dx/dex/file/ClassDataItem.java
@@ -23,15 +23,12 @@
 import com.android.dx.rop.cst.Zeroes;
 import com.android.dx.util.ByteArrayAnnotatedOutput;
 import com.android.dx.util.AnnotatedOutput;
-import com.android.dx.util.Hex;
 import com.android.dx.util.Writers;
 
 import java.io.PrintWriter;
 import java.io.Writer;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collections;
-import java.util.List;
 import java.util.HashMap;
 
 /**
@@ -379,7 +376,7 @@
                             size));
         }
 
-        out.writeUnsignedLeb128(size);
+        out.writeUleb128(size);
     }
 
     /**
diff --git a/dx/src/com/android/dx/dex/file/ClassDefItem.java b/dx/src/com/android/dx/dex/file/ClassDefItem.java
index 4d1719b..4132fb9 100644
--- a/dx/src/com/android/dx/dex/file/ClassDefItem.java
+++ b/dx/src/com/android/dx/dex/file/ClassDefItem.java
@@ -16,6 +16,7 @@
 
 package com.android.dx.dex.file;
 
+import com.android.dx.dex.SizeOf;
 import com.android.dx.rop.annotation.Annotations;
 import com.android.dx.rop.annotation.AnnotationsList;
 import com.android.dx.rop.code.AccessFlags;
@@ -34,7 +35,6 @@
 import java.io.PrintWriter;
 import java.io.Writer;
 import java.util.ArrayList;
-import java.util.TreeSet;
 
 /**
  * Representation of a Dalvik class, which is basically a set of
@@ -42,8 +42,6 @@
  * information.
  */
 public final class ClassDefItem extends IndexedItem {
-    /** size of instances when written out to a file, in bytes */
-    public static final int WRITE_SIZE = 32;
 
     /** {@code non-null;} type constant for this class */
     private final CstType thisClass;
@@ -122,7 +120,7 @@
     /** {@inheritDoc} */
     @Override
     public int writeSize() {
-        return WRITE_SIZE;
+        return SizeOf.CLASS_DEF_ITEM;
     }
 
     /** {@inheritDoc} */
diff --git a/dx/src/com/android/dx/dex/file/DebugInfoDecoder.java b/dx/src/com/android/dx/dex/file/DebugInfoDecoder.java
index e823816..ee275ce 100644
--- a/dx/src/com/android/dx/dex/file/DebugInfoDecoder.java
+++ b/dx/src/com/android/dx/dex/file/DebugInfoDecoder.java
@@ -27,9 +27,11 @@
 import com.android.dx.rop.type.Type;
 import com.android.dx.util.ExceptionWithContext;
 
+import com.android.dx.util.Leb128Utils;
 import java.io.ByteArrayInputStream;
+import java.io.DataInput;
+import java.io.DataInputStream;
 import java.io.IOException;
-import java.io.InputStream;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -218,8 +220,8 @@
      * @return index into file's string ids table, -1 means null
      * @throws IOException
      */
-    private int readStringIndex(InputStream bs) throws IOException {
-        int offsetIndex = readUnsignedLeb128(bs);
+    private int readStringIndex(DataInput bs) throws IOException {
+        int offsetIndex = Leb128Utils.readUnsignedLeb128(bs);
 
         return offsetIndex - 1;
     }
@@ -237,10 +239,10 @@
     }
 
     private void decode0() throws IOException {
-        ByteArrayInputStream bs = new ByteArrayInputStream(encoded);
+        DataInput bs = new DataInputStream(new ByteArrayInputStream(encoded));
 
-        line = readUnsignedLeb128(bs);
-        int szParams = readUnsignedLeb128(bs);
+        line = Leb128Utils.readUnsignedLeb128(bs);
+        int szParams = Leb128Utils.readUnsignedLeb128(bs);
         StdTypeList params = desc.getParameterTypes();
         int curReg = getParamBase();
 
@@ -281,17 +283,11 @@
         }
 
         for (;;) {
-            int opcode = bs.read();
-
-            if (opcode < 0) {
-                throw new RuntimeException
-                        ("Reached end of debug stream without "
-                                + "encountering end marker");
-            }
+            int opcode = bs.readByte() & 0xff;
 
             switch (opcode) {
                 case DBG_START_LOCAL: {
-                    int reg = readUnsignedLeb128(bs);
+                    int reg = Leb128Utils.readUnsignedLeb128(bs);
                     int nameIdx = readStringIndex(bs);
                     int typeIdx = readStringIndex(bs);
                     LocalEntry le = new LocalEntry(
@@ -303,7 +299,7 @@
                 break;
 
                 case DBG_START_LOCAL_EXTENDED: {
-                    int reg = readUnsignedLeb128(bs);
+                    int reg = Leb128Utils.readUnsignedLeb128(bs);
                     int nameIdx = readStringIndex(bs);
                     int typeIdx = readStringIndex(bs);
                     int sigIdx = readStringIndex(bs);
@@ -316,7 +312,7 @@
                 break;
 
                 case DBG_RESTART_LOCAL: {
-                    int reg = readUnsignedLeb128(bs);
+                    int reg = Leb128Utils.readUnsignedLeb128(bs);
                     LocalEntry prevle;
                     LocalEntry le;
 
@@ -342,7 +338,7 @@
                 break;
 
                 case DBG_END_LOCAL: {
-                    int reg = readUnsignedLeb128(bs);
+                    int reg = Leb128Utils.readUnsignedLeb128(bs);
                     LocalEntry prevle;
                     LocalEntry le;
 
@@ -372,11 +368,11 @@
                 return;
 
                 case DBG_ADVANCE_PC:
-                    address += readUnsignedLeb128(bs);
+                    address += Leb128Utils.readUnsignedLeb128(bs);
                 break;
 
                 case DBG_ADVANCE_LINE:
-                    line += readSignedLeb128(bs);
+                    line += Leb128Utils.readSignedLeb128(bs);
                 break;
 
                 case DBG_SET_PROLOGUE_END:
@@ -589,65 +585,4 @@
             throw new RuntimeException("local table problem");
         }
     }
-
-    /**
-     * Reads a DWARFv3-style signed LEB128 integer to the specified stream.
-     * See DWARF v3 section 7.6. An invalid sequence produces an IOException.
-     *
-     * @param bs stream to input from
-     * @return read value
-     * @throws IOException on invalid sequence in addition to
-     * those caused by the InputStream
-     */
-    public static int readSignedLeb128(InputStream bs) throws IOException {
-        int result = 0;
-        int cur;
-        int count = 0;
-        int signBits = -1;
-
-        do {
-            cur = bs.read();
-            result |= (cur & 0x7f) << (count * 7);
-            signBits <<= 7;
-            count++;
-        } while (((cur & 0x80) == 0x80) && count < 5);
-
-        if ((cur & 0x80) == 0x80) {
-            throw new IOException ("invalid LEB128 sequence");
-        }
-
-        // Sign extend if appropriate
-        if (((signBits >> 1) & result) != 0 ) {
-            result |= signBits;
-        }
-
-        return result;
-    }
-
-    /**
-     * Reads a DWARFv3-style unsigned LEB128 integer to the specified stream.
-     * See DWARF v3 section 7.6. An invalid sequence produces an IOException.
-     *
-     * @param bs stream to input from
-     * @return read value, which should be treated as an unsigned value.
-     * @throws IOException on invalid sequence in addition to
-     * those caused by the InputStream
-     */
-    public static int readUnsignedLeb128(InputStream bs) throws IOException {
-        int result = 0;
-        int cur;
-        int count = 0;
-
-        do {
-            cur = bs.read();
-            result |= (cur & 0x7f) << (count * 7);
-            count++;
-        } while (((cur & 0x80) == 0x80) && count < 5);
-
-        if ((cur & 0x80) == 0x80) {
-            throw new IOException ("invalid LEB128 sequence");
-        }
-
-        return result;
-    }
 }
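
With the private readSignedLeb128/readUnsignedLeb128 helpers deleted above, the decoder now leans on com.android.dx.util.Leb128Utils and a DataInput wrapper. A minimal round-trip sketch, using only the Leb128Utils calls that appear elsewhere in this patch — readUnsignedLeb128(DataInput) and writeUnsignedLeb128(byte[], int, int), the latter assumed (as DexBuffer.Section.writeUleb128 relies on) to return the number of bytes written:

    // Hedged sketch of a ULEB128 round trip via Leb128Utils; everything
    // else here is plain java.io.
    import com.android.dx.util.Leb128Utils;
    import java.io.ByteArrayInputStream;
    import java.io.DataInputStream;
    import java.io.IOException;

    public class Leb128RoundTrip {
        public static void main(String[] args) throws IOException {
            byte[] buf = new byte[5];   // at most 5 bytes for a 32-bit value
            int written = Leb128Utils.writeUnsignedLeb128(buf, 0, 300);
            // 300 encodes as 0xAC 0x02: low 7 bits with the continuation bit,
            // then the remaining bits.
            int value = Leb128Utils.readUnsignedLeb128(
                    new DataInputStream(new ByteArrayInputStream(buf, 0, written)));
            System.out.println(written + " bytes, value=" + value);  // 2 bytes, value=300
        }
    }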
diff --git a/dx/src/com/android/dx/dex/file/DebugInfoEncoder.java b/dx/src/com/android/dx/dex/file/DebugInfoEncoder.java
index 08b6637..d9d4ebc 100644
--- a/dx/src/com/android/dx/dex/file/DebugInfoEncoder.java
+++ b/dx/src/com/android/dx/dex/file/DebugInfoEncoder.java
@@ -376,7 +376,7 @@
             PositionList.Entry entry = sortedPositions.get(0);
             line = entry.getPosition().getLine();
         }
-        output.writeUnsignedLeb128(line);
+        output.writeUleb128(line);
 
         if (annotate) {
             annotate(output.getCursor() - mark, "line_start: " + line);
@@ -403,7 +403,7 @@
 
         // Write out the number of parameter entries that will follow.
         mark = output.getCursor();
-        output.writeUnsignedLeb128(szParamTypes);
+        output.writeUleb128(szParamTypes);
 
         if (annotate) {
             annotate(output.getCursor() - mark,
@@ -638,10 +638,10 @@
      */
     private void emitStringIndex(CstUtf8 string) throws IOException {
         if ((string == null) || (file == null)) {
-            output.writeUnsignedLeb128(0);
+            output.writeUleb128(0);
         } else {
-            output.writeUnsignedLeb128(
-                1 + file.getStringIds().indexOf(string));
+            output.writeUleb128(
+                    1 + file.getStringIds().indexOf(string));
         }
 
         if (DEBUG) {
@@ -659,10 +659,10 @@
      */
     private void emitTypeIndex(CstType type) throws IOException {
         if ((type == null) || (file == null)) {
-            output.writeUnsignedLeb128(0);
+            output.writeUleb128(0);
         } else {
-            output.writeUnsignedLeb128(
-                1 + file.getTypeIds().indexOf(type));
+            output.writeUleb128(
+                    1 + file.getTypeIds().indexOf(type));
         }
 
         if (DEBUG) {
@@ -748,7 +748,7 @@
         int mark = output.getCursor();
 
         output.writeByte(DBG_END_LOCAL);
-        output.writeUnsignedLeb128(entry.getRegister());
+        output.writeUleb128(entry.getRegister());
 
         if (annotateTo != null || debugPrint != null) {
             annotate(output.getCursor() - mark,
@@ -851,7 +851,7 @@
         int mark = output.getCursor();
 
         output.writeByte(DBG_ADVANCE_LINE);
-        output.writeSignedLeb128(deltaLines);
+        output.writeSleb128(deltaLines);
         line += deltaLines;
 
         if (annotateTo != null || debugPrint != null) {
@@ -875,7 +875,7 @@
         int mark = output.getCursor();
 
         output.writeByte(DBG_ADVANCE_PC);
-        output.writeUnsignedLeb128(deltaAddress);
+        output.writeUleb128(deltaAddress);
         address += deltaAddress;
 
         if (annotateTo != null || debugPrint != null) {
@@ -903,7 +903,7 @@
                     "Signed value where unsigned required: " + n);
         }
 
-        output.writeUnsignedLeb128(n);
+        output.writeUleb128(n);
     }
 
     /**
diff --git a/dx/src/com/android/dx/dex/file/DexFile.java b/dx/src/com/android/dx/dex/file/DexFile.java
index 1cc9358..73f0864 100644
--- a/dx/src/com/android/dx/dex/file/DexFile.java
+++ b/dx/src/com/android/dx/dex/file/DexFile.java
@@ -134,6 +134,13 @@
     }
 
     /**
+     * Returns true if this dex doesn't contain any class defs.
+     */
+    public boolean isEmpty() {
+        return classDefs.items().isEmpty();
+    }
+
+    /**
      * Adds a class to this instance. It is illegal to attempt to add more
      * than one class with the same name.
      *
diff --git a/dx/src/com/android/dx/dex/file/EncodedField.java b/dx/src/com/android/dx/dex/file/EncodedField.java
index f2a8184..d972479 100644
--- a/dx/src/com/android/dx/dex/file/EncodedField.java
+++ b/dx/src/com/android/dx/dex/file/EncodedField.java
@@ -146,8 +146,8 @@
                     AccessFlags.fieldString(accessFlags));
         }
 
-        out.writeUnsignedLeb128(diff);
-        out.writeUnsignedLeb128(accessFlags);
+        out.writeUleb128(diff);
+        out.writeUleb128(accessFlags);
 
         return fieldIdx;
     }
diff --git a/dx/src/com/android/dx/dex/file/EncodedMethod.java b/dx/src/com/android/dx/dex/file/EncodedMethod.java
index 1b0770f..e707de4 100644
--- a/dx/src/com/android/dx/dex/file/EncodedMethod.java
+++ b/dx/src/com/android/dx/dex/file/EncodedMethod.java
@@ -187,9 +187,9 @@
                     "    code_off:     " + Hex.u4(codeOff));
         }
 
-        out.writeUnsignedLeb128(diff);
-        out.writeUnsignedLeb128(accessFlags);
-        out.writeUnsignedLeb128(codeOff);
+        out.writeUleb128(diff);
+        out.writeUleb128(accessFlags);
+        out.writeUleb128(codeOff);
 
         return methodIdx;
     }
diff --git a/dx/src/com/android/dx/dex/file/HeaderItem.java b/dx/src/com/android/dx/dex/file/HeaderItem.java
index f95ff44..98b938d 100644
--- a/dx/src/com/android/dx/dex/file/HeaderItem.java
+++ b/dx/src/com/android/dx/dex/file/HeaderItem.java
@@ -16,6 +16,8 @@
 
 package com.android.dx.dex.file;
 
+import com.android.dx.dex.DexFormat;
+import com.android.dx.dex.SizeOf;
 import com.android.dx.rop.cst.CstUtf8;
 import com.android.dx.util.AnnotatedOutput;
 import com.android.dx.util.Hex;
@@ -25,18 +27,6 @@
  */
 public final class HeaderItem extends IndexedItem {
     /**
-     * {@code non-null;} the file format magic number, represented as the
-     * low-order bytes of a string
-     */
-    private static final String MAGIC = "dex\n035\0";
-
-    /** size of this section, in bytes */
-    private static final int HEADER_SIZE = 0x70;
-
-    /** the endianness tag */
-    private static final int ENDIAN_TAG = 0x12345678;
-
-    /**
      * Constructs an instance.
      */
     public HeaderItem() {
@@ -52,7 +42,7 @@
     /** {@inheritDoc} */
     @Override
     public int writeSize() {
-        return HEADER_SIZE;
+        return SizeOf.HEADER_ITEM;
     }
 
     /** {@inheritDoc} */
@@ -72,13 +62,13 @@
             lastDataSection.writeSize() - dataOff;
 
         if (out.annotates()) {
-            out.annotate(8, "magic: " + new CstUtf8(MAGIC).toQuoted());
+            out.annotate(8, "magic: " + new CstUtf8(DexFormat.MAGIC).toQuoted());
             out.annotate(4, "checksum");
             out.annotate(20, "signature");
             out.annotate(4, "file_size:       " +
                          Hex.u4(file.getFileSize()));
-            out.annotate(4, "header_size:     " + Hex.u4(HEADER_SIZE));
-            out.annotate(4, "endian_tag:      " + Hex.u4(ENDIAN_TAG));
+            out.annotate(4, "header_size:     " + Hex.u4(SizeOf.HEADER_ITEM));
+            out.annotate(4, "endian_tag:      " + Hex.u4(DexFormat.ENDIAN_TAG));
             out.annotate(4, "link_size:       0");
             out.annotate(4, "link_off:        0");
             out.annotate(4, "map_off:         " + Hex.u4(mapOff));
@@ -86,15 +76,15 @@
 
         // Write the magic number.
         for (int i = 0; i < 8; i++) {
-            out.writeByte(MAGIC.charAt(i));
+            out.writeByte(DexFormat.MAGIC.charAt(i));
         }
 
         // Leave space for the checksum and signature.
         out.writeZeroes(24);
 
         out.writeInt(file.getFileSize());
-        out.writeInt(HEADER_SIZE);
-        out.writeInt(ENDIAN_TAG);
+        out.writeInt(SizeOf.HEADER_ITEM);
+        out.writeInt(DexFormat.ENDIAN_TAG);
 
         /*
          * Write zeroes for the link size and data, as the output
diff --git a/dx/src/com/android/dx/dex/file/MemberIdItem.java b/dx/src/com/android/dx/dex/file/MemberIdItem.java
index d3a61d4..08a3123 100644
--- a/dx/src/com/android/dx/dex/file/MemberIdItem.java
+++ b/dx/src/com/android/dx/dex/file/MemberIdItem.java
@@ -20,15 +20,13 @@
 import com.android.dx.rop.cst.CstNat;
 import com.android.dx.util.AnnotatedOutput;
 import com.android.dx.util.Hex;
+import com.android.dx.dex.SizeOf;
 
 /**
  * Representation of a member (field or method) reference inside a
  * Dalvik file.
  */
 public abstract class MemberIdItem extends IdItem {
-    /** size of instances when written out to a file, in bytes */
-    public static final int WRITE_SIZE = 8;
-
     /** {@code non-null;} the constant for the member */
     private final CstMemberRef cst;
 
@@ -46,7 +44,7 @@
     /** {@inheritDoc} */
     @Override
     public int writeSize() {
-        return WRITE_SIZE;
+        return SizeOf.MEMBER_ID_ITEM;
     }
 
     /** {@inheritDoc} */
diff --git a/dx/src/com/android/dx/dex/file/ProtoIdItem.java b/dx/src/com/android/dx/dex/file/ProtoIdItem.java
index 31cf8fb..ffb6167 100644
--- a/dx/src/com/android/dx/dex/file/ProtoIdItem.java
+++ b/dx/src/com/android/dx/dex/file/ProtoIdItem.java
@@ -16,7 +16,7 @@
 
 package com.android.dx.dex.file;
 
-import com.android.dx.rop.cst.CstType;
+import com.android.dx.dex.SizeOf;
 import com.android.dx.rop.cst.CstUtf8;
 import com.android.dx.rop.type.Prototype;
 import com.android.dx.rop.type.StdTypeList;
@@ -28,9 +28,6 @@
  * Representation of a method prototype reference inside a Dalvik file.
  */
 public final class ProtoIdItem extends IndexedItem {
-    /** size of instances when written out to a file, in bytes */
-    public static final int WRITE_SIZE = 12;
-
     /** {@code non-null;} the wrapped prototype */
     private final Prototype prototype;
 
@@ -106,7 +103,7 @@
     /** {@inheritDoc} */
     @Override
     public int writeSize() {
-        return WRITE_SIZE;
+        return SizeOf.PROTO_ID_ITEM;
     }
 
     /** {@inheritDoc} */
diff --git a/dx/src/com/android/dx/dex/file/StringDataItem.java b/dx/src/com/android/dx/dex/file/StringDataItem.java
index 80dbced..3752cb2 100644
--- a/dx/src/com/android/dx/dex/file/StringDataItem.java
+++ b/dx/src/com/android/dx/dex/file/StringDataItem.java
@@ -78,7 +78,7 @@
             out.annotate(bytes.size() + 1, value.toQuoted());
         }
 
-        out.writeUnsignedLeb128(utf16Size);
+        out.writeUleb128(utf16Size);
         out.write(bytes);
         out.writeByte(0);
     }
diff --git a/dx/src/com/android/dx/dex/file/StringIdItem.java b/dx/src/com/android/dx/dex/file/StringIdItem.java
index cd0d57b..8037df7 100644
--- a/dx/src/com/android/dx/dex/file/StringIdItem.java
+++ b/dx/src/com/android/dx/dex/file/StringIdItem.java
@@ -16,6 +16,7 @@
 
 package com.android.dx.dex.file;
 
+import com.android.dx.dex.SizeOf;
 import com.android.dx.rop.cst.CstUtf8;
 import com.android.dx.util.AnnotatedOutput;
 import com.android.dx.util.Hex;
@@ -25,9 +26,6 @@
  */
 public final class StringIdItem
         extends IndexedItem implements Comparable {
-    /** size of instances when written out to a file, in bytes */
-    public static final int WRITE_SIZE = 4;
-
     /** {@code non-null;} the string value */
     private final CstUtf8 value;
 
@@ -80,7 +78,7 @@
     /** {@inheritDoc} */
     @Override
     public int writeSize() {
-        return WRITE_SIZE;
+        return SizeOf.STRING_ID_ITEM;
     }
 
     /** {@inheritDoc} */
diff --git a/dx/src/com/android/dx/dex/file/TypeIdItem.java b/dx/src/com/android/dx/dex/file/TypeIdItem.java
index c257e00..6ace661 100644
--- a/dx/src/com/android/dx/dex/file/TypeIdItem.java
+++ b/dx/src/com/android/dx/dex/file/TypeIdItem.java
@@ -16,6 +16,7 @@
 
 package com.android.dx.dex.file;
 
+import com.android.dx.dex.SizeOf;
 import com.android.dx.rop.cst.CstType;
 import com.android.dx.rop.cst.CstUtf8;
 import com.android.dx.util.AnnotatedOutput;
@@ -25,9 +26,6 @@
  * Representation of a type reference inside a Dalvik file.
  */
 public final class TypeIdItem extends IdItem {
-    /** size of instances when written out to a file, in bytes */
-    public static final int WRITE_SIZE = 4;
-
     /**
      * Constructs an instance.
      *
@@ -46,7 +44,7 @@
     /** {@inheritDoc} */
     @Override
     public int writeSize() {
-        return WRITE_SIZE;
+        return SizeOf.TYPE_ID_ITEM;
     }
 
     /** {@inheritDoc} */
diff --git a/dx/src/com/android/dx/dex/file/ValueEncoder.java b/dx/src/com/android/dx/dex/file/ValueEncoder.java
index fba64a7..7a608e0 100644
--- a/dx/src/com/android/dx/dex/file/ValueEncoder.java
+++ b/dx/src/com/android/dx/dex/file/ValueEncoder.java
@@ -277,7 +277,7 @@
             out.annotate("  size: " + Hex.u4(size));
         }
 
-        out.writeUnsignedLeb128(size);
+        out.writeUleb128(size);
 
         for (int i = 0; i < size; i++) {
             Constant cst = list.get(i);
@@ -319,7 +319,7 @@
                     type.toHuman());
         }
 
-        out.writeUnsignedLeb128(typeIds.indexOf(annotation.getType()));
+        out.writeUleb128(typeIds.indexOf(annotation.getType()));
 
         Collection<NameValuePair> pairs = annotation.getNameValuePairs();
         int size = pairs.size();
@@ -328,7 +328,7 @@
             out.annotate("  size: " + Hex.u4(size));
         }
 
-        out.writeUnsignedLeb128(size);
+        out.writeUleb128(size);
 
         int at = 0;
         for (NameValuePair pair : pairs) {
@@ -343,7 +343,7 @@
                         name.toHuman());
             }
 
-            out.writeUnsignedLeb128(nameIdx);
+            out.writeUleb128(nameIdx);
 
             if (annotates) {
                 out.annotate("    value: " + constantToHuman(value));
diff --git a/dx/src/com/android/dx/io/ClassData.java b/dx/src/com/android/dx/io/ClassData.java
new file mode 100644
index 0000000..5da7ddd
--- /dev/null
+++ b/dx/src/com/android/dx/io/ClassData.java
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io;
+
+public final class ClassData {
+    private final Field[] staticFields;
+    private final Field[] instanceFields;
+    private final Method[] directMethods;
+    private final Method[] virtualMethods;
+
+    public ClassData(Field[] staticFields, Field[] instanceFields,
+            Method[] directMethods, Method[] virtualMethods) {
+        this.staticFields = staticFields;
+        this.instanceFields = instanceFields;
+        this.directMethods = directMethods;
+        this.virtualMethods = virtualMethods;
+    }
+
+    public Field[] getStaticFields() {
+        return staticFields;
+    }
+
+    public Field[] getInstanceFields() {
+        return instanceFields;
+    }
+
+    public Method[] getDirectMethods() {
+        return directMethods;
+    }
+
+    public Method[] getVirtualMethods() {
+        return virtualMethods;
+    }
+
+    public Field[] allFields() {
+        Field[] result = new Field[staticFields.length + instanceFields.length];
+        System.arraycopy(staticFields, 0, result, 0, staticFields.length);
+        System.arraycopy(instanceFields, 0, result, staticFields.length, instanceFields.length);
+        return result;
+    }
+
+    public Method[] allMethods() {
+        Method[] result = new Method[directMethods.length + virtualMethods.length];
+        System.arraycopy(directMethods, 0, result, 0, directMethods.length);
+        System.arraycopy(virtualMethods, 0, result, directMethods.length, virtualMethods.length);
+        return result;
+    }
+
+    public static class Field {
+        private final int fieldIndex;
+        private final int accessFlags;
+
+        public Field(int fieldIndex, int accessFlags) {
+            this.fieldIndex = fieldIndex;
+            this.accessFlags = accessFlags;
+        }
+
+        public int getFieldIndex() {
+            return fieldIndex;
+        }
+
+        public int getAccessFlags() {
+            return accessFlags;
+        }
+    }
+
+    public static class Method {
+        private final int methodIndex;
+        private final int accessFlags;
+        private final int codeOffset;
+
+        public Method(int methodIndex, int accessFlags, int codeOffset) {
+            this.methodIndex = methodIndex;
+            this.accessFlags = accessFlags;
+            this.codeOffset = codeOffset;
+        }
+
+        public int getMethodIndex() {
+            return methodIndex;
+        }
+
+        public int getAccessFlags() {
+            return accessFlags;
+        }
+
+        public int getCodeOffset() {
+            return codeOffset;
+        }
+    }
+}
diff --git a/dx/src/com/android/dx/io/ClassDef.java b/dx/src/com/android/dx/io/ClassDef.java
new file mode 100644
index 0000000..5c8d10b
--- /dev/null
+++ b/dx/src/com/android/dx/io/ClassDef.java
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io;
+
+/**
+ * A type definition.
+ */
+public final class ClassDef {
+    public static final int NO_INDEX = -1;
+    private final DexBuffer buffer;
+    private final int offset;
+    private final int typeIndex;
+    private final int accessFlags;
+    private final int supertypeIndex;
+    private final int interfacesOffset;
+    private final int sourceFileIndex;
+    private final int annotationsOffset;
+    private final int classDataOffset;
+    private final int staticValuesOffset;
+
+    public ClassDef(DexBuffer buffer, int offset, int typeIndex, int accessFlags,
+            int supertypeIndex, int interfacesOffset, int sourceFileIndex,
+            int annotationsOffset, int classDataOffset, int staticValuesOffset) {
+        this.buffer = buffer;
+        this.offset = offset;
+        this.typeIndex = typeIndex;
+        this.accessFlags = accessFlags;
+        this.supertypeIndex = supertypeIndex;
+        this.interfacesOffset = interfacesOffset;
+        this.sourceFileIndex = sourceFileIndex;
+        this.annotationsOffset = annotationsOffset;
+        this.classDataOffset = classDataOffset;
+        this.staticValuesOffset = staticValuesOffset;
+    }
+
+    public int getOffset() {
+        return offset;
+    }
+
+    public int getTypeIndex() {
+        return typeIndex;
+    }
+
+    public int getSupertypeIndex() {
+        return supertypeIndex;
+    }
+
+    public int getInterfacesOffset() {
+        return interfacesOffset;
+    }
+
+    public short[] getInterfaces() {
+        return buffer.readTypeList(interfacesOffset).getTypes();
+    }
+
+    public int getAccessFlags() {
+        return accessFlags;
+    }
+
+    public int getSourceFileIndex() {
+        return sourceFileIndex;
+    }
+
+    public int getAnnotationsOffset() {
+        return annotationsOffset;
+    }
+
+    public int getClassDataOffset() {
+        return classDataOffset;
+    }
+
+    public int getStaticValuesOffset() {
+        return staticValuesOffset;
+    }
+
+    @Override public String toString() {
+        if (buffer == null) {
+            return typeIndex + " " + supertypeIndex;
+        }
+
+        StringBuilder result = new StringBuilder();
+        result.append(buffer.typeNames().get(typeIndex));
+        if (supertypeIndex != NO_INDEX) {
+            result.append(" extends ").append(buffer.typeNames().get(supertypeIndex));
+        }
+        return result.toString();
+    }
+}
diff --git a/dx/src/com/android/dx/io/Code.java b/dx/src/com/android/dx/io/Code.java
new file mode 100644
index 0000000..81073f3
--- /dev/null
+++ b/dx/src/com/android/dx/io/Code.java
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io;
+
+public final class Code {
+    private final short registersSize;
+    private final short insSize;
+    private final short outsSize;
+    private final int debugInfoOffset;
+    private final short[] instructions;
+    private final Try[] tries;
+    private final CatchHandler[] catchHandlers;
+
+    public Code(short registersSize, short insSize, short outsSize, int debugInfoOffset,
+            short[] instructions, Try[] tries, CatchHandler[] catchHandlers) {
+        this.registersSize = registersSize;
+        this.insSize = insSize;
+        this.outsSize = outsSize;
+        this.debugInfoOffset = debugInfoOffset;
+        this.instructions = instructions;
+        this.tries = tries;
+        this.catchHandlers = catchHandlers;
+    }
+
+    public short getRegistersSize() {
+        return registersSize;
+    }
+
+    public short getInsSize() {
+        return insSize;
+    }
+
+    public short getOutsSize() {
+        return outsSize;
+    }
+
+    public int getDebugInfoOffset() {
+        return debugInfoOffset;
+    }
+
+    public short[] getInstructions() {
+        return instructions;
+    }
+
+    public Try[] getTries() {
+        return tries;
+    }
+
+    public CatchHandler[] getCatchHandlers() {
+        return catchHandlers;
+    }
+
+    public static class Try {
+        final int startAddress;
+        final short instructionCount;
+        final short handlerOffset;
+
+        Try(int startAddress, short instructionCount, short handlerOffset) {
+            this.startAddress = startAddress;
+            this.instructionCount = instructionCount;
+            this.handlerOffset = handlerOffset;
+        }
+
+        public int getStartAddress() {
+            return startAddress;
+        }
+
+        public short getInstructionCount() {
+            return instructionCount;
+        }
+
+        public short getHandlerOffset() {
+            return handlerOffset;
+        }
+    }
+
+    public static class CatchHandler {
+        final int[] typeIndexes;
+        final int[] addresses;
+        final int catchAllAddress;
+
+        public CatchHandler(int[] typeIndexes, int[] addresses, int catchAllAddress) {
+            this.typeIndexes = typeIndexes;
+            this.addresses = addresses;
+            this.catchAllAddress = catchAllAddress;
+        }
+
+        public int[] getTypeIndexes() {
+            return typeIndexes;
+        }
+
+        public int[] getAddresses() {
+            return addresses;
+        }
+
+        public int getCatchAllAddress() {
+            return catchAllAddress;
+        }
+    }
+}
diff --git a/dx/src/com/android/dx/io/CodeReader.java b/dx/src/com/android/dx/io/CodeReader.java
new file mode 100644
index 0000000..cab1063
--- /dev/null
+++ b/dx/src/com/android/dx/io/CodeReader.java
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io;
+
+import com.android.dx.io.instructions.DecodedInstruction;
+import com.android.dx.util.DexException;
+
+/**
+ * Walks through a block of code and calls visitor call backs.
+ */
+public final class CodeReader {
+    private Visitor fallbackVisitor = null;
+    private Visitor stringVisitor = null;
+    private Visitor typeVisitor = null;
+    private Visitor fieldVisitor = null;
+    private Visitor methodVisitor = null;
+
+    /**
+     * Sets {@code visitor} as the visitor for all instructions.
+     */
+    public void setAllVisitors(Visitor visitor) {
+        fallbackVisitor = visitor;
+        stringVisitor = visitor;
+        typeVisitor = visitor;
+        fieldVisitor = visitor;
+        methodVisitor = visitor;
+    }
+
+    /**
+     * Sets {@code visitor} as the visitor for all instructions not
+     * otherwise handled.
+     */
+    public void setFallbackVisitor(Visitor visitor) {
+        fallbackVisitor = visitor;
+    }
+
+    /**
+     * Sets {@code visitor} as the visitor for all string instructions.
+     */
+    public void setStringVisitor(Visitor visitor) {
+        stringVisitor = visitor;
+    }
+
+    /**
+     * Sets {@code visitor} as the visitor for all type instructions.
+     */
+    public void setTypeVisitor(Visitor visitor) {
+        typeVisitor = visitor;
+    }
+
+    /**
+     * Sets {@code visitor} as the visitor for all field instructions.
+     */
+    public void setFieldVisitor(Visitor visitor) {
+        fieldVisitor = visitor;
+    }
+
+    /**
+     * Sets {@code visitor} as the visitor for all method instructions.
+     */
+    public void setMethodVisitor(Visitor visitor) {
+        methodVisitor = visitor;
+    }
+
+    public void visitAll(DecodedInstruction[] decodedInstructions)
+            throws DexException {
+        int size = decodedInstructions.length;
+
+        for (int i = 0; i < size; i++) {
+            DecodedInstruction one = decodedInstructions[i];
+            if (one == null) {
+                continue;
+            }
+
+            callVisit(decodedInstructions, one);
+        }
+    }
+
+    public void visitAll(short[] encodedInstructions) throws DexException {
+        DecodedInstruction[] decodedInstructions =
+            DecodedInstruction.decodeAll(encodedInstructions);
+        visitAll(decodedInstructions);
+    }
+
+    private void callVisit(DecodedInstruction[] all, DecodedInstruction one) {
+        Visitor visitor = null;
+
+        switch (OpcodeInfo.getIndexType(one.getOpcode())) {
+            case STRING_REF: visitor = stringVisitor; break;
+            case TYPE_REF:   visitor = typeVisitor;   break;
+            case FIELD_REF:  visitor = fieldVisitor;  break;
+            case METHOD_REF: visitor = methodVisitor; break;
+        }
+
+        if (visitor == null) {
+            visitor = fallbackVisitor;
+        }
+
+        if (visitor != null) {
+            visitor.visit(all, one);
+        }
+    }
+
+    public interface Visitor {
+        void visit(DecodedInstruction[] all, DecodedInstruction one);
+    }
+}
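
A minimal usage sketch for the visitor API above, assuming only members visible in this patch (setStringVisitor, visitAll(short[]), DecodedInstruction.getOpcode()); the class and method names of the sketch itself are illustrative:

    // Hedged sketch: hook a visitor for string-referencing instructions and
    // walk one method's encoded instruction array.
    import com.android.dx.io.CodeReader;
    import com.android.dx.io.instructions.DecodedInstruction;

    public class PrintStringRefs {
        static void dump(short[] encodedInstructions) {
            CodeReader reader = new CodeReader();
            reader.setStringVisitor(new CodeReader.Visitor() {
                public void visit(DecodedInstruction[] all, DecodedInstruction one) {
                    // Called only for instructions whose index type is STRING_REF.
                    System.out.println("string ref in opcode " + one.getOpcode());
                }
            });
            reader.visitAll(encodedInstructions);
        }
    }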
diff --git a/dx/src/com/android/dx/io/DexBuffer.java b/dx/src/com/android/dx/io/DexBuffer.java
new file mode 100644
index 0000000..cedb0aa
--- /dev/null
+++ b/dx/src/com/android/dx/io/DexBuffer.java
@@ -0,0 +1,614 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io;
+
+import com.android.dx.dex.SizeOf;
+import com.android.dx.dex.TableOfContents;
+import com.android.dx.merge.TypeList;
+import com.android.dx.util.DexException;
+import com.android.dx.util.Leb128Utils;
+import com.android.dx.util.Mutf8;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInput;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.AbstractList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+import java.util.NoSuchElementException;
+
+/**
+ * The bytes of a dex file in memory for reading and writing. All int offsets
+ * are unsigned.
+ */
+public final class DexBuffer {
+    private byte[] data;
+    private final TableOfContents tableOfContents = new TableOfContents();
+    private int length;
+
+    private final List<String> strings = new AbstractList<String>() {
+        @Override public String get(int index) {
+            checkBounds(index, tableOfContents.stringIds.size);
+            return open(tableOfContents.stringIds.off + (index * SizeOf.STRING_ID_ITEM))
+                    .readString();
+        }
+        @Override public int size() {
+            return tableOfContents.stringIds.size;
+        }
+    };
+
+    private final List<Integer> typeIds = new AbstractList<Integer>() {
+        @Override public Integer get(int index) {
+            checkBounds(index, tableOfContents.typeIds.size);
+            return open(tableOfContents.typeIds.off + (index * SizeOf.TYPE_ID_ITEM)).readInt();
+        }
+        @Override public int size() {
+            return tableOfContents.typeIds.size;
+        }
+    };
+
+    private final List<String> typeNames = new AbstractList<String>() {
+        @Override public String get(int index) {
+            checkBounds(index, tableOfContents.typeIds.size);
+            return strings.get(typeIds.get(index));
+        }
+        @Override public int size() {
+            return tableOfContents.typeIds.size;
+        }
+    };
+
+    private final List<ProtoId> protoIds = new AbstractList<ProtoId>() {
+        @Override public ProtoId get(int index) {
+            checkBounds(index, tableOfContents.protoIds.size);
+            return open(tableOfContents.protoIds.off + (SizeOf.PROTO_ID_ITEM * index))
+                    .readProtoId();
+        }
+        @Override public int size() {
+            return tableOfContents.protoIds.size;
+        }
+    };
+
+    private final List<FieldId> fieldIds = new AbstractList<FieldId>() {
+        @Override public FieldId get(int index) {
+            checkBounds(index, tableOfContents.fieldIds.size);
+            return open(tableOfContents.fieldIds.off + (SizeOf.MEMBER_ID_ITEM * index))
+                    .readFieldId();
+        }
+        @Override public int size() {
+            return tableOfContents.fieldIds.size;
+        }
+    };
+
+    private final List<MethodId> methodIds = new AbstractList<MethodId>() {
+        @Override public MethodId get(int index) {
+            checkBounds(index, tableOfContents.methodIds.size);
+            return open(tableOfContents.methodIds.off + (SizeOf.MEMBER_ID_ITEM * index))
+                    .readMethodId();
+        }
+        @Override public int size() {
+            return tableOfContents.methodIds.size;
+        }
+    };
+
+    private static void checkBounds(int index, int length) {
+        if (index < 0 || index >= length) {
+            throw new IndexOutOfBoundsException("index:" + index + ", length=" + length);
+        }
+    }
+
+    public void loadFrom(InputStream in) throws IOException {
+        ByteArrayOutputStream bytesOut = new ByteArrayOutputStream();
+        byte[] buffer = new byte[8192];
+
+        int count;
+        while ((count = in.read(buffer)) != -1) {
+            bytesOut.write(buffer, 0, count);
+        }
+
+        this.data = bytesOut.toByteArray();
+        this.length = data.length;
+        this.tableOfContents.readFrom(this);
+    }
+
+    public void loadFrom(File file) throws IOException {
+        InputStream in = new FileInputStream(file);
+        loadFrom(in);
+        in.close();
+    }
+
+    public void writeTo(OutputStream out) throws IOException {
+        out.write(data);
+    }
+
+    public void writeTo(File dexOut) throws IOException {
+        OutputStream out = new FileOutputStream(dexOut);
+        writeTo(out);
+        out.close();
+    }
+
+    public TableOfContents getTableOfContents() {
+        return tableOfContents;
+    }
+
+    public Section open(int position) {
+        if (position < 0 || position > length) {
+            throw new IllegalArgumentException("position=" + position + " length=" + length);
+        }
+        return new Section(position);
+    }
+
+    public Section appendSection(int maxByteCount, String name) {
+        Section result = new Section(name, length, length + maxByteCount);
+        length = fourByteAlign(length + maxByteCount);
+        return result;
+    }
+
+    public void noMoreSections() {
+        data = new byte[length];
+    }
+
+    public int getLength() {
+        return length;
+    }
+
+    private static int fourByteAlign(int position) {
+        return (position + 3) & ~3;
+    }
+
+    public byte[] getBytes() {
+        return data;
+    }
+
+    public List<String> strings() {
+        return strings;
+    }
+
+    public List<Integer> typeIds() {
+        return typeIds;
+    }
+
+    public List<String> typeNames() {
+        return typeNames;
+    }
+
+    public List<ProtoId> protoIds() {
+        return protoIds;
+    }
+
+    public List<FieldId> fieldIds() {
+        return fieldIds;
+    }
+
+    public List<MethodId> methodIds() {
+        return methodIds;
+    }
+
+    public Iterable<ClassDef> classDefs() {
+        return new Iterable<ClassDef>() {
+            public Iterator<ClassDef> iterator() {
+                return new Iterator<ClassDef>() {
+                    private DexBuffer.Section in = open(tableOfContents.classDefs.off);
+                    private int count = 0;
+
+                    public boolean hasNext() {
+                        return count < tableOfContents.classDefs.size;
+                    }
+                    public ClassDef next() {
+                        if (!hasNext()) {
+                            throw new NoSuchElementException();
+                        }
+                        count++;
+                        return in.readClassDef();
+                    }
+                    public void remove() {
+                        throw new UnsupportedOperationException();
+                    }
+                };
+            }
+        };
+    }
+
+    public TypeList readTypeList(int offset) {
+        if (offset == 0) {
+            return TypeList.EMPTY;
+        }
+        return open(offset).readTypeList();
+    }
+
+    public ClassData readClassData(ClassDef classDef) {
+        int offset = classDef.getClassDataOffset();
+        if (offset == 0) {
+            throw new IllegalArgumentException("offset == 0");
+        }
+        return open(offset).readClassData();
+    }
+
+    public Code readCode(ClassData.Method method) {
+        int offset = method.getCodeOffset();
+        if (offset == 0) {
+            throw new IllegalArgumentException("offset == 0");
+        }
+        return open(offset).readCode();
+    }
+
+    public final class Section {
+        private final String name;
+        private int position;
+        private final int limit;
+
+        private final DataInput asDataInput = new DataInputStub() {
+            public byte readByte() {
+                return Section.this.readByte();
+            }
+        };
+
+        private Section(String name, int position, int limit) {
+            this.name = name;
+            this.position = position;
+            this.limit = limit;
+        }
+
+        private Section(int position) {
+            this("section", position, data.length);
+        }
+
+        public int getPosition() {
+            return position;
+        }
+
+        public int readInt() {
+            int result = (data[position] & 0xff)
+                    | (data[position + 1] & 0xff) << 8
+                    | (data[position + 2] & 0xff) << 16
+                    | (data[position + 3] & 0xff) << 24;
+            position += 4;
+            return result;
+        }
+
+        public short readShort() {
+            int result = (data[position] & 0xff)
+                    | (data[position + 1] & 0xff) << 8;
+            position += 2;
+            return (short) result;
+        }
+
+        public byte readByte() {
+            return (byte) (data[position++] & 0xff);
+        }
+
+        public byte[] readByteArray(int length) {
+            byte[] result = Arrays.copyOfRange(data, position, position + length);
+            position += length;
+            return result;
+        }
+
+        public short[] readShortArray(int length) {
+            short[] result = new short[length];
+            for (int i = 0; i < length; i++) {
+                result[i] = readShort();
+            }
+            return result;
+        }
+
+        public int readUleb128() {
+            try {
+                return Leb128Utils.readUnsignedLeb128(asDataInput);
+            } catch (IOException e) {
+                throw new DexException(e);
+            }
+        }
+
+        public int readSleb128() {
+            try {
+                return Leb128Utils.readSignedLeb128(asDataInput);
+            } catch (IOException e) {
+                throw new DexException(e);
+            }
+        }
+
+        public TypeList readTypeList() {
+            int size = readInt();
+            short[] types = new short[size];
+            for (int i = 0; i < size; i++) {
+                types[i] = readShort();
+            }
+            alignToFourBytes();
+            return new TypeList(DexBuffer.this, types);
+        }
+
+        public String readString() {
+            int offset = readInt();
+            int savedPosition = position;
+            position = offset;
+            try {
+                int expectedLength = readUleb128();
+                String result = Mutf8.decode(asDataInput, new char[expectedLength]);
+                if (result.length() != expectedLength) {
+                    throw new DexException("Declared length " + expectedLength
+                            + " doesn't match decoded length of " + result.length());
+                }
+                return result;
+            } catch (IOException e) {
+                throw new DexException(e);
+            } finally {
+                position = savedPosition;
+            }
+        }
+
+        public FieldId readFieldId() {
+            short declaringClassIndex = readShort();
+            short typeIndex = readShort();
+            int nameIndex = readInt();
+            return new FieldId(DexBuffer.this, declaringClassIndex, typeIndex, nameIndex);
+        }
+
+        public MethodId readMethodId() {
+            short declaringClassIndex = readShort();
+            short protoIndex = readShort();
+            int nameIndex = readInt();
+            return new MethodId(DexBuffer.this, declaringClassIndex, protoIndex, nameIndex);
+        }
+
+        public ProtoId readProtoId() {
+            int shortyIndex = readInt();
+            int returnTypeIndex = readInt();
+            int parametersOffset = readInt();
+            return new ProtoId(DexBuffer.this, shortyIndex, returnTypeIndex, parametersOffset);
+        }
+
+        public ClassDef readClassDef() {
+            int offset = getPosition();
+            int type = readInt();
+            int accessFlags = readInt();
+            int supertype = readInt();
+            int interfacesOffset = readInt();
+            int sourceFileIndex = readInt();
+            int annotationsOffset = readInt();
+            int classDataOffset = readInt();
+            int staticValuesOffset = readInt();
+            return new ClassDef(DexBuffer.this, offset, type, accessFlags, supertype,
+                    interfacesOffset, sourceFileIndex, annotationsOffset, classDataOffset,
+                    staticValuesOffset);
+        }
+
+        private Code readCode() {
+            short registersSize = readShort();
+            short insSize = readShort();
+            short outsSize = readShort();
+            short triesSize = readShort();
+            int debugInfoOffset = readInt();
+            int instructionsSize = readInt();
+            short[] instructions = readShortArray(instructionsSize);
+            Code.Try[] tries = new Code.Try[triesSize];
+            Code.CatchHandler[] catchHandlers = new Code.CatchHandler[0];
+            if (triesSize > 0) {
+                if (instructions.length % 2 == 1) {
+                    readShort(); // padding
+                }
+
+                for (int i = 0; i < triesSize; i++) {
+                    int startAddress = readInt();
+                    short instructionCount = readShort();
+                    short handlerOffset = readShort();
+                    tries[i] = new Code.Try(startAddress, instructionCount, handlerOffset);
+                }
+
+                int catchHandlersSize = readUleb128();
+                catchHandlers = new Code.CatchHandler[catchHandlersSize];
+                for (int i = 0; i < catchHandlersSize; i++) {
+                    catchHandlers[i] = readCatchHandler();
+                }
+            }
+            return new Code(registersSize, insSize, outsSize, debugInfoOffset, instructions,
+                    tries, catchHandlers);
+        }
+
+        private Code.CatchHandler readCatchHandler() {
+            int size = readSleb128();
+            int handlersCount = Math.abs(size);
+            int[] typeIndexes = new int[handlersCount];
+            int[] addresses = new int[handlersCount];
+            for (int i = 0; i < handlersCount; i++) {
+                typeIndexes[i] = readUleb128();
+                addresses[i] = readUleb128();
+            }
+            int catchAllAddress = size <= 0 ? readUleb128() : -1;
+            return new Code.CatchHandler(typeIndexes, addresses, catchAllAddress);
+        }
+
+        private ClassData readClassData() {
+            int staticFieldsSize = readUleb128();
+            int instanceFieldsSize = readUleb128();
+            int directMethodsSize = readUleb128();
+            int virtualMethodsSize = readUleb128();
+            ClassData.Field[] staticFields = readFields(staticFieldsSize);
+            ClassData.Field[] instanceFields = readFields(instanceFieldsSize);
+            ClassData.Method[] directMethods = readMethods(directMethodsSize);
+            ClassData.Method[] virtualMethods = readMethods(virtualMethodsSize);
+            return new ClassData(staticFields, instanceFields, directMethods, virtualMethods);
+        }
+
+        private ClassData.Field[] readFields(int count) {
+            ClassData.Field[] result = new ClassData.Field[count];
+            int fieldIndex = 0;
+            for (int i = 0; i < count; i++) {
+                fieldIndex += readUleb128(); // field index diff
+                int accessFlags = readUleb128();
+                result[i] = new ClassData.Field(fieldIndex, accessFlags);
+            }
+            return result;
+        }
+
+        private ClassData.Method[] readMethods(int count) {
+            ClassData.Method[] result = new ClassData.Method[count];
+            int methodIndex = 0;
+            for (int i = 0; i < count; i++) {
+                methodIndex += readUleb128(); // method index diff
+                int accessFlags = readUleb128();
+                int codeOff = readUleb128();
+                result[i] = new ClassData.Method(methodIndex, accessFlags, codeOff);
+            }
+            return result;
+        }
+
+        private void ensureCapacity(int size) {
+            if (position + size > limit) {
+                throw new DexException("Section limit " + limit + " exceeded by " + name);
+            }
+        }
+
+        /**
+         * Writes 0x00 until the position is aligned to a multiple of 4.
+         */
+        public void alignToFourBytes() {
+            int unalignedCount = position;
+            position = DexBuffer.fourByteAlign(position);
+            for (int i = unalignedCount; i < position; i++) {
+                data[i] = 0;
+            }
+        }
+
+        public void assertFourByteAligned() {
+            if ((position & 3) != 0) {
+                throw new IllegalStateException("Not four byte aligned!");
+            }
+        }
+
+        public void write(byte[] bytes) {
+            ensureCapacity(bytes.length);
+            System.arraycopy(bytes, 0, data, position, bytes.length);
+            position += bytes.length;
+        }
+
+        public void writeByte(int b) {
+            ensureCapacity(1);
+            data[position++] = (byte) b;
+        }
+
+        public void writeShort(short i) {
+            ensureCapacity(2);
+            data[position    ] = (byte) i;
+            data[position + 1] = (byte) (i >>> 8);
+            position += 2;
+        }
+
+        public void write(short[] shorts) {
+            for (short s : shorts) {
+                writeShort(s);
+            }
+        }
+
+        public void writeInt(int i) {
+            ensureCapacity(4);
+            data[position    ] = (byte) i;
+            data[position + 1] = (byte) (i >>>  8);
+            data[position + 2] = (byte) (i >>> 16);
+            data[position + 3] = (byte) (i >>> 24);
+            position += 4;
+        }
+
+        public void writeUleb128(int i) {
+            position += Leb128Utils.writeUnsignedLeb128(data, position, i);
+            ensureCapacity(0);
+        }
+
+        public void writeSleb128(int i) {
+            position += Leb128Utils.writeSignedLeb128(data, position, i);
+            ensureCapacity(0);
+        }
+
+        public void writeStringData(String value) {
+            try {
+                int length = value.length();
+                writeUleb128(length);
+                write(Mutf8.encode(value));
+                writeByte(0);
+            } catch (IOException e) {
+                throw new AssertionError();
+            }
+        }
+
+        public void writeTypeList(TypeList typeList) {
+            short[] types = typeList.getTypes();
+            writeInt(types.length);
+            for (short type : types) {
+                writeShort(type);
+            }
+            alignToFourBytes();
+        }
+
+        /**
+         * Returns the number of bytes remaining in this section.
+         */
+        public int remaining() {
+            return limit - position;
+        }
+    }
+
+    /**
+     * A DataInput stub whose methods all throw UnsupportedOperationException;
+     * subclasses override only the reads they actually need.
+     */
+    private static class DataInputStub implements DataInput {
+        public byte readByte() throws IOException {
+            throw new UnsupportedOperationException();
+        }
+        public void readFully(byte[] buffer) throws IOException {
+            throw new UnsupportedOperationException();
+        }
+        public void readFully(byte[] buffer, int offset, int count) throws IOException {
+            throw new UnsupportedOperationException();
+        }
+        public int skipBytes(int i) throws IOException {
+            throw new UnsupportedOperationException();
+        }
+        public boolean readBoolean() throws IOException {
+            throw new UnsupportedOperationException();
+        }
+        public int readUnsignedByte() throws IOException {
+            throw new UnsupportedOperationException();
+        }
+        public short readShort() throws IOException {
+            throw new UnsupportedOperationException();
+        }
+        public int readUnsignedShort() throws IOException {
+            throw new UnsupportedOperationException();
+        }
+        public char readChar() throws IOException {
+            throw new UnsupportedOperationException();
+        }
+        public int readInt() throws IOException {
+            throw new UnsupportedOperationException();
+        }
+        public long readLong() throws IOException {
+            throw new UnsupportedOperationException();
+        }
+        public float readFloat() throws IOException {
+            throw new UnsupportedOperationException();
+        }
+        public double readDouble() throws IOException {
+            throw new UnsupportedOperationException();
+        }
+        public String readLine() throws IOException {
+            throw new UnsupportedOperationException();
+        }
+        public String readUTF() throws IOException {
+            throw new UnsupportedOperationException();
+        }
+    }
+}
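For orientation, here is a minimal sketch of how the Section write methods added above might be driven. The file name and the offset passed to open() are invented for illustration; the calls themselves are the ones introduced in this patch.

    import com.android.dx.io.DexBuffer;
    import java.io.File;
    import java.io.IOException;

    // Hypothetical driver for DexBuffer.Section's write methods.
    public class SectionWriteSketch {
        public static void main(String[] args) throws IOException {
            DexBuffer dex = new DexBuffer();
            dex.loadFrom(new File("classes.dex"));   // load an existing dex into memory (assumed path)
            DexBuffer.Section out = dex.open(0x70);  // open a writer at an assumed offset
            out.writeUleb128(3);                     // a ULEB128-encoded count
            out.writeStringData("hello");            // ULEB128 length, MUTF-8 bytes, trailing NUL
            out.alignToFourBytes();                  // zero-pad up to the next 4-byte boundary
            out.assertFourByteAligned();
        }
    }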
diff --git a/dx/src/com/android/dx/io/DexHasher.java b/dx/src/com/android/dx/io/DexHasher.java
new file mode 100644
index 0000000..416b3e2
--- /dev/null
+++ b/dx/src/com/android/dx/io/DexHasher.java
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io;
+
+import java.io.IOException;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.util.zip.Adler32;
+
+/**
+ * Generates and stores the checksum and signature of a dex file.
+ */
+public final class DexHasher {
+    private static final int CHECKSUM_OFFSET = 8;
+    private static final int CHECKSUM_SIZE = 4;
+    private static final int SIGNATURE_OFFSET = CHECKSUM_OFFSET + CHECKSUM_SIZE;
+    private static final int SIGNATURE_SIZE = 20;
+
+    /**
+     * Returns the SHA-1 signature of all but the first 32 bytes of
+     * {@code dex}. The first 32 bytes (the magic, checksum and signature
+     * fields of the header) are excluded from the signature computation.
+     */
+    public byte[] computeSignature(DexBuffer dex) throws IOException {
+        MessageDigest digest;
+        try {
+            digest = MessageDigest.getInstance("SHA-1");
+        } catch (NoSuchAlgorithmException e) {
+            throw new AssertionError();
+        }
+        int offset = SIGNATURE_OFFSET + SIGNATURE_SIZE;
+
+        byte[] bytes = dex.getBytes();
+        digest.update(bytes, offset, bytes.length - offset);
+        return digest.digest();
+    }
+
+    /**
+     * Returns the checksum of all but the first 12 bytes of {@code dex}.
+     */
+    public int computeChecksum(DexBuffer dex) throws IOException {
+        Adler32 adler32 = new Adler32();
+        int offset = CHECKSUM_OFFSET + CHECKSUM_SIZE;
+
+        byte[] bytes = dex.getBytes();
+        adler32.update(bytes, offset, bytes.length - offset);
+        return (int) adler32.getValue();
+    }
+
+    /**
+     * Computes the signature and checksum of the dex file {@code dex} and
+     * writes them into its header.
+     */
+    public void writeHashes(DexBuffer dex) throws IOException {
+        byte[] signature = computeSignature(dex);
+        dex.open(SIGNATURE_OFFSET).write(signature);
+
+        int checksum = computeChecksum(dex);
+        dex.open(CHECKSUM_OFFSET).writeInt(checksum);
+    }
+}
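One ordering detail worth calling out: the Adler-32 checksum covers everything after the first 12 bytes of the file, which includes the 20-byte signature field, so the signature must be written before the checksum is computed; writeHashes() does this in the required order. A minimal usage sketch, with the input file name invented:

    import com.android.dx.io.DexBuffer;
    import com.android.dx.io.DexHasher;
    import java.io.File;
    import java.io.IOException;

    // Recompute the header hashes after a dex buffer has been modified.
    public class RehashSketch {
        public static void main(String[] args) throws IOException {
            DexBuffer dex = new DexBuffer();
            dex.loadFrom(new File("classes.dex"));  // hypothetical input
            new DexHasher().writeHashes(dex);       // SHA-1 signature first, then Adler-32 checksum
        }
    }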
diff --git a/dx/src/com/android/dx/io/DexIndexPrinter.java b/dx/src/com/android/dx/io/DexIndexPrinter.java
new file mode 100644
index 0000000..a6040f1
--- /dev/null
+++ b/dx/src/com/android/dx/io/DexIndexPrinter.java
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io;
+
+import com.android.dx.dex.TableOfContents;
+import java.io.File;
+import java.io.IOException;
+
+/**
+ * Executable that prints all indices of a dex file.
+ */
+public final class DexIndexPrinter {
+    private final DexBuffer dexBuffer;
+    private final TableOfContents tableOfContents;
+
+    public DexIndexPrinter(File file) throws IOException {
+        this.dexBuffer = new DexBuffer();
+        this.dexBuffer.loadFrom(file);
+        this.tableOfContents = dexBuffer.getTableOfContents();
+    }
+
+    private void printMap() {
+        for (TableOfContents.Section section : tableOfContents.sections) {
+            if (section.off != -1) {
+                System.out.println("section " + Integer.toHexString(section.type)
+                        + " off=" + Integer.toHexString(section.off)
+                        + " size=" + Integer.toHexString(section.size)
+                        + " byteCount=" + Integer.toHexString(section.byteCount));
+            }
+        }
+    }
+
+    private void printStrings() throws IOException {
+        int index = 0;
+        for (String string : dexBuffer.strings()) {
+            System.out.println("string " + index + ": " + string);
+            index++;
+        }
+    }
+
+    private void printTypeIds() throws IOException {
+        int index = 0;
+        for (Integer type : dexBuffer.typeIds()) {
+            System.out.println("type " + index + ": " + dexBuffer.strings().get(type));
+            index++;
+        }
+    }
+
+    private void printProtoIds() throws IOException {
+        int index = 0;
+        for (ProtoId protoId : dexBuffer.protoIds()) {
+            System.out.println("proto " + index + ": " + protoId);
+            index++;
+        }
+    }
+
+    private void printFieldIds() throws IOException {
+        int index = 0;
+        for (FieldId fieldId : dexBuffer.fieldIds()) {
+            System.out.println("field " + index + ": " + fieldId);
+            index++;
+        }
+    }
+
+    private void printMethodIds() throws IOException {
+        int index = 0;
+        for (MethodId methodId : dexBuffer.methodIds()) {
+            System.out.println("methodId " + index + ": " + methodId);
+            index++;
+        }
+    }
+
+    private void printTypeLists() throws IOException {
+        if (tableOfContents.typeLists.off == -1) {
+            System.out.println("No type lists");
+            return;
+        }
+        DexBuffer.Section in = dexBuffer.open(tableOfContents.typeLists.off);
+        for (int i = 0; i < tableOfContents.typeLists.size; i++) {
+            int size = in.readInt();
+            System.out.print("Type list i=" + i + ", size=" + size + ", elements=");
+            for (int t = 0; t < size; t++) {
+                System.out.print(" " + dexBuffer.typeNames().get((int) in.readShort()));
+            }
+            if (size % 2 == 1) {
+                in.readShort(); // retain alignment
+            }
+            System.out.println();
+        }
+    }
+
+    private void printClassDefs() {
+        int index = 0;
+        for (ClassDef classDef : dexBuffer.classDefs()) {
+            System.out.println("class def " + index + ": " + classDef);
+            index++;
+        }
+    }
+
+    public static void main(String[] args) throws IOException {
+        DexIndexPrinter indexPrinter = new DexIndexPrinter(new File(args[0]));
+        indexPrinter.printMap();
+        indexPrinter.printStrings();
+        indexPrinter.printTypeIds();
+        indexPrinter.printProtoIds();
+        indexPrinter.printFieldIds();
+        indexPrinter.printMethodIds();
+        indexPrinter.printTypeLists();
+        indexPrinter.printClassDefs();
+    }
+}
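DexIndexPrinter is intended to be run directly; main() takes the path of a .dex file as its only argument. For example (jar name and classpath are illustrative):

    java -classpath dx.jar com.android.dx.io.DexIndexPrinter classes.dex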
diff --git a/dx/src/com/android/dx/io/EncodedValueReader.java b/dx/src/com/android/dx/io/EncodedValueReader.java
new file mode 100644
index 0000000..6c913a1
--- /dev/null
+++ b/dx/src/com/android/dx/io/EncodedValueReader.java
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io;
+
+/**
+ * SAX-style reader for encoded values.
+ * TODO: convert this to a pull-style reader
+ */
+public class EncodedValueReader {
+    public static final int ENCODED_BYTE = 0x00;
+    public static final int ENCODED_SHORT = 0x02;
+    public static final int ENCODED_CHAR = 0x03;
+    public static final int ENCODED_INT = 0x04;
+    public static final int ENCODED_LONG = 0x06;
+    public static final int ENCODED_FLOAT = 0x10;
+    public static final int ENCODED_DOUBLE = 0x11;
+    public static final int ENCODED_STRING = 0x17;
+    public static final int ENCODED_TYPE = 0x18;
+    public static final int ENCODED_FIELD = 0x19;
+    public static final int ENCODED_ENUM = 0x1b;
+    public static final int ENCODED_METHOD = 0x1a;
+    public static final int ENCODED_ARRAY = 0x1c;
+    public static final int ENCODED_ANNOTATION = 0x1d;
+    public static final int ENCODED_NULL = 0x1e;
+    public static final int ENCODED_BOOLEAN = 0x1f;
+
+    protected final DexBuffer.Section in;
+
+    public EncodedValueReader(DexBuffer.Section in) {
+        this.in = in;
+    }
+
+    public final void readArray() {
+        int size = in.readUleb128();
+        visitArray(size);
+
+        for (int i = 0; i < size; i++) {
+            readValue();
+        }
+    }
+
+    public final void readAnnotation() {
+        int typeIndex = in.readUleb128();
+        int size = in.readUleb128();
+        visitAnnotation(typeIndex, size);
+
+        for (int i = 0; i < size; i++) {
+            visitAnnotationName(in.readUleb128());
+            readValue();
+        }
+    }
+
+    public final void readValue() {
+        int argAndType = in.readByte() & 0xff;
+        int type = argAndType & 0x1f;
+        int arg = (argAndType & 0xe0) >> 5;
+        int size = arg + 1;
+
+        switch (type) {
+        case ENCODED_BYTE:
+        case ENCODED_SHORT:
+        case ENCODED_CHAR:
+        case ENCODED_INT:
+        case ENCODED_LONG:
+        case ENCODED_FLOAT:
+        case ENCODED_DOUBLE:
+            visitPrimitive(argAndType, type, arg, size);
+            break;
+        case ENCODED_STRING:
+            visitString(type, readIndex(in, size));
+            break;
+        case ENCODED_TYPE:
+            visitType(type, readIndex(in, size));
+            break;
+        case ENCODED_FIELD:
+        case ENCODED_ENUM:
+            visitField(type, readIndex(in, size));
+            break;
+        case ENCODED_METHOD:
+            visitMethod(type, readIndex(in, size));
+            break;
+        case ENCODED_ARRAY:
+            visitArrayValue(argAndType);
+            readArray();
+            break;
+        case ENCODED_ANNOTATION:
+            visitAnnotationValue(argAndType);
+            readAnnotation();
+            break;
+        case ENCODED_NULL:
+            visitEncodedNull(argAndType);
+            break;
+        case ENCODED_BOOLEAN:
+            visitEncodedBoolean(argAndType);
+            break;
+        }
+    }
+
+    protected void visitArray(int size) {}
+    protected void visitAnnotation(int typeIndex, int size) {}
+    protected void visitAnnotationName(int nameIndex) {}
+    protected void visitPrimitive(int argAndType, int type, int arg, int size) {
+        for (int i = 0; i < size; i++) {
+            in.readByte();
+        }
+    }
+    protected void visitString(int type, int index) {}
+    protected void visitType(int type, int index) {}
+    protected void visitField(int type, int index) {}
+    protected void visitMethod(int type, int index) {}
+    protected void visitArrayValue(int argAndType) {}
+    protected void visitAnnotationValue(int argAndType) {}
+    protected void visitEncodedBoolean(int argAndType) {}
+    protected void visitEncodedNull(int argAndType) {}
+
+    private int readIndex(DexBuffer.Section in, int byteCount) {
+        int result = 0;
+        int shift = 0;
+        for (int i = 0; i < byteCount; i++) {
+            result += (in.readByte() & 0xff) << shift;
+            shift += 8;
+        }
+        return result;
+    }
+}
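In readValue() above, the low five bits of the leading byte select the value type and the high three bits hold (size - 1) for the fixed-width primitive, string, type, field, enum and method forms. As a sketch of how the visitor hooks can be used, here is a hypothetical subclass that only records string indices; the dex buffer and offset in the trailing usage comment are assumptions:

    import com.android.dx.io.DexBuffer;
    import com.android.dx.io.EncodedValueReader;
    import java.util.ArrayList;
    import java.util.List;

    // Collects the string indices referenced by an encoded array.
    class StringIndexCollector extends EncodedValueReader {
        final List<Integer> stringIndexes = new ArrayList<Integer>();

        StringIndexCollector(DexBuffer.Section in) {
            super(in);
        }

        @Override protected void visitString(int type, int index) {
            stringIndexes.add(index);
        }
    }

    // Usage sketch: new StringIndexCollector(dex.open(encodedArrayOffset)).readArray();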
diff --git a/dx/src/com/android/dx/io/FieldId.java b/dx/src/com/android/dx/io/FieldId.java
new file mode 100644
index 0000000..ab481e0
--- /dev/null
+++ b/dx/src/com/android/dx/io/FieldId.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io;
+
+import com.android.dx.util.Unsigned;
+
+public final class FieldId implements Comparable<FieldId> {
+    private final DexBuffer buffer;
+    private final short declaringClassIndex;
+    private final short typeIndex;
+    private final int nameIndex;
+
+    public FieldId(DexBuffer buffer, short declaringClassIndex, short typeIndex, int nameIndex) {
+        this.buffer = buffer;
+        this.declaringClassIndex = declaringClassIndex;
+        this.typeIndex = typeIndex;
+        this.nameIndex = nameIndex;
+    }
+
+    public short getDeclaringClassIndex() {
+        return declaringClassIndex;
+    }
+
+    public short getTypeIndex() {
+        return typeIndex;
+    }
+
+    public int getNameIndex() {
+        return nameIndex;
+    }
+
+    public int compareTo(FieldId other) {
+        if (declaringClassIndex != other.declaringClassIndex) {
+            return Unsigned.compare(declaringClassIndex, other.declaringClassIndex);
+        }
+        if (nameIndex != other.nameIndex) {
+            return Unsigned.compare(nameIndex, other.nameIndex);
+        }
+        return Unsigned.compare(typeIndex, other.typeIndex); // should always be 0
+    }
+
+    public void writeTo(DexBuffer.Section out) {
+        out.writeShort(declaringClassIndex);
+        out.writeShort(typeIndex);
+        out.writeInt(nameIndex);
+    }
+
+    @Override public String toString() {
+        if (buffer == null) {
+            return declaringClassIndex + " " + typeIndex + " " + nameIndex;
+        }
+        return buffer.typeNames().get(declaringClassIndex)
+                + " { " + buffer.typeNames().get(typeIndex)
+                + " " + buffer.strings().get(nameIndex) + " }";
+    }
+}
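The compareTo() ordering above (defining class, then name, then type, each compared as an unsigned index) mirrors the sort order the dex format requires for the field_ids section; MethodId below does the same for method_ids. A tiny self-contained example with made-up indices (a null buffer is fine here because it is only consulted by toString()):

    import com.android.dx.io.FieldId;

    public class FieldIdOrderSketch {
        public static void main(String[] args) {
            // Same declaring class and type, different name index, so 'a' sorts before 'b'.
            FieldId a = new FieldId(null, (short) 1, (short) 5, 7);
            FieldId b = new FieldId(null, (short) 1, (short) 5, 9);
            System.out.println(a.compareTo(b) < 0);  // prints "true"
        }
    }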
diff --git a/dx/src/com/android/dx/io/IndexType.java b/dx/src/com/android/dx/io/IndexType.java
new file mode 100644
index 0000000..bbddfa8
--- /dev/null
+++ b/dx/src/com/android/dx/io/IndexType.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io;
+
+/**
+ * The various types that an index in a Dalvik instruction might refer to.
+ */
+public enum IndexType {
+    /** "Unknown." Used for undefined opcodes. */
+    UNKNOWN,
+
+    /** no index used */
+    NONE,
+
+    /** "It depends." Used for {@code throw-verification-error}. */
+    VARIES,
+
+    /** type reference index */
+    TYPE_REF,
+
+    /** string reference index */
+    STRING_REF,
+
+    /** method reference index */
+    METHOD_REF,
+
+    /** field reference index */
+    FIELD_REF,
+
+    /** inline method index (for inline linked method invocations) */
+    INLINE_METHOD,
+
+    /** direct vtable offset (for static linked method invocations) */
+    VTABLE_OFFSET,
+
+    /** direct field offset (for static linked field accesses) */
+    FIELD_OFFSET;
+}
diff --git a/dx/src/com/android/dx/io/MethodId.java b/dx/src/com/android/dx/io/MethodId.java
new file mode 100644
index 0000000..2934497
--- /dev/null
+++ b/dx/src/com/android/dx/io/MethodId.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io;
+
+import com.android.dx.util.Unsigned;
+
+public final class MethodId implements Comparable<MethodId> {
+    private final DexBuffer buffer;
+    private final short declaringClassIndex;
+    private final short protoIndex;
+    private final int nameIndex;
+
+    public MethodId(DexBuffer buffer, short declaringClassIndex, short protoIndex, int nameIndex) {
+        this.buffer = buffer;
+        this.declaringClassIndex = declaringClassIndex;
+        this.protoIndex = protoIndex;
+        this.nameIndex = nameIndex;
+    }
+
+    public short getDeclaringClassIndex() {
+        return declaringClassIndex;
+    }
+
+    public short getProtoIndex() {
+        return protoIndex;
+    }
+
+    public int getNameIndex() {
+        return nameIndex;
+    }
+
+    public int compareTo(MethodId other) {
+        if (declaringClassIndex != other.declaringClassIndex) {
+            return Unsigned.compare(declaringClassIndex, other.declaringClassIndex);
+        }
+        if (nameIndex != other.nameIndex) {
+            return Unsigned.compare(nameIndex, other.nameIndex);
+        }
+        return Unsigned.compare(protoIndex, other.protoIndex);
+    }
+
+    public void writeTo(DexBuffer.Section out) {
+        out.writeShort(declaringClassIndex);
+        out.writeShort(protoIndex);
+        out.writeInt(nameIndex);
+    }
+
+    @Override public String toString() {
+        if (buffer == null) {
+            return declaringClassIndex + " " + protoIndex + " " + nameIndex;
+        }
+        return buffer.typeNames().get(declaringClassIndex)
+                + " " + buffer.protoIds().get(protoIndex)
+                + " " + buffer.strings().get(nameIndex);
+    }
+}
diff --git a/dx/src/com/android/dx/io/OpcodeInfo.java b/dx/src/com/android/dx/io/OpcodeInfo.java
new file mode 100644
index 0000000..c8fcf25
--- /dev/null
+++ b/dx/src/com/android/dx/io/OpcodeInfo.java
@@ -0,0 +1,1460 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io;
+
+import com.android.dx.io.instructions.InstructionCodec;
+import com.android.dx.util.Hex;
+
+/**
+ * Information about each Dalvik opcode.
+ */
+public final class OpcodeInfo {
+    /*
+     * TODO: Merge at least most of the info from the Dops class into
+     * this one.
+     */
+
+    /** non-null; array containing all the information */
+    private static final Info[] INFO;
+
+    /**
+     * pseudo-opcode used for nonstandard formatted "instructions"
+     * (which are mostly not actually instructions, though they do
+     * appear in instruction lists). TODO: Retire the usage of this
+     * constant.
+     */
+    public static final Info SPECIAL_FORMAT =
+        new Info(Opcodes.SPECIAL_FORMAT, "<special>",
+                InstructionCodec.FORMAT_00X, IndexType.NONE);
+
+    // TODO: These payload opcodes should be generated by opcode-gen.
+
+    public static final Info PACKED_SWITCH_PAYLOAD =
+        new Info(Opcodes.PACKED_SWITCH_PAYLOAD, "packed-switch-payload",
+                InstructionCodec.FORMAT_PACKED_SWITCH_PAYLOAD,
+                IndexType.NONE);
+
+    public static final Info SPARSE_SWITCH_PAYLOAD =
+        new Info(Opcodes.SPARSE_SWITCH_PAYLOAD, "sparse-switch-payload",
+                InstructionCodec.FORMAT_SPARSE_SWITCH_PAYLOAD,
+                IndexType.NONE);
+
+    public static final Info FILL_ARRAY_DATA_PAYLOAD =
+        new Info(Opcodes.FILL_ARRAY_DATA_PAYLOAD, "fill-array-data-payload",
+                InstructionCodec.FORMAT_FILL_ARRAY_DATA_PAYLOAD,
+                IndexType.NONE);
+
+    // BEGIN(opcode-info-defs); GENERATED AUTOMATICALLY BY opcode-gen
+    public static final Info NOP =
+        new Info(Opcodes.NOP, "nop",
+            InstructionCodec.FORMAT_10X, IndexType.NONE);
+
+    public static final Info MOVE =
+        new Info(Opcodes.MOVE, "move",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info MOVE_FROM16 =
+        new Info(Opcodes.MOVE_FROM16, "move/from16",
+            InstructionCodec.FORMAT_22X, IndexType.NONE);
+
+    public static final Info MOVE_16 =
+        new Info(Opcodes.MOVE_16, "move/16",
+            InstructionCodec.FORMAT_32X, IndexType.NONE);
+
+    public static final Info MOVE_WIDE =
+        new Info(Opcodes.MOVE_WIDE, "move-wide",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info MOVE_WIDE_FROM16 =
+        new Info(Opcodes.MOVE_WIDE_FROM16, "move-wide/from16",
+            InstructionCodec.FORMAT_22X, IndexType.NONE);
+
+    public static final Info MOVE_WIDE_16 =
+        new Info(Opcodes.MOVE_WIDE_16, "move-wide/16",
+            InstructionCodec.FORMAT_32X, IndexType.NONE);
+
+    public static final Info MOVE_OBJECT =
+        new Info(Opcodes.MOVE_OBJECT, "move-object",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info MOVE_OBJECT_FROM16 =
+        new Info(Opcodes.MOVE_OBJECT_FROM16, "move-object/from16",
+            InstructionCodec.FORMAT_22X, IndexType.NONE);
+
+    public static final Info MOVE_OBJECT_16 =
+        new Info(Opcodes.MOVE_OBJECT_16, "move-object/16",
+            InstructionCodec.FORMAT_32X, IndexType.NONE);
+
+    public static final Info MOVE_RESULT =
+        new Info(Opcodes.MOVE_RESULT, "move-result",
+            InstructionCodec.FORMAT_11X, IndexType.NONE);
+
+    public static final Info MOVE_RESULT_WIDE =
+        new Info(Opcodes.MOVE_RESULT_WIDE, "move-result-wide",
+            InstructionCodec.FORMAT_11X, IndexType.NONE);
+
+    public static final Info MOVE_RESULT_OBJECT =
+        new Info(Opcodes.MOVE_RESULT_OBJECT, "move-result-object",
+            InstructionCodec.FORMAT_11X, IndexType.NONE);
+
+    public static final Info MOVE_EXCEPTION =
+        new Info(Opcodes.MOVE_EXCEPTION, "move-exception",
+            InstructionCodec.FORMAT_11X, IndexType.NONE);
+
+    public static final Info RETURN_VOID =
+        new Info(Opcodes.RETURN_VOID, "return-void",
+            InstructionCodec.FORMAT_10X, IndexType.NONE);
+
+    public static final Info RETURN =
+        new Info(Opcodes.RETURN, "return",
+            InstructionCodec.FORMAT_11X, IndexType.NONE);
+
+    public static final Info RETURN_WIDE =
+        new Info(Opcodes.RETURN_WIDE, "return-wide",
+            InstructionCodec.FORMAT_11X, IndexType.NONE);
+
+    public static final Info RETURN_OBJECT =
+        new Info(Opcodes.RETURN_OBJECT, "return-object",
+            InstructionCodec.FORMAT_11X, IndexType.NONE);
+
+    public static final Info CONST_4 =
+        new Info(Opcodes.CONST_4, "const/4",
+            InstructionCodec.FORMAT_11N, IndexType.NONE);
+
+    public static final Info CONST_16 =
+        new Info(Opcodes.CONST_16, "const/16",
+            InstructionCodec.FORMAT_21S, IndexType.NONE);
+
+    public static final Info CONST =
+        new Info(Opcodes.CONST, "const",
+            InstructionCodec.FORMAT_31I, IndexType.NONE);
+
+    public static final Info CONST_HIGH16 =
+        new Info(Opcodes.CONST_HIGH16, "const/high16",
+            InstructionCodec.FORMAT_21H, IndexType.NONE);
+
+    public static final Info CONST_WIDE_16 =
+        new Info(Opcodes.CONST_WIDE_16, "const-wide/16",
+            InstructionCodec.FORMAT_21S, IndexType.NONE);
+
+    public static final Info CONST_WIDE_32 =
+        new Info(Opcodes.CONST_WIDE_32, "const-wide/32",
+            InstructionCodec.FORMAT_31I, IndexType.NONE);
+
+    public static final Info CONST_WIDE =
+        new Info(Opcodes.CONST_WIDE, "const-wide",
+            InstructionCodec.FORMAT_51L, IndexType.NONE);
+
+    public static final Info CONST_WIDE_HIGH16 =
+        new Info(Opcodes.CONST_WIDE_HIGH16, "const-wide/high16",
+            InstructionCodec.FORMAT_21H, IndexType.NONE);
+
+    public static final Info CONST_STRING =
+        new Info(Opcodes.CONST_STRING, "const-string",
+            InstructionCodec.FORMAT_21C, IndexType.STRING_REF);
+
+    public static final Info CONST_STRING_JUMBO =
+        new Info(Opcodes.CONST_STRING_JUMBO, "const-string/jumbo",
+            InstructionCodec.FORMAT_31C, IndexType.STRING_REF);
+
+    public static final Info CONST_CLASS =
+        new Info(Opcodes.CONST_CLASS, "const-class",
+            InstructionCodec.FORMAT_21C, IndexType.TYPE_REF);
+
+    public static final Info MONITOR_ENTER =
+        new Info(Opcodes.MONITOR_ENTER, "monitor-enter",
+            InstructionCodec.FORMAT_11X, IndexType.NONE);
+
+    public static final Info MONITOR_EXIT =
+        new Info(Opcodes.MONITOR_EXIT, "monitor-exit",
+            InstructionCodec.FORMAT_11X, IndexType.NONE);
+
+    public static final Info CHECK_CAST =
+        new Info(Opcodes.CHECK_CAST, "check-cast",
+            InstructionCodec.FORMAT_21C, IndexType.TYPE_REF);
+
+    public static final Info INSTANCE_OF =
+        new Info(Opcodes.INSTANCE_OF, "instance-of",
+            InstructionCodec.FORMAT_22C, IndexType.TYPE_REF);
+
+    public static final Info ARRAY_LENGTH =
+        new Info(Opcodes.ARRAY_LENGTH, "array-length",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info NEW_INSTANCE =
+        new Info(Opcodes.NEW_INSTANCE, "new-instance",
+            InstructionCodec.FORMAT_21C, IndexType.TYPE_REF);
+
+    public static final Info NEW_ARRAY =
+        new Info(Opcodes.NEW_ARRAY, "new-array",
+            InstructionCodec.FORMAT_22C, IndexType.TYPE_REF);
+
+    public static final Info FILLED_NEW_ARRAY =
+        new Info(Opcodes.FILLED_NEW_ARRAY, "filled-new-array",
+            InstructionCodec.FORMAT_35C, IndexType.TYPE_REF);
+
+    public static final Info FILLED_NEW_ARRAY_RANGE =
+        new Info(Opcodes.FILLED_NEW_ARRAY_RANGE, "filled-new-array/range",
+            InstructionCodec.FORMAT_3RC, IndexType.TYPE_REF);
+
+    public static final Info FILL_ARRAY_DATA =
+        new Info(Opcodes.FILL_ARRAY_DATA, "fill-array-data",
+            InstructionCodec.FORMAT_31T, IndexType.NONE);
+
+    public static final Info THROW =
+        new Info(Opcodes.THROW, "throw",
+            InstructionCodec.FORMAT_11X, IndexType.NONE);
+
+    public static final Info GOTO =
+        new Info(Opcodes.GOTO, "goto",
+            InstructionCodec.FORMAT_10T, IndexType.NONE);
+
+    public static final Info GOTO_16 =
+        new Info(Opcodes.GOTO_16, "goto/16",
+            InstructionCodec.FORMAT_20T, IndexType.NONE);
+
+    public static final Info GOTO_32 =
+        new Info(Opcodes.GOTO_32, "goto/32",
+            InstructionCodec.FORMAT_30T, IndexType.NONE);
+
+    public static final Info PACKED_SWITCH =
+        new Info(Opcodes.PACKED_SWITCH, "packed-switch",
+            InstructionCodec.FORMAT_31T, IndexType.NONE);
+
+    public static final Info SPARSE_SWITCH =
+        new Info(Opcodes.SPARSE_SWITCH, "sparse-switch",
+            InstructionCodec.FORMAT_31T, IndexType.NONE);
+
+    public static final Info CMPL_FLOAT =
+        new Info(Opcodes.CMPL_FLOAT, "cmpl-float",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info CMPG_FLOAT =
+        new Info(Opcodes.CMPG_FLOAT, "cmpg-float",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info CMPL_DOUBLE =
+        new Info(Opcodes.CMPL_DOUBLE, "cmpl-double",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info CMPG_DOUBLE =
+        new Info(Opcodes.CMPG_DOUBLE, "cmpg-double",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info CMP_LONG =
+        new Info(Opcodes.CMP_LONG, "cmp-long",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info IF_EQ =
+        new Info(Opcodes.IF_EQ, "if-eq",
+            InstructionCodec.FORMAT_22T, IndexType.NONE);
+
+    public static final Info IF_NE =
+        new Info(Opcodes.IF_NE, "if-ne",
+            InstructionCodec.FORMAT_22T, IndexType.NONE);
+
+    public static final Info IF_LT =
+        new Info(Opcodes.IF_LT, "if-lt",
+            InstructionCodec.FORMAT_22T, IndexType.NONE);
+
+    public static final Info IF_GE =
+        new Info(Opcodes.IF_GE, "if-ge",
+            InstructionCodec.FORMAT_22T, IndexType.NONE);
+
+    public static final Info IF_GT =
+        new Info(Opcodes.IF_GT, "if-gt",
+            InstructionCodec.FORMAT_22T, IndexType.NONE);
+
+    public static final Info IF_LE =
+        new Info(Opcodes.IF_LE, "if-le",
+            InstructionCodec.FORMAT_22T, IndexType.NONE);
+
+    public static final Info IF_EQZ =
+        new Info(Opcodes.IF_EQZ, "if-eqz",
+            InstructionCodec.FORMAT_21T, IndexType.NONE);
+
+    public static final Info IF_NEZ =
+        new Info(Opcodes.IF_NEZ, "if-nez",
+            InstructionCodec.FORMAT_21T, IndexType.NONE);
+
+    public static final Info IF_LTZ =
+        new Info(Opcodes.IF_LTZ, "if-ltz",
+            InstructionCodec.FORMAT_21T, IndexType.NONE);
+
+    public static final Info IF_GEZ =
+        new Info(Opcodes.IF_GEZ, "if-gez",
+            InstructionCodec.FORMAT_21T, IndexType.NONE);
+
+    public static final Info IF_GTZ =
+        new Info(Opcodes.IF_GTZ, "if-gtz",
+            InstructionCodec.FORMAT_21T, IndexType.NONE);
+
+    public static final Info IF_LEZ =
+        new Info(Opcodes.IF_LEZ, "if-lez",
+            InstructionCodec.FORMAT_21T, IndexType.NONE);
+
+    public static final Info AGET =
+        new Info(Opcodes.AGET, "aget",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info AGET_WIDE =
+        new Info(Opcodes.AGET_WIDE, "aget-wide",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info AGET_OBJECT =
+        new Info(Opcodes.AGET_OBJECT, "aget-object",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info AGET_BOOLEAN =
+        new Info(Opcodes.AGET_BOOLEAN, "aget-boolean",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info AGET_BYTE =
+        new Info(Opcodes.AGET_BYTE, "aget-byte",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info AGET_CHAR =
+        new Info(Opcodes.AGET_CHAR, "aget-char",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info AGET_SHORT =
+        new Info(Opcodes.AGET_SHORT, "aget-short",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info APUT =
+        new Info(Opcodes.APUT, "aput",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info APUT_WIDE =
+        new Info(Opcodes.APUT_WIDE, "aput-wide",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info APUT_OBJECT =
+        new Info(Opcodes.APUT_OBJECT, "aput-object",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info APUT_BOOLEAN =
+        new Info(Opcodes.APUT_BOOLEAN, "aput-boolean",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info APUT_BYTE =
+        new Info(Opcodes.APUT_BYTE, "aput-byte",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info APUT_CHAR =
+        new Info(Opcodes.APUT_CHAR, "aput-char",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info APUT_SHORT =
+        new Info(Opcodes.APUT_SHORT, "aput-short",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info IGET =
+        new Info(Opcodes.IGET, "iget",
+            InstructionCodec.FORMAT_22C, IndexType.FIELD_REF);
+
+    public static final Info IGET_WIDE =
+        new Info(Opcodes.IGET_WIDE, "iget-wide",
+            InstructionCodec.FORMAT_22C, IndexType.FIELD_REF);
+
+    public static final Info IGET_OBJECT =
+        new Info(Opcodes.IGET_OBJECT, "iget-object",
+            InstructionCodec.FORMAT_22C, IndexType.FIELD_REF);
+
+    public static final Info IGET_BOOLEAN =
+        new Info(Opcodes.IGET_BOOLEAN, "iget-boolean",
+            InstructionCodec.FORMAT_22C, IndexType.FIELD_REF);
+
+    public static final Info IGET_BYTE =
+        new Info(Opcodes.IGET_BYTE, "iget-byte",
+            InstructionCodec.FORMAT_22C, IndexType.FIELD_REF);
+
+    public static final Info IGET_CHAR =
+        new Info(Opcodes.IGET_CHAR, "iget-char",
+            InstructionCodec.FORMAT_22C, IndexType.FIELD_REF);
+
+    public static final Info IGET_SHORT =
+        new Info(Opcodes.IGET_SHORT, "iget-short",
+            InstructionCodec.FORMAT_22C, IndexType.FIELD_REF);
+
+    public static final Info IPUT =
+        new Info(Opcodes.IPUT, "iput",
+            InstructionCodec.FORMAT_22C, IndexType.FIELD_REF);
+
+    public static final Info IPUT_WIDE =
+        new Info(Opcodes.IPUT_WIDE, "iput-wide",
+            InstructionCodec.FORMAT_22C, IndexType.FIELD_REF);
+
+    public static final Info IPUT_OBJECT =
+        new Info(Opcodes.IPUT_OBJECT, "iput-object",
+            InstructionCodec.FORMAT_22C, IndexType.FIELD_REF);
+
+    public static final Info IPUT_BOOLEAN =
+        new Info(Opcodes.IPUT_BOOLEAN, "iput-boolean",
+            InstructionCodec.FORMAT_22C, IndexType.FIELD_REF);
+
+    public static final Info IPUT_BYTE =
+        new Info(Opcodes.IPUT_BYTE, "iput-byte",
+            InstructionCodec.FORMAT_22C, IndexType.FIELD_REF);
+
+    public static final Info IPUT_CHAR =
+        new Info(Opcodes.IPUT_CHAR, "iput-char",
+            InstructionCodec.FORMAT_22C, IndexType.FIELD_REF);
+
+    public static final Info IPUT_SHORT =
+        new Info(Opcodes.IPUT_SHORT, "iput-short",
+            InstructionCodec.FORMAT_22C, IndexType.FIELD_REF);
+
+    public static final Info SGET =
+        new Info(Opcodes.SGET, "sget",
+            InstructionCodec.FORMAT_21C, IndexType.FIELD_REF);
+
+    public static final Info SGET_WIDE =
+        new Info(Opcodes.SGET_WIDE, "sget-wide",
+            InstructionCodec.FORMAT_21C, IndexType.FIELD_REF);
+
+    public static final Info SGET_OBJECT =
+        new Info(Opcodes.SGET_OBJECT, "sget-object",
+            InstructionCodec.FORMAT_21C, IndexType.FIELD_REF);
+
+    public static final Info SGET_BOOLEAN =
+        new Info(Opcodes.SGET_BOOLEAN, "sget-boolean",
+            InstructionCodec.FORMAT_21C, IndexType.FIELD_REF);
+
+    public static final Info SGET_BYTE =
+        new Info(Opcodes.SGET_BYTE, "sget-byte",
+            InstructionCodec.FORMAT_21C, IndexType.FIELD_REF);
+
+    public static final Info SGET_CHAR =
+        new Info(Opcodes.SGET_CHAR, "sget-char",
+            InstructionCodec.FORMAT_21C, IndexType.FIELD_REF);
+
+    public static final Info SGET_SHORT =
+        new Info(Opcodes.SGET_SHORT, "sget-short",
+            InstructionCodec.FORMAT_21C, IndexType.FIELD_REF);
+
+    public static final Info SPUT =
+        new Info(Opcodes.SPUT, "sput",
+            InstructionCodec.FORMAT_21C, IndexType.FIELD_REF);
+
+    public static final Info SPUT_WIDE =
+        new Info(Opcodes.SPUT_WIDE, "sput-wide",
+            InstructionCodec.FORMAT_21C, IndexType.FIELD_REF);
+
+    public static final Info SPUT_OBJECT =
+        new Info(Opcodes.SPUT_OBJECT, "sput-object",
+            InstructionCodec.FORMAT_21C, IndexType.FIELD_REF);
+
+    public static final Info SPUT_BOOLEAN =
+        new Info(Opcodes.SPUT_BOOLEAN, "sput-boolean",
+            InstructionCodec.FORMAT_21C, IndexType.FIELD_REF);
+
+    public static final Info SPUT_BYTE =
+        new Info(Opcodes.SPUT_BYTE, "sput-byte",
+            InstructionCodec.FORMAT_21C, IndexType.FIELD_REF);
+
+    public static final Info SPUT_CHAR =
+        new Info(Opcodes.SPUT_CHAR, "sput-char",
+            InstructionCodec.FORMAT_21C, IndexType.FIELD_REF);
+
+    public static final Info SPUT_SHORT =
+        new Info(Opcodes.SPUT_SHORT, "sput-short",
+            InstructionCodec.FORMAT_21C, IndexType.FIELD_REF);
+
+    public static final Info INVOKE_VIRTUAL =
+        new Info(Opcodes.INVOKE_VIRTUAL, "invoke-virtual",
+            InstructionCodec.FORMAT_35C, IndexType.METHOD_REF);
+
+    public static final Info INVOKE_SUPER =
+        new Info(Opcodes.INVOKE_SUPER, "invoke-super",
+            InstructionCodec.FORMAT_35C, IndexType.METHOD_REF);
+
+    public static final Info INVOKE_DIRECT =
+        new Info(Opcodes.INVOKE_DIRECT, "invoke-direct",
+            InstructionCodec.FORMAT_35C, IndexType.METHOD_REF);
+
+    public static final Info INVOKE_STATIC =
+        new Info(Opcodes.INVOKE_STATIC, "invoke-static",
+            InstructionCodec.FORMAT_35C, IndexType.METHOD_REF);
+
+    public static final Info INVOKE_INTERFACE =
+        new Info(Opcodes.INVOKE_INTERFACE, "invoke-interface",
+            InstructionCodec.FORMAT_35C, IndexType.METHOD_REF);
+
+    public static final Info INVOKE_VIRTUAL_RANGE =
+        new Info(Opcodes.INVOKE_VIRTUAL_RANGE, "invoke-virtual/range",
+            InstructionCodec.FORMAT_3RC, IndexType.METHOD_REF);
+
+    public static final Info INVOKE_SUPER_RANGE =
+        new Info(Opcodes.INVOKE_SUPER_RANGE, "invoke-super/range",
+            InstructionCodec.FORMAT_3RC, IndexType.METHOD_REF);
+
+    public static final Info INVOKE_DIRECT_RANGE =
+        new Info(Opcodes.INVOKE_DIRECT_RANGE, "invoke-direct/range",
+            InstructionCodec.FORMAT_3RC, IndexType.METHOD_REF);
+
+    public static final Info INVOKE_STATIC_RANGE =
+        new Info(Opcodes.INVOKE_STATIC_RANGE, "invoke-static/range",
+            InstructionCodec.FORMAT_3RC, IndexType.METHOD_REF);
+
+    public static final Info INVOKE_INTERFACE_RANGE =
+        new Info(Opcodes.INVOKE_INTERFACE_RANGE, "invoke-interface/range",
+            InstructionCodec.FORMAT_3RC, IndexType.METHOD_REF);
+
+    public static final Info NEG_INT =
+        new Info(Opcodes.NEG_INT, "neg-int",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info NOT_INT =
+        new Info(Opcodes.NOT_INT, "not-int",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info NEG_LONG =
+        new Info(Opcodes.NEG_LONG, "neg-long",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info NOT_LONG =
+        new Info(Opcodes.NOT_LONG, "not-long",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info NEG_FLOAT =
+        new Info(Opcodes.NEG_FLOAT, "neg-float",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info NEG_DOUBLE =
+        new Info(Opcodes.NEG_DOUBLE, "neg-double",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info INT_TO_LONG =
+        new Info(Opcodes.INT_TO_LONG, "int-to-long",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info INT_TO_FLOAT =
+        new Info(Opcodes.INT_TO_FLOAT, "int-to-float",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info INT_TO_DOUBLE =
+        new Info(Opcodes.INT_TO_DOUBLE, "int-to-double",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info LONG_TO_INT =
+        new Info(Opcodes.LONG_TO_INT, "long-to-int",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info LONG_TO_FLOAT =
+        new Info(Opcodes.LONG_TO_FLOAT, "long-to-float",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info LONG_TO_DOUBLE =
+        new Info(Opcodes.LONG_TO_DOUBLE, "long-to-double",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info FLOAT_TO_INT =
+        new Info(Opcodes.FLOAT_TO_INT, "float-to-int",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info FLOAT_TO_LONG =
+        new Info(Opcodes.FLOAT_TO_LONG, "float-to-long",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info FLOAT_TO_DOUBLE =
+        new Info(Opcodes.FLOAT_TO_DOUBLE, "float-to-double",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info DOUBLE_TO_INT =
+        new Info(Opcodes.DOUBLE_TO_INT, "double-to-int",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info DOUBLE_TO_LONG =
+        new Info(Opcodes.DOUBLE_TO_LONG, "double-to-long",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info DOUBLE_TO_FLOAT =
+        new Info(Opcodes.DOUBLE_TO_FLOAT, "double-to-float",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info INT_TO_BYTE =
+        new Info(Opcodes.INT_TO_BYTE, "int-to-byte",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info INT_TO_CHAR =
+        new Info(Opcodes.INT_TO_CHAR, "int-to-char",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info INT_TO_SHORT =
+        new Info(Opcodes.INT_TO_SHORT, "int-to-short",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info ADD_INT =
+        new Info(Opcodes.ADD_INT, "add-int",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info SUB_INT =
+        new Info(Opcodes.SUB_INT, "sub-int",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info MUL_INT =
+        new Info(Opcodes.MUL_INT, "mul-int",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info DIV_INT =
+        new Info(Opcodes.DIV_INT, "div-int",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info REM_INT =
+        new Info(Opcodes.REM_INT, "rem-int",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info AND_INT =
+        new Info(Opcodes.AND_INT, "and-int",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info OR_INT =
+        new Info(Opcodes.OR_INT, "or-int",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info XOR_INT =
+        new Info(Opcodes.XOR_INT, "xor-int",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info SHL_INT =
+        new Info(Opcodes.SHL_INT, "shl-int",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info SHR_INT =
+        new Info(Opcodes.SHR_INT, "shr-int",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info USHR_INT =
+        new Info(Opcodes.USHR_INT, "ushr-int",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info ADD_LONG =
+        new Info(Opcodes.ADD_LONG, "add-long",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info SUB_LONG =
+        new Info(Opcodes.SUB_LONG, "sub-long",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info MUL_LONG =
+        new Info(Opcodes.MUL_LONG, "mul-long",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info DIV_LONG =
+        new Info(Opcodes.DIV_LONG, "div-long",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info REM_LONG =
+        new Info(Opcodes.REM_LONG, "rem-long",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info AND_LONG =
+        new Info(Opcodes.AND_LONG, "and-long",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info OR_LONG =
+        new Info(Opcodes.OR_LONG, "or-long",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info XOR_LONG =
+        new Info(Opcodes.XOR_LONG, "xor-long",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info SHL_LONG =
+        new Info(Opcodes.SHL_LONG, "shl-long",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info SHR_LONG =
+        new Info(Opcodes.SHR_LONG, "shr-long",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info USHR_LONG =
+        new Info(Opcodes.USHR_LONG, "ushr-long",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info ADD_FLOAT =
+        new Info(Opcodes.ADD_FLOAT, "add-float",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info SUB_FLOAT =
+        new Info(Opcodes.SUB_FLOAT, "sub-float",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info MUL_FLOAT =
+        new Info(Opcodes.MUL_FLOAT, "mul-float",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info DIV_FLOAT =
+        new Info(Opcodes.DIV_FLOAT, "div-float",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info REM_FLOAT =
+        new Info(Opcodes.REM_FLOAT, "rem-float",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info ADD_DOUBLE =
+        new Info(Opcodes.ADD_DOUBLE, "add-double",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info SUB_DOUBLE =
+        new Info(Opcodes.SUB_DOUBLE, "sub-double",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info MUL_DOUBLE =
+        new Info(Opcodes.MUL_DOUBLE, "mul-double",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info DIV_DOUBLE =
+        new Info(Opcodes.DIV_DOUBLE, "div-double",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info REM_DOUBLE =
+        new Info(Opcodes.REM_DOUBLE, "rem-double",
+            InstructionCodec.FORMAT_23X, IndexType.NONE);
+
+    public static final Info ADD_INT_2ADDR =
+        new Info(Opcodes.ADD_INT_2ADDR, "add-int/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info SUB_INT_2ADDR =
+        new Info(Opcodes.SUB_INT_2ADDR, "sub-int/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info MUL_INT_2ADDR =
+        new Info(Opcodes.MUL_INT_2ADDR, "mul-int/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info DIV_INT_2ADDR =
+        new Info(Opcodes.DIV_INT_2ADDR, "div-int/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info REM_INT_2ADDR =
+        new Info(Opcodes.REM_INT_2ADDR, "rem-int/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info AND_INT_2ADDR =
+        new Info(Opcodes.AND_INT_2ADDR, "and-int/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info OR_INT_2ADDR =
+        new Info(Opcodes.OR_INT_2ADDR, "or-int/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info XOR_INT_2ADDR =
+        new Info(Opcodes.XOR_INT_2ADDR, "xor-int/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info SHL_INT_2ADDR =
+        new Info(Opcodes.SHL_INT_2ADDR, "shl-int/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info SHR_INT_2ADDR =
+        new Info(Opcodes.SHR_INT_2ADDR, "shr-int/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info USHR_INT_2ADDR =
+        new Info(Opcodes.USHR_INT_2ADDR, "ushr-int/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info ADD_LONG_2ADDR =
+        new Info(Opcodes.ADD_LONG_2ADDR, "add-long/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info SUB_LONG_2ADDR =
+        new Info(Opcodes.SUB_LONG_2ADDR, "sub-long/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info MUL_LONG_2ADDR =
+        new Info(Opcodes.MUL_LONG_2ADDR, "mul-long/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info DIV_LONG_2ADDR =
+        new Info(Opcodes.DIV_LONG_2ADDR, "div-long/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info REM_LONG_2ADDR =
+        new Info(Opcodes.REM_LONG_2ADDR, "rem-long/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info AND_LONG_2ADDR =
+        new Info(Opcodes.AND_LONG_2ADDR, "and-long/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info OR_LONG_2ADDR =
+        new Info(Opcodes.OR_LONG_2ADDR, "or-long/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info XOR_LONG_2ADDR =
+        new Info(Opcodes.XOR_LONG_2ADDR, "xor-long/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info SHL_LONG_2ADDR =
+        new Info(Opcodes.SHL_LONG_2ADDR, "shl-long/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info SHR_LONG_2ADDR =
+        new Info(Opcodes.SHR_LONG_2ADDR, "shr-long/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info USHR_LONG_2ADDR =
+        new Info(Opcodes.USHR_LONG_2ADDR, "ushr-long/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info ADD_FLOAT_2ADDR =
+        new Info(Opcodes.ADD_FLOAT_2ADDR, "add-float/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info SUB_FLOAT_2ADDR =
+        new Info(Opcodes.SUB_FLOAT_2ADDR, "sub-float/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info MUL_FLOAT_2ADDR =
+        new Info(Opcodes.MUL_FLOAT_2ADDR, "mul-float/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info DIV_FLOAT_2ADDR =
+        new Info(Opcodes.DIV_FLOAT_2ADDR, "div-float/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info REM_FLOAT_2ADDR =
+        new Info(Opcodes.REM_FLOAT_2ADDR, "rem-float/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info ADD_DOUBLE_2ADDR =
+        new Info(Opcodes.ADD_DOUBLE_2ADDR, "add-double/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info SUB_DOUBLE_2ADDR =
+        new Info(Opcodes.SUB_DOUBLE_2ADDR, "sub-double/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info MUL_DOUBLE_2ADDR =
+        new Info(Opcodes.MUL_DOUBLE_2ADDR, "mul-double/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info DIV_DOUBLE_2ADDR =
+        new Info(Opcodes.DIV_DOUBLE_2ADDR, "div-double/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info REM_DOUBLE_2ADDR =
+        new Info(Opcodes.REM_DOUBLE_2ADDR, "rem-double/2addr",
+            InstructionCodec.FORMAT_12X, IndexType.NONE);
+
+    public static final Info ADD_INT_LIT16 =
+        new Info(Opcodes.ADD_INT_LIT16, "add-int/lit16",
+            InstructionCodec.FORMAT_22S, IndexType.NONE);
+
+    public static final Info RSUB_INT =
+        new Info(Opcodes.RSUB_INT, "rsub-int",
+            InstructionCodec.FORMAT_22S, IndexType.NONE);
+
+    public static final Info MUL_INT_LIT16 =
+        new Info(Opcodes.MUL_INT_LIT16, "mul-int/lit16",
+            InstructionCodec.FORMAT_22S, IndexType.NONE);
+
+    public static final Info DIV_INT_LIT16 =
+        new Info(Opcodes.DIV_INT_LIT16, "div-int/lit16",
+            InstructionCodec.FORMAT_22S, IndexType.NONE);
+
+    public static final Info REM_INT_LIT16 =
+        new Info(Opcodes.REM_INT_LIT16, "rem-int/lit16",
+            InstructionCodec.FORMAT_22S, IndexType.NONE);
+
+    public static final Info AND_INT_LIT16 =
+        new Info(Opcodes.AND_INT_LIT16, "and-int/lit16",
+            InstructionCodec.FORMAT_22S, IndexType.NONE);
+
+    public static final Info OR_INT_LIT16 =
+        new Info(Opcodes.OR_INT_LIT16, "or-int/lit16",
+            InstructionCodec.FORMAT_22S, IndexType.NONE);
+
+    public static final Info XOR_INT_LIT16 =
+        new Info(Opcodes.XOR_INT_LIT16, "xor-int/lit16",
+            InstructionCodec.FORMAT_22S, IndexType.NONE);
+
+    public static final Info ADD_INT_LIT8 =
+        new Info(Opcodes.ADD_INT_LIT8, "add-int/lit8",
+            InstructionCodec.FORMAT_22B, IndexType.NONE);
+
+    public static final Info RSUB_INT_LIT8 =
+        new Info(Opcodes.RSUB_INT_LIT8, "rsub-int/lit8",
+            InstructionCodec.FORMAT_22B, IndexType.NONE);
+
+    public static final Info MUL_INT_LIT8 =
+        new Info(Opcodes.MUL_INT_LIT8, "mul-int/lit8",
+            InstructionCodec.FORMAT_22B, IndexType.NONE);
+
+    public static final Info DIV_INT_LIT8 =
+        new Info(Opcodes.DIV_INT_LIT8, "div-int/lit8",
+            InstructionCodec.FORMAT_22B, IndexType.NONE);
+
+    public static final Info REM_INT_LIT8 =
+        new Info(Opcodes.REM_INT_LIT8, "rem-int/lit8",
+            InstructionCodec.FORMAT_22B, IndexType.NONE);
+
+    public static final Info AND_INT_LIT8 =
+        new Info(Opcodes.AND_INT_LIT8, "and-int/lit8",
+            InstructionCodec.FORMAT_22B, IndexType.NONE);
+
+    public static final Info OR_INT_LIT8 =
+        new Info(Opcodes.OR_INT_LIT8, "or-int/lit8",
+            InstructionCodec.FORMAT_22B, IndexType.NONE);
+
+    public static final Info XOR_INT_LIT8 =
+        new Info(Opcodes.XOR_INT_LIT8, "xor-int/lit8",
+            InstructionCodec.FORMAT_22B, IndexType.NONE);
+
+    public static final Info SHL_INT_LIT8 =
+        new Info(Opcodes.SHL_INT_LIT8, "shl-int/lit8",
+            InstructionCodec.FORMAT_22B, IndexType.NONE);
+
+    public static final Info SHR_INT_LIT8 =
+        new Info(Opcodes.SHR_INT_LIT8, "shr-int/lit8",
+            InstructionCodec.FORMAT_22B, IndexType.NONE);
+
+    public static final Info USHR_INT_LIT8 =
+        new Info(Opcodes.USHR_INT_LIT8, "ushr-int/lit8",
+            InstructionCodec.FORMAT_22B, IndexType.NONE);
+
+    public static final Info CONST_CLASS_JUMBO =
+        new Info(Opcodes.CONST_CLASS_JUMBO, "const-class/jumbo",
+            InstructionCodec.FORMAT_41C, IndexType.TYPE_REF);
+
+    public static final Info CHECK_CAST_JUMBO =
+        new Info(Opcodes.CHECK_CAST_JUMBO, "check-cast/jumbo",
+            InstructionCodec.FORMAT_41C, IndexType.TYPE_REF);
+
+    public static final Info INSTANCE_OF_JUMBO =
+        new Info(Opcodes.INSTANCE_OF_JUMBO, "instance-of/jumbo",
+            InstructionCodec.FORMAT_52C, IndexType.TYPE_REF);
+
+    public static final Info NEW_INSTANCE_JUMBO =
+        new Info(Opcodes.NEW_INSTANCE_JUMBO, "new-instance/jumbo",
+            InstructionCodec.FORMAT_41C, IndexType.TYPE_REF);
+
+    public static final Info NEW_ARRAY_JUMBO =
+        new Info(Opcodes.NEW_ARRAY_JUMBO, "new-array/jumbo",
+            InstructionCodec.FORMAT_52C, IndexType.TYPE_REF);
+
+    public static final Info FILLED_NEW_ARRAY_JUMBO =
+        new Info(Opcodes.FILLED_NEW_ARRAY_JUMBO, "filled-new-array/jumbo",
+            InstructionCodec.FORMAT_5RC, IndexType.TYPE_REF);
+
+    public static final Info IGET_JUMBO =
+        new Info(Opcodes.IGET_JUMBO, "iget/jumbo",
+            InstructionCodec.FORMAT_52C, IndexType.FIELD_REF);
+
+    public static final Info IGET_WIDE_JUMBO =
+        new Info(Opcodes.IGET_WIDE_JUMBO, "iget-wide/jumbo",
+            InstructionCodec.FORMAT_52C, IndexType.FIELD_REF);
+
+    public static final Info IGET_OBJECT_JUMBO =
+        new Info(Opcodes.IGET_OBJECT_JUMBO, "iget-object/jumbo",
+            InstructionCodec.FORMAT_52C, IndexType.FIELD_REF);
+
+    public static final Info IGET_BOOLEAN_JUMBO =
+        new Info(Opcodes.IGET_BOOLEAN_JUMBO, "iget-boolean/jumbo",
+            InstructionCodec.FORMAT_52C, IndexType.FIELD_REF);
+
+    public static final Info IGET_BYTE_JUMBO =
+        new Info(Opcodes.IGET_BYTE_JUMBO, "iget-byte/jumbo",
+            InstructionCodec.FORMAT_52C, IndexType.FIELD_REF);
+
+    public static final Info IGET_CHAR_JUMBO =
+        new Info(Opcodes.IGET_CHAR_JUMBO, "iget-char/jumbo",
+            InstructionCodec.FORMAT_52C, IndexType.FIELD_REF);
+
+    public static final Info IGET_SHORT_JUMBO =
+        new Info(Opcodes.IGET_SHORT_JUMBO, "iget-short/jumbo",
+            InstructionCodec.FORMAT_52C, IndexType.FIELD_REF);
+
+    public static final Info IPUT_JUMBO =
+        new Info(Opcodes.IPUT_JUMBO, "iput/jumbo",
+            InstructionCodec.FORMAT_52C, IndexType.FIELD_REF);
+
+    public static final Info IPUT_WIDE_JUMBO =
+        new Info(Opcodes.IPUT_WIDE_JUMBO, "iput-wide/jumbo",
+            InstructionCodec.FORMAT_52C, IndexType.FIELD_REF);
+
+    public static final Info IPUT_OBJECT_JUMBO =
+        new Info(Opcodes.IPUT_OBJECT_JUMBO, "iput-object/jumbo",
+            InstructionCodec.FORMAT_52C, IndexType.FIELD_REF);
+
+    public static final Info IPUT_BOOLEAN_JUMBO =
+        new Info(Opcodes.IPUT_BOOLEAN_JUMBO, "iput-boolean/jumbo",
+            InstructionCodec.FORMAT_52C, IndexType.FIELD_REF);
+
+    public static final Info IPUT_BYTE_JUMBO =
+        new Info(Opcodes.IPUT_BYTE_JUMBO, "iput-byte/jumbo",
+            InstructionCodec.FORMAT_52C, IndexType.FIELD_REF);
+
+    public static final Info IPUT_CHAR_JUMBO =
+        new Info(Opcodes.IPUT_CHAR_JUMBO, "iput-char/jumbo",
+            InstructionCodec.FORMAT_52C, IndexType.FIELD_REF);
+
+    public static final Info IPUT_SHORT_JUMBO =
+        new Info(Opcodes.IPUT_SHORT_JUMBO, "iput-short/jumbo",
+            InstructionCodec.FORMAT_52C, IndexType.FIELD_REF);
+
+    public static final Info SGET_JUMBO =
+        new Info(Opcodes.SGET_JUMBO, "sget/jumbo",
+            InstructionCodec.FORMAT_41C, IndexType.FIELD_REF);
+
+    public static final Info SGET_WIDE_JUMBO =
+        new Info(Opcodes.SGET_WIDE_JUMBO, "sget-wide/jumbo",
+            InstructionCodec.FORMAT_41C, IndexType.FIELD_REF);
+
+    public static final Info SGET_OBJECT_JUMBO =
+        new Info(Opcodes.SGET_OBJECT_JUMBO, "sget-object/jumbo",
+            InstructionCodec.FORMAT_41C, IndexType.FIELD_REF);
+
+    public static final Info SGET_BOOLEAN_JUMBO =
+        new Info(Opcodes.SGET_BOOLEAN_JUMBO, "sget-boolean/jumbo",
+            InstructionCodec.FORMAT_41C, IndexType.FIELD_REF);
+
+    public static final Info SGET_BYTE_JUMBO =
+        new Info(Opcodes.SGET_BYTE_JUMBO, "sget-byte/jumbo",
+            InstructionCodec.FORMAT_41C, IndexType.FIELD_REF);
+
+    public static final Info SGET_CHAR_JUMBO =
+        new Info(Opcodes.SGET_CHAR_JUMBO, "sget-char/jumbo",
+            InstructionCodec.FORMAT_41C, IndexType.FIELD_REF);
+
+    public static final Info SGET_SHORT_JUMBO =
+        new Info(Opcodes.SGET_SHORT_JUMBO, "sget-short/jumbo",
+            InstructionCodec.FORMAT_41C, IndexType.FIELD_REF);
+
+    public static final Info SPUT_JUMBO =
+        new Info(Opcodes.SPUT_JUMBO, "sput/jumbo",
+            InstructionCodec.FORMAT_41C, IndexType.FIELD_REF);
+
+    public static final Info SPUT_WIDE_JUMBO =
+        new Info(Opcodes.SPUT_WIDE_JUMBO, "sput-wide/jumbo",
+            InstructionCodec.FORMAT_41C, IndexType.FIELD_REF);
+
+    public static final Info SPUT_OBJECT_JUMBO =
+        new Info(Opcodes.SPUT_OBJECT_JUMBO, "sput-object/jumbo",
+            InstructionCodec.FORMAT_41C, IndexType.FIELD_REF);
+
+    public static final Info SPUT_BOOLEAN_JUMBO =
+        new Info(Opcodes.SPUT_BOOLEAN_JUMBO, "sput-boolean/jumbo",
+            InstructionCodec.FORMAT_41C, IndexType.FIELD_REF);
+
+    public static final Info SPUT_BYTE_JUMBO =
+        new Info(Opcodes.SPUT_BYTE_JUMBO, "sput-byte/jumbo",
+            InstructionCodec.FORMAT_41C, IndexType.FIELD_REF);
+
+    public static final Info SPUT_CHAR_JUMBO =
+        new Info(Opcodes.SPUT_CHAR_JUMBO, "sput-char/jumbo",
+            InstructionCodec.FORMAT_41C, IndexType.FIELD_REF);
+
+    public static final Info SPUT_SHORT_JUMBO =
+        new Info(Opcodes.SPUT_SHORT_JUMBO, "sput-short/jumbo",
+            InstructionCodec.FORMAT_41C, IndexType.FIELD_REF);
+
+    public static final Info INVOKE_VIRTUAL_JUMBO =
+        new Info(Opcodes.INVOKE_VIRTUAL_JUMBO, "invoke-virtual/jumbo",
+            InstructionCodec.FORMAT_5RC, IndexType.METHOD_REF);
+
+    public static final Info INVOKE_SUPER_JUMBO =
+        new Info(Opcodes.INVOKE_SUPER_JUMBO, "invoke-super/jumbo",
+            InstructionCodec.FORMAT_5RC, IndexType.METHOD_REF);
+
+    public static final Info INVOKE_DIRECT_JUMBO =
+        new Info(Opcodes.INVOKE_DIRECT_JUMBO, "invoke-direct/jumbo",
+            InstructionCodec.FORMAT_5RC, IndexType.METHOD_REF);
+
+    public static final Info INVOKE_STATIC_JUMBO =
+        new Info(Opcodes.INVOKE_STATIC_JUMBO, "invoke-static/jumbo",
+            InstructionCodec.FORMAT_5RC, IndexType.METHOD_REF);
+
+    public static final Info INVOKE_INTERFACE_JUMBO =
+        new Info(Opcodes.INVOKE_INTERFACE_JUMBO, "invoke-interface/jumbo",
+            InstructionCodec.FORMAT_5RC, IndexType.METHOD_REF);
+
+    // END(opcode-info-defs)
+
+    // Static initialization.
+    static {
+        INFO = new Info[Opcodes.MAX_VALUE - Opcodes.MIN_VALUE + 1];
+
+        // TODO: Stop using this constant.
+        set(SPECIAL_FORMAT);
+
+        // TODO: These payload opcodes should be generated by opcode-gen.
+        set(PACKED_SWITCH_PAYLOAD);
+        set(SPARSE_SWITCH_PAYLOAD);
+        set(FILL_ARRAY_DATA_PAYLOAD);
+
+        // BEGIN(opcode-info-init); GENERATED AUTOMATICALLY BY opcode-gen
+        set(NOP);
+        set(MOVE);
+        set(MOVE_FROM16);
+        set(MOVE_16);
+        set(MOVE_WIDE);
+        set(MOVE_WIDE_FROM16);
+        set(MOVE_WIDE_16);
+        set(MOVE_OBJECT);
+        set(MOVE_OBJECT_FROM16);
+        set(MOVE_OBJECT_16);
+        set(MOVE_RESULT);
+        set(MOVE_RESULT_WIDE);
+        set(MOVE_RESULT_OBJECT);
+        set(MOVE_EXCEPTION);
+        set(RETURN_VOID);
+        set(RETURN);
+        set(RETURN_WIDE);
+        set(RETURN_OBJECT);
+        set(CONST_4);
+        set(CONST_16);
+        set(CONST);
+        set(CONST_HIGH16);
+        set(CONST_WIDE_16);
+        set(CONST_WIDE_32);
+        set(CONST_WIDE);
+        set(CONST_WIDE_HIGH16);
+        set(CONST_STRING);
+        set(CONST_STRING_JUMBO);
+        set(CONST_CLASS);
+        set(MONITOR_ENTER);
+        set(MONITOR_EXIT);
+        set(CHECK_CAST);
+        set(INSTANCE_OF);
+        set(ARRAY_LENGTH);
+        set(NEW_INSTANCE);
+        set(NEW_ARRAY);
+        set(FILLED_NEW_ARRAY);
+        set(FILLED_NEW_ARRAY_RANGE);
+        set(FILL_ARRAY_DATA);
+        set(THROW);
+        set(GOTO);
+        set(GOTO_16);
+        set(GOTO_32);
+        set(PACKED_SWITCH);
+        set(SPARSE_SWITCH);
+        set(CMPL_FLOAT);
+        set(CMPG_FLOAT);
+        set(CMPL_DOUBLE);
+        set(CMPG_DOUBLE);
+        set(CMP_LONG);
+        set(IF_EQ);
+        set(IF_NE);
+        set(IF_LT);
+        set(IF_GE);
+        set(IF_GT);
+        set(IF_LE);
+        set(IF_EQZ);
+        set(IF_NEZ);
+        set(IF_LTZ);
+        set(IF_GEZ);
+        set(IF_GTZ);
+        set(IF_LEZ);
+        set(AGET);
+        set(AGET_WIDE);
+        set(AGET_OBJECT);
+        set(AGET_BOOLEAN);
+        set(AGET_BYTE);
+        set(AGET_CHAR);
+        set(AGET_SHORT);
+        set(APUT);
+        set(APUT_WIDE);
+        set(APUT_OBJECT);
+        set(APUT_BOOLEAN);
+        set(APUT_BYTE);
+        set(APUT_CHAR);
+        set(APUT_SHORT);
+        set(IGET);
+        set(IGET_WIDE);
+        set(IGET_OBJECT);
+        set(IGET_BOOLEAN);
+        set(IGET_BYTE);
+        set(IGET_CHAR);
+        set(IGET_SHORT);
+        set(IPUT);
+        set(IPUT_WIDE);
+        set(IPUT_OBJECT);
+        set(IPUT_BOOLEAN);
+        set(IPUT_BYTE);
+        set(IPUT_CHAR);
+        set(IPUT_SHORT);
+        set(SGET);
+        set(SGET_WIDE);
+        set(SGET_OBJECT);
+        set(SGET_BOOLEAN);
+        set(SGET_BYTE);
+        set(SGET_CHAR);
+        set(SGET_SHORT);
+        set(SPUT);
+        set(SPUT_WIDE);
+        set(SPUT_OBJECT);
+        set(SPUT_BOOLEAN);
+        set(SPUT_BYTE);
+        set(SPUT_CHAR);
+        set(SPUT_SHORT);
+        set(INVOKE_VIRTUAL);
+        set(INVOKE_SUPER);
+        set(INVOKE_DIRECT);
+        set(INVOKE_STATIC);
+        set(INVOKE_INTERFACE);
+        set(INVOKE_VIRTUAL_RANGE);
+        set(INVOKE_SUPER_RANGE);
+        set(INVOKE_DIRECT_RANGE);
+        set(INVOKE_STATIC_RANGE);
+        set(INVOKE_INTERFACE_RANGE);
+        set(NEG_INT);
+        set(NOT_INT);
+        set(NEG_LONG);
+        set(NOT_LONG);
+        set(NEG_FLOAT);
+        set(NEG_DOUBLE);
+        set(INT_TO_LONG);
+        set(INT_TO_FLOAT);
+        set(INT_TO_DOUBLE);
+        set(LONG_TO_INT);
+        set(LONG_TO_FLOAT);
+        set(LONG_TO_DOUBLE);
+        set(FLOAT_TO_INT);
+        set(FLOAT_TO_LONG);
+        set(FLOAT_TO_DOUBLE);
+        set(DOUBLE_TO_INT);
+        set(DOUBLE_TO_LONG);
+        set(DOUBLE_TO_FLOAT);
+        set(INT_TO_BYTE);
+        set(INT_TO_CHAR);
+        set(INT_TO_SHORT);
+        set(ADD_INT);
+        set(SUB_INT);
+        set(MUL_INT);
+        set(DIV_INT);
+        set(REM_INT);
+        set(AND_INT);
+        set(OR_INT);
+        set(XOR_INT);
+        set(SHL_INT);
+        set(SHR_INT);
+        set(USHR_INT);
+        set(ADD_LONG);
+        set(SUB_LONG);
+        set(MUL_LONG);
+        set(DIV_LONG);
+        set(REM_LONG);
+        set(AND_LONG);
+        set(OR_LONG);
+        set(XOR_LONG);
+        set(SHL_LONG);
+        set(SHR_LONG);
+        set(USHR_LONG);
+        set(ADD_FLOAT);
+        set(SUB_FLOAT);
+        set(MUL_FLOAT);
+        set(DIV_FLOAT);
+        set(REM_FLOAT);
+        set(ADD_DOUBLE);
+        set(SUB_DOUBLE);
+        set(MUL_DOUBLE);
+        set(DIV_DOUBLE);
+        set(REM_DOUBLE);
+        set(ADD_INT_2ADDR);
+        set(SUB_INT_2ADDR);
+        set(MUL_INT_2ADDR);
+        set(DIV_INT_2ADDR);
+        set(REM_INT_2ADDR);
+        set(AND_INT_2ADDR);
+        set(OR_INT_2ADDR);
+        set(XOR_INT_2ADDR);
+        set(SHL_INT_2ADDR);
+        set(SHR_INT_2ADDR);
+        set(USHR_INT_2ADDR);
+        set(ADD_LONG_2ADDR);
+        set(SUB_LONG_2ADDR);
+        set(MUL_LONG_2ADDR);
+        set(DIV_LONG_2ADDR);
+        set(REM_LONG_2ADDR);
+        set(AND_LONG_2ADDR);
+        set(OR_LONG_2ADDR);
+        set(XOR_LONG_2ADDR);
+        set(SHL_LONG_2ADDR);
+        set(SHR_LONG_2ADDR);
+        set(USHR_LONG_2ADDR);
+        set(ADD_FLOAT_2ADDR);
+        set(SUB_FLOAT_2ADDR);
+        set(MUL_FLOAT_2ADDR);
+        set(DIV_FLOAT_2ADDR);
+        set(REM_FLOAT_2ADDR);
+        set(ADD_DOUBLE_2ADDR);
+        set(SUB_DOUBLE_2ADDR);
+        set(MUL_DOUBLE_2ADDR);
+        set(DIV_DOUBLE_2ADDR);
+        set(REM_DOUBLE_2ADDR);
+        set(ADD_INT_LIT16);
+        set(RSUB_INT);
+        set(MUL_INT_LIT16);
+        set(DIV_INT_LIT16);
+        set(REM_INT_LIT16);
+        set(AND_INT_LIT16);
+        set(OR_INT_LIT16);
+        set(XOR_INT_LIT16);
+        set(ADD_INT_LIT8);
+        set(RSUB_INT_LIT8);
+        set(MUL_INT_LIT8);
+        set(DIV_INT_LIT8);
+        set(REM_INT_LIT8);
+        set(AND_INT_LIT8);
+        set(OR_INT_LIT8);
+        set(XOR_INT_LIT8);
+        set(SHL_INT_LIT8);
+        set(SHR_INT_LIT8);
+        set(USHR_INT_LIT8);
+        set(CONST_CLASS_JUMBO);
+        set(CHECK_CAST_JUMBO);
+        set(INSTANCE_OF_JUMBO);
+        set(NEW_INSTANCE_JUMBO);
+        set(NEW_ARRAY_JUMBO);
+        set(FILLED_NEW_ARRAY_JUMBO);
+        set(IGET_JUMBO);
+        set(IGET_WIDE_JUMBO);
+        set(IGET_OBJECT_JUMBO);
+        set(IGET_BOOLEAN_JUMBO);
+        set(IGET_BYTE_JUMBO);
+        set(IGET_CHAR_JUMBO);
+        set(IGET_SHORT_JUMBO);
+        set(IPUT_JUMBO);
+        set(IPUT_WIDE_JUMBO);
+        set(IPUT_OBJECT_JUMBO);
+        set(IPUT_BOOLEAN_JUMBO);
+        set(IPUT_BYTE_JUMBO);
+        set(IPUT_CHAR_JUMBO);
+        set(IPUT_SHORT_JUMBO);
+        set(SGET_JUMBO);
+        set(SGET_WIDE_JUMBO);
+        set(SGET_OBJECT_JUMBO);
+        set(SGET_BOOLEAN_JUMBO);
+        set(SGET_BYTE_JUMBO);
+        set(SGET_CHAR_JUMBO);
+        set(SGET_SHORT_JUMBO);
+        set(SPUT_JUMBO);
+        set(SPUT_WIDE_JUMBO);
+        set(SPUT_OBJECT_JUMBO);
+        set(SPUT_BOOLEAN_JUMBO);
+        set(SPUT_BYTE_JUMBO);
+        set(SPUT_CHAR_JUMBO);
+        set(SPUT_SHORT_JUMBO);
+        set(INVOKE_VIRTUAL_JUMBO);
+        set(INVOKE_SUPER_JUMBO);
+        set(INVOKE_DIRECT_JUMBO);
+        set(INVOKE_STATIC_JUMBO);
+        set(INVOKE_INTERFACE_JUMBO);
+        // END(opcode-info-init)
+    }
+
+    /**
+     * This class is uninstantiable.
+     */
+    private OpcodeInfo() {
+        // This space intentionally left blank.
+    }
+
+    /**
+     * Gets the {@link Info} for the given opcode value.
+     *
+     * @param opcode {@code Opcodes.MIN_VALUE..Opcodes.MAX_VALUE;} the
+     * opcode value
+     * @return non-null; the associated opcode information instance
+     */
+    public static Info get(int opcode) {
+        int idx = opcode - Opcodes.MIN_VALUE;
+
+        try {
+            Info result = INFO[idx];
+            if (result != null) {
+                return result;
+            }
+        } catch (ArrayIndexOutOfBoundsException ex) {
+            // Fall through.
+        }
+
+        throw new IllegalArgumentException("bogus opcode: "
+                + Hex.u2or4(opcode));
+    }
+
+    /**
+     * Gets the name of the given opcode.
+     */
+    public static String getName(int opcode) {
+        return get(opcode).getName();
+    }
+
+    /**
+     * Gets the format (an {@link InstructionCodec}) for the given opcode
+     * value.
+     */
+    public static InstructionCodec getFormat(int opcode) {
+        return get(opcode).getFormat();
+    }
+
+    /**
+     * Gets the {@link IndexType} for the given opcode value.
+     */
+    public static IndexType getIndexType(int opcode) {
+        return get(opcode).getIndexType();
+    }
+
+    /**
+     * Puts the given opcode into the table of all ops.
+     *
+     * @param opcode non-null; the opcode
+     */
+    private static void set(Info opcode) {
+        int idx = opcode.getOpcode() - Opcodes.MIN_VALUE;
+        INFO[idx] = opcode;
+    }
+
+    /**
+     * Information about an opcode.
+     */
+    public static class Info {
+        private final int opcode;
+        private final String name;
+        private final InstructionCodec format;
+        private final IndexType indexType;
+
+        public Info(int opcode, String name, InstructionCodec format,
+                IndexType indexType) {
+            this.opcode = opcode;
+            this.name = name;
+            this.format = format;
+            this.indexType = indexType;
+        }
+
+        public int getOpcode() {
+            return opcode;
+        }
+
+        public String getName() {
+            return name;
+        }
+
+        public InstructionCodec getFormat() {
+            return format;
+        }
+
+        public IndexType getIndexType() {
+            return indexType;
+        }
+    }
+}
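For reference, the lookup table defined above can be exercised as follows. This is a minimal sketch; the OpcodeInfoDemo class name is illustrative and not part of the patch.

    import com.android.dx.io.OpcodeInfo;
    import com.android.dx.io.Opcodes;

    public class OpcodeInfoDemo {
        public static void main(String[] args) {
            // Each opcode value maps to one Info instance registered by set().
            OpcodeInfo.Info info = OpcodeInfo.get(Opcodes.ADD_INT_LIT8);
            System.out.println(info.getName());      // add-int/lit8
            System.out.println(info.getIndexType()); // NONE
            // The convenience statics delegate to get():
            System.out.println(OpcodeInfo.getName(Opcodes.SGET_JUMBO)); // sget/jumbo
            // Values with no table entry throw IllegalArgumentException:
            // OpcodeInfo.get(0x1234); // "bogus opcode: ..."
        }
    }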
diff --git a/dx/src/com/android/dx/io/Opcodes.java b/dx/src/com/android/dx/io/Opcodes.java
new file mode 100644
index 0000000..bf0aa66
--- /dev/null
+++ b/dx/src/com/android/dx/io/Opcodes.java
@@ -0,0 +1,378 @@
+/*
+ * Copyright (C) 2007 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io;
+
+/**
+ * All the Dalvik opcode value constants. See the related spec
+ * document for the meaning and instruction format of each opcode.
+ */
+public final class Opcodes {
+    /**
+     * pseudo-opcode used for nonstandard format payload "instructions". TODO:
+     * Retire this concept, and start treating the payload instructions
+     * more like the rest.
+     */
+    public static final int SPECIAL_FORMAT = -1;
+
+    /**
+     * pseudo-opcode used to indicate there is no next opcode; used
+     * in opcode chaining lists
+     */
+    public static final int NO_NEXT = -1;
+
+    /** minimum valid opcode value */
+    public static final int MIN_VALUE = -1;
+
+    /** maximum valid opcode value */
+    public static final int MAX_VALUE = 0xffff;
+
+    // BEGIN(opcodes); GENERATED AUTOMATICALLY BY opcode-gen
+    public static final int NOP = 0x00;
+    public static final int MOVE = 0x01;
+    public static final int MOVE_FROM16 = 0x02;
+    public static final int MOVE_16 = 0x03;
+    public static final int MOVE_WIDE = 0x04;
+    public static final int MOVE_WIDE_FROM16 = 0x05;
+    public static final int MOVE_WIDE_16 = 0x06;
+    public static final int MOVE_OBJECT = 0x07;
+    public static final int MOVE_OBJECT_FROM16 = 0x08;
+    public static final int MOVE_OBJECT_16 = 0x09;
+    public static final int MOVE_RESULT = 0x0a;
+    public static final int MOVE_RESULT_WIDE = 0x0b;
+    public static final int MOVE_RESULT_OBJECT = 0x0c;
+    public static final int MOVE_EXCEPTION = 0x0d;
+    public static final int RETURN_VOID = 0x0e;
+    public static final int RETURN = 0x0f;
+    public static final int RETURN_WIDE = 0x10;
+    public static final int RETURN_OBJECT = 0x11;
+    public static final int CONST_4 = 0x12;
+    public static final int CONST_16 = 0x13;
+    public static final int CONST = 0x14;
+    public static final int CONST_HIGH16 = 0x15;
+    public static final int CONST_WIDE_16 = 0x16;
+    public static final int CONST_WIDE_32 = 0x17;
+    public static final int CONST_WIDE = 0x18;
+    public static final int CONST_WIDE_HIGH16 = 0x19;
+    public static final int CONST_STRING = 0x1a;
+    public static final int CONST_STRING_JUMBO = 0x1b;
+    public static final int CONST_CLASS = 0x1c;
+    public static final int MONITOR_ENTER = 0x1d;
+    public static final int MONITOR_EXIT = 0x1e;
+    public static final int CHECK_CAST = 0x1f;
+    public static final int INSTANCE_OF = 0x20;
+    public static final int ARRAY_LENGTH = 0x21;
+    public static final int NEW_INSTANCE = 0x22;
+    public static final int NEW_ARRAY = 0x23;
+    public static final int FILLED_NEW_ARRAY = 0x24;
+    public static final int FILLED_NEW_ARRAY_RANGE = 0x25;
+    public static final int FILL_ARRAY_DATA = 0x26;
+    public static final int THROW = 0x27;
+    public static final int GOTO = 0x28;
+    public static final int GOTO_16 = 0x29;
+    public static final int GOTO_32 = 0x2a;
+    public static final int PACKED_SWITCH = 0x2b;
+    public static final int SPARSE_SWITCH = 0x2c;
+    public static final int CMPL_FLOAT = 0x2d;
+    public static final int CMPG_FLOAT = 0x2e;
+    public static final int CMPL_DOUBLE = 0x2f;
+    public static final int CMPG_DOUBLE = 0x30;
+    public static final int CMP_LONG = 0x31;
+    public static final int IF_EQ = 0x32;
+    public static final int IF_NE = 0x33;
+    public static final int IF_LT = 0x34;
+    public static final int IF_GE = 0x35;
+    public static final int IF_GT = 0x36;
+    public static final int IF_LE = 0x37;
+    public static final int IF_EQZ = 0x38;
+    public static final int IF_NEZ = 0x39;
+    public static final int IF_LTZ = 0x3a;
+    public static final int IF_GEZ = 0x3b;
+    public static final int IF_GTZ = 0x3c;
+    public static final int IF_LEZ = 0x3d;
+    public static final int AGET = 0x44;
+    public static final int AGET_WIDE = 0x45;
+    public static final int AGET_OBJECT = 0x46;
+    public static final int AGET_BOOLEAN = 0x47;
+    public static final int AGET_BYTE = 0x48;
+    public static final int AGET_CHAR = 0x49;
+    public static final int AGET_SHORT = 0x4a;
+    public static final int APUT = 0x4b;
+    public static final int APUT_WIDE = 0x4c;
+    public static final int APUT_OBJECT = 0x4d;
+    public static final int APUT_BOOLEAN = 0x4e;
+    public static final int APUT_BYTE = 0x4f;
+    public static final int APUT_CHAR = 0x50;
+    public static final int APUT_SHORT = 0x51;
+    public static final int IGET = 0x52;
+    public static final int IGET_WIDE = 0x53;
+    public static final int IGET_OBJECT = 0x54;
+    public static final int IGET_BOOLEAN = 0x55;
+    public static final int IGET_BYTE = 0x56;
+    public static final int IGET_CHAR = 0x57;
+    public static final int IGET_SHORT = 0x58;
+    public static final int IPUT = 0x59;
+    public static final int IPUT_WIDE = 0x5a;
+    public static final int IPUT_OBJECT = 0x5b;
+    public static final int IPUT_BOOLEAN = 0x5c;
+    public static final int IPUT_BYTE = 0x5d;
+    public static final int IPUT_CHAR = 0x5e;
+    public static final int IPUT_SHORT = 0x5f;
+    public static final int SGET = 0x60;
+    public static final int SGET_WIDE = 0x61;
+    public static final int SGET_OBJECT = 0x62;
+    public static final int SGET_BOOLEAN = 0x63;
+    public static final int SGET_BYTE = 0x64;
+    public static final int SGET_CHAR = 0x65;
+    public static final int SGET_SHORT = 0x66;
+    public static final int SPUT = 0x67;
+    public static final int SPUT_WIDE = 0x68;
+    public static final int SPUT_OBJECT = 0x69;
+    public static final int SPUT_BOOLEAN = 0x6a;
+    public static final int SPUT_BYTE = 0x6b;
+    public static final int SPUT_CHAR = 0x6c;
+    public static final int SPUT_SHORT = 0x6d;
+    public static final int INVOKE_VIRTUAL = 0x6e;
+    public static final int INVOKE_SUPER = 0x6f;
+    public static final int INVOKE_DIRECT = 0x70;
+    public static final int INVOKE_STATIC = 0x71;
+    public static final int INVOKE_INTERFACE = 0x72;
+    public static final int INVOKE_VIRTUAL_RANGE = 0x74;
+    public static final int INVOKE_SUPER_RANGE = 0x75;
+    public static final int INVOKE_DIRECT_RANGE = 0x76;
+    public static final int INVOKE_STATIC_RANGE = 0x77;
+    public static final int INVOKE_INTERFACE_RANGE = 0x78;
+    public static final int NEG_INT = 0x7b;
+    public static final int NOT_INT = 0x7c;
+    public static final int NEG_LONG = 0x7d;
+    public static final int NOT_LONG = 0x7e;
+    public static final int NEG_FLOAT = 0x7f;
+    public static final int NEG_DOUBLE = 0x80;
+    public static final int INT_TO_LONG = 0x81;
+    public static final int INT_TO_FLOAT = 0x82;
+    public static final int INT_TO_DOUBLE = 0x83;
+    public static final int LONG_TO_INT = 0x84;
+    public static final int LONG_TO_FLOAT = 0x85;
+    public static final int LONG_TO_DOUBLE = 0x86;
+    public static final int FLOAT_TO_INT = 0x87;
+    public static final int FLOAT_TO_LONG = 0x88;
+    public static final int FLOAT_TO_DOUBLE = 0x89;
+    public static final int DOUBLE_TO_INT = 0x8a;
+    public static final int DOUBLE_TO_LONG = 0x8b;
+    public static final int DOUBLE_TO_FLOAT = 0x8c;
+    public static final int INT_TO_BYTE = 0x8d;
+    public static final int INT_TO_CHAR = 0x8e;
+    public static final int INT_TO_SHORT = 0x8f;
+    public static final int ADD_INT = 0x90;
+    public static final int SUB_INT = 0x91;
+    public static final int MUL_INT = 0x92;
+    public static final int DIV_INT = 0x93;
+    public static final int REM_INT = 0x94;
+    public static final int AND_INT = 0x95;
+    public static final int OR_INT = 0x96;
+    public static final int XOR_INT = 0x97;
+    public static final int SHL_INT = 0x98;
+    public static final int SHR_INT = 0x99;
+    public static final int USHR_INT = 0x9a;
+    public static final int ADD_LONG = 0x9b;
+    public static final int SUB_LONG = 0x9c;
+    public static final int MUL_LONG = 0x9d;
+    public static final int DIV_LONG = 0x9e;
+    public static final int REM_LONG = 0x9f;
+    public static final int AND_LONG = 0xa0;
+    public static final int OR_LONG = 0xa1;
+    public static final int XOR_LONG = 0xa2;
+    public static final int SHL_LONG = 0xa3;
+    public static final int SHR_LONG = 0xa4;
+    public static final int USHR_LONG = 0xa5;
+    public static final int ADD_FLOAT = 0xa6;
+    public static final int SUB_FLOAT = 0xa7;
+    public static final int MUL_FLOAT = 0xa8;
+    public static final int DIV_FLOAT = 0xa9;
+    public static final int REM_FLOAT = 0xaa;
+    public static final int ADD_DOUBLE = 0xab;
+    public static final int SUB_DOUBLE = 0xac;
+    public static final int MUL_DOUBLE = 0xad;
+    public static final int DIV_DOUBLE = 0xae;
+    public static final int REM_DOUBLE = 0xaf;
+    public static final int ADD_INT_2ADDR = 0xb0;
+    public static final int SUB_INT_2ADDR = 0xb1;
+    public static final int MUL_INT_2ADDR = 0xb2;
+    public static final int DIV_INT_2ADDR = 0xb3;
+    public static final int REM_INT_2ADDR = 0xb4;
+    public static final int AND_INT_2ADDR = 0xb5;
+    public static final int OR_INT_2ADDR = 0xb6;
+    public static final int XOR_INT_2ADDR = 0xb7;
+    public static final int SHL_INT_2ADDR = 0xb8;
+    public static final int SHR_INT_2ADDR = 0xb9;
+    public static final int USHR_INT_2ADDR = 0xba;
+    public static final int ADD_LONG_2ADDR = 0xbb;
+    public static final int SUB_LONG_2ADDR = 0xbc;
+    public static final int MUL_LONG_2ADDR = 0xbd;
+    public static final int DIV_LONG_2ADDR = 0xbe;
+    public static final int REM_LONG_2ADDR = 0xbf;
+    public static final int AND_LONG_2ADDR = 0xc0;
+    public static final int OR_LONG_2ADDR = 0xc1;
+    public static final int XOR_LONG_2ADDR = 0xc2;
+    public static final int SHL_LONG_2ADDR = 0xc3;
+    public static final int SHR_LONG_2ADDR = 0xc4;
+    public static final int USHR_LONG_2ADDR = 0xc5;
+    public static final int ADD_FLOAT_2ADDR = 0xc6;
+    public static final int SUB_FLOAT_2ADDR = 0xc7;
+    public static final int MUL_FLOAT_2ADDR = 0xc8;
+    public static final int DIV_FLOAT_2ADDR = 0xc9;
+    public static final int REM_FLOAT_2ADDR = 0xca;
+    public static final int ADD_DOUBLE_2ADDR = 0xcb;
+    public static final int SUB_DOUBLE_2ADDR = 0xcc;
+    public static final int MUL_DOUBLE_2ADDR = 0xcd;
+    public static final int DIV_DOUBLE_2ADDR = 0xce;
+    public static final int REM_DOUBLE_2ADDR = 0xcf;
+    public static final int ADD_INT_LIT16 = 0xd0;
+    public static final int RSUB_INT = 0xd1;
+    public static final int MUL_INT_LIT16 = 0xd2;
+    public static final int DIV_INT_LIT16 = 0xd3;
+    public static final int REM_INT_LIT16 = 0xd4;
+    public static final int AND_INT_LIT16 = 0xd5;
+    public static final int OR_INT_LIT16 = 0xd6;
+    public static final int XOR_INT_LIT16 = 0xd7;
+    public static final int ADD_INT_LIT8 = 0xd8;
+    public static final int RSUB_INT_LIT8 = 0xd9;
+    public static final int MUL_INT_LIT8 = 0xda;
+    public static final int DIV_INT_LIT8 = 0xdb;
+    public static final int REM_INT_LIT8 = 0xdc;
+    public static final int AND_INT_LIT8 = 0xdd;
+    public static final int OR_INT_LIT8 = 0xde;
+    public static final int XOR_INT_LIT8 = 0xdf;
+    public static final int SHL_INT_LIT8 = 0xe0;
+    public static final int SHR_INT_LIT8 = 0xe1;
+    public static final int USHR_INT_LIT8 = 0xe2;
+    public static final int CONST_CLASS_JUMBO = 0x00ff;
+    public static final int CHECK_CAST_JUMBO = 0x01ff;
+    public static final int INSTANCE_OF_JUMBO = 0x02ff;
+    public static final int NEW_INSTANCE_JUMBO = 0x03ff;
+    public static final int NEW_ARRAY_JUMBO = 0x04ff;
+    public static final int FILLED_NEW_ARRAY_JUMBO = 0x05ff;
+    public static final int IGET_JUMBO = 0x06ff;
+    public static final int IGET_WIDE_JUMBO = 0x07ff;
+    public static final int IGET_OBJECT_JUMBO = 0x08ff;
+    public static final int IGET_BOOLEAN_JUMBO = 0x09ff;
+    public static final int IGET_BYTE_JUMBO = 0x0aff;
+    public static final int IGET_CHAR_JUMBO = 0x0bff;
+    public static final int IGET_SHORT_JUMBO = 0x0cff;
+    public static final int IPUT_JUMBO = 0x0dff;
+    public static final int IPUT_WIDE_JUMBO = 0x0eff;
+    public static final int IPUT_OBJECT_JUMBO = 0x0fff;
+    public static final int IPUT_BOOLEAN_JUMBO = 0x10ff;
+    public static final int IPUT_BYTE_JUMBO = 0x11ff;
+    public static final int IPUT_CHAR_JUMBO = 0x12ff;
+    public static final int IPUT_SHORT_JUMBO = 0x13ff;
+    public static final int SGET_JUMBO = 0x14ff;
+    public static final int SGET_WIDE_JUMBO = 0x15ff;
+    public static final int SGET_OBJECT_JUMBO = 0x16ff;
+    public static final int SGET_BOOLEAN_JUMBO = 0x17ff;
+    public static final int SGET_BYTE_JUMBO = 0x18ff;
+    public static final int SGET_CHAR_JUMBO = 0x19ff;
+    public static final int SGET_SHORT_JUMBO = 0x1aff;
+    public static final int SPUT_JUMBO = 0x1bff;
+    public static final int SPUT_WIDE_JUMBO = 0x1cff;
+    public static final int SPUT_OBJECT_JUMBO = 0x1dff;
+    public static final int SPUT_BOOLEAN_JUMBO = 0x1eff;
+    public static final int SPUT_BYTE_JUMBO = 0x1fff;
+    public static final int SPUT_CHAR_JUMBO = 0x20ff;
+    public static final int SPUT_SHORT_JUMBO = 0x21ff;
+    public static final int INVOKE_VIRTUAL_JUMBO = 0x22ff;
+    public static final int INVOKE_SUPER_JUMBO = 0x23ff;
+    public static final int INVOKE_DIRECT_JUMBO = 0x24ff;
+    public static final int INVOKE_STATIC_JUMBO = 0x25ff;
+    public static final int INVOKE_INTERFACE_JUMBO = 0x26ff;
+    // END(opcodes)
+
+    // TODO: Generate these payload opcodes with opcode-gen.
+
+    /**
+     * special pseudo-opcode value for packed-switch data payload
+     * instructions
+     */
+    public static final int PACKED_SWITCH_PAYLOAD = 0x100;
+
+    /** special pseudo-opcode value for sparse-switch data payload
+     * instructions
+     */
+    public static final int SPARSE_SWITCH_PAYLOAD = 0x200;
+
+    /** special pseudo-opcode value for fill-array-data data payload
+     * instructions
+     */
+    public static final int FILL_ARRAY_DATA_PAYLOAD = 0x300;
+
+    /**
+     * This class is uninstantiable.
+     */
+    private Opcodes() {
+        // This space intentionally left blank.
+    }
+
+    /**
+     * Determines if the given opcode has the right "shape" to be
+     * valid. This includes the range {@code 0x00..0xfe}, the range
+     * {@code 0x00ff..0xffff} where the low-order byte is {@code
+     * 0xff}, and the special opcode values {@code SPECIAL_FORMAT} and
+     * {@code NO_NEXT}. Note that not all of the opcode values that
+     * pass this test are in fact used. This method is meant to
+     * perform a quick check to reject blatantly wrong values (e.g.
+     * when validating arguments).
+     *
+     * @param opcode the opcode value
+     * @return {@code true} iff the value has the right "shape" to be
+     * possibly valid
+     */
+    public static boolean isValidShape(int opcode) {
+        /*
+         * Note: This method bakes in knowledge that all opcodes are
+         * either single-byte or of the forms (byteValue << 8) or
+         * ((byteValue << 8) | 0xff).
+         */
+
+        // Note: SPECIAL_FORMAT == NO_NEXT.
+        if (opcode < SPECIAL_FORMAT) {
+            return false;
+        } else if (opcode == SPECIAL_FORMAT) {
+            return true;
+        }
+
+        int lowByte = opcode & 0xff;
+        if ((lowByte == 0) || (lowByte == 0xff)) {
+            return true;
+        }
+
+        return (opcode & 0xff00) == 0;
+    }
+
+    /**
+     * Gets the opcode out of an opcode unit, the latter of which may also
+     * include one or more argument values.
+     */
+    public static int extractOpcodeFromUnit(int opcodeUnit) {
+        /*
+         * Note: This method bakes in knowledge that all opcodes are
+         * either single-byte or of the forms (byteValue << 8) or
+         * ((byteValue << 8) | 0xff).
+         */
+
+        int lowByte = opcodeUnit & 0xff;
+        return ((lowByte == 0) || (lowByte == 0xff)) ? opcodeUnit : lowByte;
+    }
+}
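As a quick illustration of the shape rules and unit extraction described in the javadoc above (a hedged sketch; the demo class name is made up):

    import com.android.dx.io.Opcodes;

    public class OpcodesDemo {
        public static void main(String[] args) {
            // Single-byte opcodes and jumbo opcodes (low byte 0xff) have a valid shape.
            System.out.println(Opcodes.isValidShape(Opcodes.ADD_INT));    // true  (0x90)
            System.out.println(Opcodes.isValidShape(Opcodes.SGET_JUMBO)); // true  (0x14ff)
            System.out.println(Opcodes.isValidShape(0x1234));             // false (low byte is neither 0x00 nor 0xff)

            // For single-byte opcodes the high byte of a code unit carries arguments,
            // so it is stripped; jumbo and payload units are returned whole.
            System.out.println(Integer.toHexString(Opcodes.extractOpcodeFromUnit(0x3290))); // 90
            System.out.println(Integer.toHexString(Opcodes.extractOpcodeFromUnit(0x14ff))); // 14ff
        }
    }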
diff --git a/dx/src/com/android/dx/io/ProtoId.java b/dx/src/com/android/dx/io/ProtoId.java
new file mode 100644
index 0000000..98c0777
--- /dev/null
+++ b/dx/src/com/android/dx/io/ProtoId.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io;
+
+import com.android.dx.util.Unsigned;
+
+public final class ProtoId implements Comparable<ProtoId> {
+    private final DexBuffer buffer;
+    private final int shortyIndex;
+    private final int returnTypeIndex;
+    private final int parametersOffset;
+
+    public ProtoId(DexBuffer buffer, int shortyIndex, int returnTypeIndex, int parametersOffset) {
+        this.buffer = buffer;
+        this.shortyIndex = shortyIndex;
+        this.returnTypeIndex = returnTypeIndex;
+        this.parametersOffset = parametersOffset;
+    }
+
+    public int compareTo(ProtoId other) {
+        if (returnTypeIndex != other.returnTypeIndex) {
+            return Unsigned.compare(returnTypeIndex, other.returnTypeIndex);
+        }
+        return Unsigned.compare(parametersOffset, other.parametersOffset);
+    }
+
+    public int getShortyIndex() {
+        return shortyIndex;
+    }
+
+    public int getReturnTypeIndex() {
+        return returnTypeIndex;
+    }
+
+    public int getParametersOffset() {
+        return parametersOffset;
+    }
+
+    public void writeTo(DexBuffer.Section out) {
+        out.writeInt(shortyIndex);
+        out.writeInt(returnTypeIndex);
+        out.writeInt(parametersOffset);
+    }
+
+    @Override public String toString() {
+        if (buffer == null) {
+            return shortyIndex + " " + returnTypeIndex + " " + parametersOffset;
+        }
+
+        return buffer.strings().get(shortyIndex)
+                + ": " + buffer.typeNames().get(returnTypeIndex)
+                + " " + buffer.readTypeList(parametersOffset);
+    }
+}
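A small sketch of the comparison order and the buffer-less toString() path shown above; the values and the ProtoIdDemo class name are illustrative only.

    import com.android.dx.io.ProtoId;

    public class ProtoIdDemo {
        public static void main(String[] args) {
            // Ordering is by return type index first, then parameters offset (both unsigned).
            ProtoId a = new ProtoId(null, 0, 5, 100);
            ProtoId b = new ProtoId(null, 9, 5, 200);
            System.out.println(a.compareTo(b) < 0); // true: same return type, smaller parametersOffset
            System.out.println(a);                  // prints "0 5 100" since no DexBuffer is attached
        }
    }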
diff --git a/dx/src/com/android/dx/io/instructions/AddressMap.java b/dx/src/com/android/dx/io/instructions/AddressMap.java
new file mode 100644
index 0000000..a8dbe0b
--- /dev/null
+++ b/dx/src/com/android/dx/io/instructions/AddressMap.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io.instructions;
+
+import java.io.EOFException;
+import java.util.HashMap;
+
+/**
+ * Map from addresses to addresses, where addresses are all
+ * {@code int}s.
+ */
+public final class AddressMap {
+    /** underlying map. TODO: This might be too inefficient. */
+    private final HashMap<Integer,Integer> map;
+
+    /**
+     * Constructs an instance.
+     */
+    public AddressMap() {
+        map = new HashMap<Integer,Integer>();
+    }
+
+    /**
+     * Gets the value address corresponding to the given key address. Returns
+     * {@code -1} if there is no mapping.
+     */
+    public int get(int keyAddress) {
+        Integer value = map.get(keyAddress);
+        return (value == null) ? -1 : value;
+    }
+
+    /**
+     * Sets the value address associated with the given key address.
+     */
+    public void put(int keyAddress, int valueAddress) {
+        map.put(keyAddress, valueAddress);
+    }
+}
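The -1 sentinel described in the javadoc behaves as follows (a minimal sketch; the demo class name is made up):

    import com.android.dx.io.instructions.AddressMap;

    public class AddressMapDemo {
        public static void main(String[] args) {
            AddressMap map = new AddressMap();
            System.out.println(map.get(10)); // -1: no mapping for this address yet
            map.put(10, 4);                  // e.g. payload at address 10 belongs to the switch at 4
            System.out.println(map.get(10)); // 4
        }
    }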
diff --git a/dx/src/com/android/dx/io/instructions/BaseCodeCursor.java b/dx/src/com/android/dx/io/instructions/BaseCodeCursor.java
new file mode 100644
index 0000000..6915fa8
--- /dev/null
+++ b/dx/src/com/android/dx/io/instructions/BaseCodeCursor.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io.instructions;
+
+import java.io.EOFException;
+
+/**
+ * Base implementation of {@link CodeCursor}.
+ */
+public abstract class BaseCodeCursor implements CodeCursor {
+    /** base address map */
+    private final AddressMap baseAddressMap;
+
+    /** next index within the code to read from or write to */
+    private int cursor;
+
+    /**
+     * Constructs an instance.
+     */
+    public BaseCodeCursor() {
+        this.baseAddressMap = new AddressMap();
+        this.cursor = 0;
+    }
+
+    /** {@inheritDoc} */
+    public final int cursor() {
+        return cursor;
+    }
+
+    /** {@inheritDoc} */
+    public final int baseAddressForCursor() {
+        int mapped = baseAddressMap.get(cursor);
+        return (mapped >= 0) ? mapped : cursor;
+    }
+
+    /** {@inheritDoc} */
+    public final void setBaseAddress(int targetAddress, int baseAddress) {
+        baseAddressMap.put(targetAddress, baseAddress);
+    }
+
+    /**
+     * Advance the cursor by the indicated amount.
+     */
+    protected final void advance(int amount) {
+        cursor += amount;
+    }
+}
diff --git a/dx/src/com/android/dx/io/instructions/CodeCursor.java b/dx/src/com/android/dx/io/instructions/CodeCursor.java
new file mode 100644
index 0000000..68eb9c9
--- /dev/null
+++ b/dx/src/com/android/dx/io/instructions/CodeCursor.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io.instructions;
+
+/**
+ * Cursor over code units, for reading or writing out Dalvik bytecode.
+ */
+public interface CodeCursor {
+    /**
+     * Gets the cursor. The cursor is the offset in code units from
+     * the start of the input of the next code unit to be read or
+     * written, where the input generally consists of the code for a
+     * single method.
+     */
+    public int cursor();
+
+    /**
+     * Gets the base address associated with the current cursor. This
+     * differs from the cursor value when explicitly set (by {@link
+     * #setBaseAddress}). This is used, in particular, to convey base
+     * addresses to switch data payload instructions, whose target
+     * addresses are relative to the address of the dependent switch
+     * instruction.
+     */
+    public int baseAddressForCursor();
+
+    /**
+     * Sets the base address for the given target address to be as indicated.
+     *
+     * @see #baseAddressForCursor
+     */
+    public void setBaseAddress(int targetAddress, int baseAddress);
+}
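In practice, a reader or writer is expected to record the owning switch instruction's address as the base for its payload, roughly like this (a hedged sketch; noteSwitchPayload, switchAddress, and payloadAddress are illustrative names, not part of the interface):

    import com.android.dx.io.instructions.CodeCursor;

    final class SwitchBaseNote {
        // When the cursor later sits at payloadAddress, baseAddressForCursor()
        // returns switchAddress instead of the raw cursor value, so the payload's
        // relative targets resolve against the switch instruction that uses it.
        static void noteSwitchPayload(CodeCursor cursor, int switchAddress, int payloadAddress) {
            cursor.setBaseAddress(payloadAddress, switchAddress);
        }
    }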
diff --git a/dx/src/com/android/dx/io/instructions/CodeInput.java b/dx/src/com/android/dx/io/instructions/CodeInput.java
new file mode 100644
index 0000000..41a5ef7
--- /dev/null
+++ b/dx/src/com/android/dx/io/instructions/CodeInput.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io.instructions;
+
+import java.io.EOFException;
+
+/**
+ * Input stream of code units, for reading in Dalvik bytecode.
+ */
+public interface CodeInput extends CodeCursor {
+    /**
+     * Returns whether there are any more code units to read. This
+     * is analogous to {@code hasNext()} on an iterator.
+     */
+    public boolean hasMore();
+
+    /**
+     * Reads a code unit.
+     */
+    public int read() throws EOFException;
+
+    /**
+     * Reads two code units, treating them as a little-endian {@code int}.
+     */
+    public int readInt() throws EOFException;
+
+    /**
+     * Reads four code units, treating them as a little-endian {@code long}.
+     */
+    public long readLong() throws EOFException;
+}
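The multi-unit reads compose code units little-endian, so a conforming readInt() implementation would behave like this helper (an assumption-level sketch, not part of the patch):

    final class CodeUnits {
        // Combines two consecutive code units, lower unit first, as readInt() is
        // specified to do; readLong() extends the same scheme across four units.
        static int combine(short u0, short u1) {
            return (u0 & 0xffff) | ((u1 & 0xffff) << 16);
        }
    }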
diff --git a/dx/src/com/android/dx/io/instructions/CodeOutput.java b/dx/src/com/android/dx/io/instructions/CodeOutput.java
new file mode 100644
index 0000000..7d0077e
--- /dev/null
+++ b/dx/src/com/android/dx/io/instructions/CodeOutput.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io.instructions;
+
+/**
+ * Output stream of code units, for writing out Dalvik bytecode.
+ */
+public interface CodeOutput extends CodeCursor {
+    /**
+     * Writes a code unit.
+     */
+    public void write(short codeUnit);
+
+    /**
+     * Writes two code units.
+     */
+    public void write(short u0, short u1);
+
+    /**
+     * Writes three code units.
+     */
+    public void write(short u0, short u1, short u2);
+
+    /**
+     * Writes four code units.
+     */
+    public void write(short u0, short u1, short u2, short u3);
+
+    /**
+     * Writes five code units.
+     */
+    public void write(short u0, short u1, short u2, short u3, short u4);
+
+    /**
+     * Writes an {@code int}, little-endian.
+     */
+    public void writeInt(int value);
+
+    /**
+     * Writes a {@code long}, little-endian.
+     */
+    public void writeLong(long value);
+
+    /**
+     * Writes the contents of the given array.
+     */
+    public void write(byte[] data);
+
+    /**
+     * Writes the contents of the given array.
+     */
+    public void write(short[] data);
+
+    /**
+     * Writes the contents of the given array.
+     */
+    public void write(int[] data);
+
+    /**
+     * Writes the contents of the given array.
+     */
+    public void write(long[] data);
+}
diff --git a/dx/src/com/android/dx/io/instructions/DecodedInstruction.java b/dx/src/com/android/dx/io/instructions/DecodedInstruction.java
new file mode 100644
index 0000000..e418a1c
--- /dev/null
+++ b/dx/src/com/android/dx/io/instructions/DecodedInstruction.java
@@ -0,0 +1,479 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io.instructions;
+
+import com.android.dx.io.IndexType;
+import com.android.dx.io.OpcodeInfo;
+import com.android.dx.io.Opcodes;
+import com.android.dx.util.DexException;
+import com.android.dx.util.Hex;
+
+import java.io.EOFException;
+
+/**
+ * A decoded Dalvik instruction. This consists of a format codec, a
+ * numeric opcode, an optional index type, and any additional
+ * arguments of the instruction. The additional arguments (if any) are
+ * represented as uninterpreted data.
+ *
+ * <p><b>Note:</b> The names of the arguments are <i>not</i> meant to
+ * match the names given in the Dalvik instruction format
+ * specification, which just names fields (somewhat arbitrarily)
+ * alphabetically from A. In this class, non-register
+ * fields are given descriptive names and register fields are
+ * consistently named alphabetically.</p>
+ */
+public abstract class DecodedInstruction {
+    /** non-null; instruction format / codec */
+    private final InstructionCodec format;
+
+    /** opcode number */
+    private final int opcode;
+
+    /** constant index argument */
+    private final int index;
+
+    /** null-ok; index type */
+    private final IndexType indexType;
+
+    /**
+     * target address argument. This is an absolute address, not just
+     * a signed offset. <b>Note:</b> The address is unsigned, even
+     * though it is stored in an {@code int}.
+     */
+    private final int target;
+
+    /**
+     * literal value argument; also used for special verification error
+     * constants (formats 20bc and 40sc) as well as should-be-zero values
+     * (formats 10x, 20t, 30t, and 32x)
+     */
+    private final long literal;
+
+    /**
+     * Decodes an instruction from the given input source.
+     */
+    public static DecodedInstruction decode(CodeInput in) throws EOFException {
+        int opcodeUnit = in.read();
+        int opcode = Opcodes.extractOpcodeFromUnit(opcodeUnit);
+        InstructionCodec format = OpcodeInfo.getFormat(opcode);
+
+        return format.decode(opcodeUnit, in);
+    }
+
+    /**
+     * Decodes an array of instructions. The result has non-null
+     * elements at each offset that represents the start of an
+     * instruction.
+     */
+    public static DecodedInstruction[] decodeAll(short[] encodedInstructions) {
+        int size = encodedInstructions.length;
+        DecodedInstruction[] decoded = new DecodedInstruction[size];
+        ShortArrayCodeInput in = new ShortArrayCodeInput(encodedInstructions);
+
+        try {
+            while (in.hasMore()) {
+                decoded[in.cursor()] = DecodedInstruction.decode(in);
+            }
+        } catch (EOFException ex) {
+            throw new AssertionError("shouldn't happen");
+        }
+
+        return decoded;
+    }
+
+    /**
+     * Constructs an instance.
+     */
+    public DecodedInstruction(InstructionCodec format, int opcode,
+            int index, IndexType indexType, int target, long literal) {
+        if (format == null) {
+            throw new NullPointerException("format == null");
+        }
+
+        if (!Opcodes.isValidShape(opcode)) {
+            throw new IllegalArgumentException("invalid opcode");
+        }
+
+        this.format = format;
+        this.opcode = opcode;
+        this.index = index;
+        this.indexType = indexType;
+        this.target = target;
+        this.literal = literal;
+    }
+
+    public final InstructionCodec getFormat() {
+        return format;
+    }
+
+    public final int getOpcode() {
+        return opcode;
+    }
+
+    /**
+     * Gets the opcode, as a code unit.
+     */
+    public final short getOpcodeUnit() {
+        return (short) opcode;
+    }
+
+    public final int getIndex() {
+        return index;
+    }
+
+    /**
+     * Gets the index, as a code unit.
+     */
+    public final short getIndexUnit() {
+        return (short) index;
+    }
+
+    public final IndexType getIndexType() {
+        return indexType;
+    }
+
+    /**
+     * Gets the raw target.
+     */
+    public final int getTarget() {
+        return target;
+    }
+
+    /**
+     * Gets the target as a relative offset from the given address.
+     */
+    public final int getTarget(int baseAddress) {
+        return target - baseAddress;
+    }
+
+    /**
+     * Gets the target as a relative offset from the given base
+     * address, as a code unit. This will throw if the value is out of
+     * the range of a signed code unit.
+     */
+    public final short getTargetUnit(int baseAddress) {
+        int relativeTarget = getTarget(baseAddress);
+
+        if (relativeTarget != (short) relativeTarget) {
+            throw new DexException("Target out of range: "
+                    + Hex.s4(relativeTarget));
+        }
+
+        return (short) relativeTarget;
+    }
+
+    /**
+     * Gets the target as a relative offset from the given base
+     * address, masked to be a byte in size. This will throw if the
+     * value is out of the range of a signed byte.
+     */
+    public final int getTargetByte(int baseAddress) {
+        int relativeTarget = getTarget(baseAddress);
+
+        if (relativeTarget != (byte) relativeTarget) {
+            throw new DexException("Target out of range: "
+                    + Hex.s4(relativeTarget));
+        }
+
+        return relativeTarget & 0xff;
+    }
+
+    public final long getLiteral() {
+        return literal;
+    }
+
+    /**
+     * Gets the literal value, masked to be an int in size. This will
+     * throw if the value is out of the range of a signed int.
+     */
+    public final int getLiteralInt() {
+        if (literal != (int) literal) {
+            throw new DexException("Literal out of range: " + Hex.u8(literal));
+        }
+
+        return (int) literal;
+    }
+
+    /**
+     * Gets the literal value, as a code unit. This will throw if the
+     * value is out of the range of a signed code unit.
+     */
+    public final short getLiteralUnit() {
+        if (literal != (short) literal) {
+            throw new DexException("Literal out of range: " + Hex.u8(literal));
+        }
+
+        return (short) literal;
+    }
+
+    /**
+     * Gets the literal value, masked to be a byte in size. This will
+     * throw if the value is out of the range of a signed byte.
+     */
+    public final int getLiteralByte() {
+        if (literal != (byte) literal) {
+            throw new DexException("Literal out of range: " + Hex.u8(literal));
+        }
+
+        return (int) literal & 0xff;
+    }
+
+    /**
+     * Gets the literal value, masked to be a nibble in size. This
+     * will throw if the value is out of the range of a signed nibble.
+     */
+    public final int getLiteralNibble() {
+        if ((literal < -8) || (literal > 7)) {
+            throw new DexException("Literal out of range: " + Hex.u8(literal));
+        }
+
+        return (int) literal & 0xf;
+    }
+
+    public abstract int getRegisterCount();
+
+    public int getA() {
+        return 0;
+    }
+
+    public int getB() {
+        return 0;
+    }
+
+    public int getC() {
+        return 0;
+    }
+
+    public int getD() {
+        return 0;
+    }
+
+    public int getE() {
+        return 0;
+    }
+
+    /**
+     * Gets the register count, as a code unit. This will throw if the
+     * value is out of the range of an unsigned code unit.
+     */
+    public final short getRegisterCountUnit() {
+        int registerCount = getRegisterCount();
+
+        if ((registerCount & ~0xffff) != 0) {
+            throw new DexException("Register count out of range: "
+                    + Hex.u8(registerCount));
+        }
+
+        return (short) registerCount;
+    }
+
+    /**
+     * Gets the A register number, as a code unit. This will throw if the
+     * value is out of the range of an unsigned code unit.
+     */
+    public final short getAUnit() {
+        int a = getA();
+
+        if ((a & ~0xffff) != 0) {
+            throw new DexException("Register A out of range: " + Hex.u8(a));
+        }
+
+        return (short) a;
+    }
+
+    /**
+     * Gets the A register number, as a byte. This will throw if the
+     * value is out of the range of an unsigned byte.
+     */
+    public final short getAByte() {
+        int a = getA();
+
+        if ((a & ~0xff) != 0) {
+            throw new DexException("Register A out of range: " + Hex.u8(a));
+        }
+
+        return (short) a;
+    }
+
+    /**
+     * Gets the A register number, as a nibble. This will throw if the
+     * value is out of the range of an unsigned nibble.
+     */
+    public final short getANibble() {
+        int a = getA();
+
+        if ((a & ~0xf) != 0) {
+            throw new DexException("Register A out of range: " + Hex.u8(a));
+        }
+
+        return (short) a;
+    }
+
+    /**
+     * Gets the B register number, as a code unit. This will throw if the
+     * value is out of the range of an unsigned code unit.
+     */
+    public final short getBUnit() {
+        int b = getB();
+
+        if ((b & ~0xffff) != 0) {
+            throw new DexException("Register B out of range: " + Hex.u8(b));
+        }
+
+        return (short) b;
+    }
+
+    /**
+     * Gets the B register number, as a byte. This will throw if the
+     * value is out of the range of an unsigned byte.
+     */
+    public final short getBByte() {
+        int b = getB();
+
+        if ((b & ~0xff) != 0) {
+            throw new DexException("Register B out of range: " + Hex.u8(b));
+        }
+
+        return (short) b;
+    }
+
+    /**
+     * Gets the B register number, as a nibble. This will throw if the
+     * value is out of the range of an unsigned nibble.
+     */
+    public final short getBNibble() {
+        int b = getB();
+
+        if ((b & ~0xf) != 0) {
+            throw new DexException("Register B out of range: " + Hex.u8(b));
+        }
+
+        return (short) b;
+    }
+
+    /**
+     * Gets the C register number, as a code unit. This will throw if the
+     * value is out of the range of an unsigned code unit.
+     */
+    public final short getCUnit() {
+        int c = getC();
+
+        if ((c & ~0xffff) != 0) {
+            throw new DexException("Register C out of range: " + Hex.u8(c));
+        }
+
+        return (short) c;
+    }
+
+    /**
+     * Gets the C register number, as a byte. This will throw if the
+     * value is out of the range of an unsigned byte.
+     */
+    public final short getCByte() {
+        int c = getC();
+
+        if ((c & ~0xff) != 0) {
+            throw new DexException("Register C out of range: " + Hex.u8(c));
+        }
+
+        return (short) c;
+    }
+
+    /**
+     * Gets the C register number, as a nibble. This will throw if the
+     * value is out of the range of an unsigned nibble.
+     */
+    public final short getCNibble() {
+        int c = getC();
+
+        if ((c & ~0xf) != 0) {
+            throw new DexException("Register C out of range: " + Hex.u8(c));
+        }
+
+        return (short) c;
+    }
+
+    /**
+     * Gets the D register number, as a code unit. This will throw if the
+     * value is out of the range of an unsigned code unit.
+     */
+    public final short getDUnit() {
+        int d = getD();
+
+        if ((d & ~0xffff) != 0) {
+            throw new DexException("Register D out of range: " + Hex.u8(d));
+        }
+
+        return (short) d;
+    }
+
+    /**
+     * Gets the D register number, as a byte. This will throw if the
+     * value is out of the range of an unsigned byte.
+     */
+    public final short getDByte() {
+        int d = getD();
+
+        if ((d & ~0xff) != 0) {
+            throw new DexException("Register D out of range: " + Hex.u8(d));
+        }
+
+        return (short) d;
+    }
+
+    /**
+     * Gets the D register number, as a nibble. This will throw if the
+     * value is out of the range of an unsigned nibble.
+     */
+    public final short getDNibble() {
+        int d = getD();
+
+        if ((d & ~0xf) != 0) {
+            throw new DexException("Register D out of range: " + Hex.u8(d));
+        }
+
+        return (short) d;
+    }
+
+    /**
+     * Gets the E register number, as a nibble. This will throw if the
+     * value is out of the range of an unsigned nibble.
+     */
+    public final short getENibble() {
+        int e = getE();
+
+        if ((e & ~0xf) != 0) {
+            throw new DexException("Register E out of range: " + Hex.u8(e));
+        }
+
+        return (short) e;
+    }
+
+    /**
+     * Encodes this instance to the given output.
+     */
+    public final void encode(CodeOutput out) {
+        format.encode(this, out);
+    }
+
+    /**
+     * Returns an instance just like this one, except with the index replaced
+     * with the given one.
+     */
+    public abstract DecodedInstruction withIndex(int newIndex);
+}
diff --git a/dx/src/com/android/dx/io/instructions/FillArrayDataPayloadDecodedInstruction.java b/dx/src/com/android/dx/io/instructions/FillArrayDataPayloadDecodedInstruction.java
new file mode 100644
index 0000000..64fc55b
--- /dev/null
+++ b/dx/src/com/android/dx/io/instructions/FillArrayDataPayloadDecodedInstruction.java
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io.instructions;
+
+/**
+ * A decoded Dalvik instruction which contains the payload for
+ * a {@code fill-array-data} instruction.
+ */
+public final class FillArrayDataPayloadDecodedInstruction
+        extends DecodedInstruction {
+    /** data array */
+    private final Object data;
+
+    /** number of elements */
+    private final int size;
+
+    /** element width */
+    private final int elementWidth;
+
+    /**
+     * Constructs an instance. This private constructor doesn't check the
+     * type of the data array.
+     */
+    private FillArrayDataPayloadDecodedInstruction(InstructionCodec format,
+            int opcode, Object data, int size, int elementWidth) {
+        super(format, opcode, 0, null, 0, 0L);
+
+        this.data = data;
+        this.size = size;
+        this.elementWidth = elementWidth;
+    }
+
+    /**
+     * Constructs an instance.
+     */
+    public FillArrayDataPayloadDecodedInstruction(InstructionCodec format,
+            int opcode, byte[] data) {
+        this(format, opcode, data, data.length, 1);
+    }
+
+    /**
+     * Constructs an instance.
+     */
+    public FillArrayDataPayloadDecodedInstruction(InstructionCodec format,
+            int opcode, short[] data) {
+        this(format, opcode, data, data.length, 2);
+    }
+
+    /**
+     * Constructs an instance.
+     */
+    public FillArrayDataPayloadDecodedInstruction(InstructionCodec format,
+            int opcode, int[] data) {
+        this(format, opcode, data, data.length, 4);
+    }
+
+    /**
+     * Constructs an instance.
+     */
+    public FillArrayDataPayloadDecodedInstruction(InstructionCodec format,
+            int opcode, long[] data) {
+        this(format, opcode, data, data.length, 8);
+    }
+
+    /** @inheritDoc */
+    public int getRegisterCount() {
+        return 0;
+    }
+
+    public short getElementWidthUnit() {
+        return (short) elementWidth;
+    }
+
+    public int getSize() {
+        return size;
+    }
+
+    public Object getData() {
+        return data;
+    }
+
+    /** @inheritDoc */
+    public DecodedInstruction withIndex(int newIndex) {
+        throw new UnsupportedOperationException("no index in instruction");
+    }
+}
diff --git a/dx/src/com/android/dx/io/instructions/FiveRegisterDecodedInstruction.java b/dx/src/com/android/dx/io/instructions/FiveRegisterDecodedInstruction.java
new file mode 100644
index 0000000..6e14d34
--- /dev/null
+++ b/dx/src/com/android/dx/io/instructions/FiveRegisterDecodedInstruction.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io.instructions;
+
+import com.android.dx.io.IndexType;
+
+/**
+ * A decoded Dalvik instruction which has five register arguments.
+ */
+public final class FiveRegisterDecodedInstruction extends DecodedInstruction {
+    /** register argument "A" */
+    private final int a;
+
+    /** register argument "B" */
+    private final int b;
+
+    /** register argument "C" */
+    private final int c;
+
+    /** register argument "D" */
+    private final int d;
+
+    /** register argument "E" */
+    private final int e;
+
+    /**
+     * Constructs an instance.
+     */
+    public FiveRegisterDecodedInstruction(InstructionCodec format, int opcode,
+            int index, IndexType indexType, int target, long literal,
+            int a, int b, int c, int d, int e) {
+        super(format, opcode, index, indexType, target, literal);
+
+        this.a = a;
+        this.b = b;
+        this.c = c;
+        this.d = d;
+        this.e = e;
+    }
+
+    /** @inheritDoc */
+    public int getRegisterCount() {
+        return 5;
+    }
+
+    /** @inheritDoc */
+    public int getA() {
+        return a;
+    }
+
+    /** @inheritDoc */
+    public int getB() {
+        return b;
+    }
+
+    /** @inheritDoc */
+    public int getC() {
+        return c;
+    }
+
+    /** @inheritDoc */
+    public int getD() {
+        return d;
+    }
+
+    /** @inheritDoc */
+    public int getE() {
+        return e;
+    }
+
+    /** @inheritDoc */
+    public DecodedInstruction withIndex(int newIndex) {
+        return new FiveRegisterDecodedInstruction(
+                getFormat(), getOpcode(), newIndex, getIndexType(),
+                getTarget(), getLiteral(), a, b, c, d, e);
+    }
+}
diff --git a/dx/src/com/android/dx/io/instructions/FourRegisterDecodedInstruction.java b/dx/src/com/android/dx/io/instructions/FourRegisterDecodedInstruction.java
new file mode 100644
index 0000000..29836d0
--- /dev/null
+++ b/dx/src/com/android/dx/io/instructions/FourRegisterDecodedInstruction.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io.instructions;
+
+import com.android.dx.io.IndexType;
+
+/**
+ * A decoded Dalvik instruction which has four register arguments.
+ */
+public final class FourRegisterDecodedInstruction extends DecodedInstruction {
+    /** register argument "A" */
+    private final int a;
+
+    /** register argument "B" */
+    private final int b;
+
+    /** register argument "C" */
+    private final int c;
+
+    /** register argument "D" */
+    private final int d;
+
+    /**
+     * Constructs an instance.
+     */
+    public FourRegisterDecodedInstruction(InstructionCodec format, int opcode,
+            int index, IndexType indexType, int target, long literal,
+            int a, int b, int c, int d) {
+        super(format, opcode, index, indexType, target, literal);
+
+        this.a = a;
+        this.b = b;
+        this.c = c;
+        this.d = d;
+    }
+
+    /** @inheritDoc */
+    public int getRegisterCount() {
+        return 4;
+    }
+
+    /** @inheritDoc */
+    public int getA() {
+        return a;
+    }
+
+    /** @inheritDoc */
+    public int getB() {
+        return b;
+    }
+
+    /** @inheritDoc */
+    public int getC() {
+        return c;
+    }
+
+    /** @inheritDoc */
+    public int getD() {
+        return d;
+    }
+
+    /** @inheritDoc */
+    public DecodedInstruction withIndex(int newIndex) {
+        return new FourRegisterDecodedInstruction(
+                getFormat(), getOpcode(), newIndex, getIndexType(),
+                getTarget(), getLiteral(), a, b, c, d);
+    }
+}
diff --git a/dx/src/com/android/dx/io/instructions/InstructionCodec.java b/dx/src/com/android/dx/io/instructions/InstructionCodec.java
new file mode 100644
index 0000000..0790385
--- /dev/null
+++ b/dx/src/com/android/dx/io/instructions/InstructionCodec.java
@@ -0,0 +1,1099 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io.instructions;
+
+import com.android.dx.io.IndexType;
+import com.android.dx.io.OpcodeInfo;
+import com.android.dx.io.Opcodes;
+import com.android.dx.util.DexException;
+import com.android.dx.util.Hex;
+
+import java.io.EOFException;
+
+/**
+ * Representation of an instruction format, which knows how to decode into
+ * and encode from instances of {@link DecodedInstruction}.
+ */
+public enum InstructionCodec {
+    FORMAT_00X() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            return new ZeroRegisterDecodedInstruction(
+                    this, opcodeUnit, 0, null,
+                    0, 0L);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            out.write(insn.getOpcodeUnit());
+        }
+    },
+
+    FORMAT_10X() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int opcode = byte0(opcodeUnit);
+            int literal = byte1(opcodeUnit); // should be zero
+            return new ZeroRegisterDecodedInstruction(
+                    this, opcode, 0, null,
+                    0, literal);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            out.write(insn.getOpcodeUnit());
+        }
+    },
+
+    FORMAT_12X() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int opcode = byte0(opcodeUnit);
+            int a = nibble2(opcodeUnit);
+            int b = nibble3(opcodeUnit);
+            return new TwoRegisterDecodedInstruction(
+                    this, opcode, 0, null,
+                    0, 0L,
+                    a, b);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            out.write(
+                    codeUnit(insn.getOpcodeUnit(),
+                             makeByte(insn.getA(), insn.getB())));
+        }
+    },
+
+    FORMAT_11N() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int opcode = byte0(opcodeUnit);
+            int a = nibble2(opcodeUnit);
+            int literal = (nibble3(opcodeUnit) << 28) >> 28; // sign-extend
+            return new OneRegisterDecodedInstruction(
+                    this, opcode, 0, null,
+                    0, literal,
+                    a);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            out.write(
+                    codeUnit(insn.getOpcodeUnit(),
+                             makeByte(insn.getA(), insn.getLiteralNibble())));
+        }
+    },
+
+    FORMAT_11X() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int opcode = byte0(opcodeUnit);
+            int a = byte1(opcodeUnit);
+            return new OneRegisterDecodedInstruction(
+                    this, opcode, 0, null,
+                    0, 0L,
+                    a);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            out.write(codeUnit(insn.getOpcode(), insn.getA()));
+        }
+    },
+
+    FORMAT_10T() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int baseAddress = in.cursor() - 1;
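+            // The caller has already consumed the opcode unit, so the
+            // instruction's own address is one unit behind the cursor.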
+            int opcode = byte0(opcodeUnit);
+            int target = (byte) byte1(opcodeUnit); // sign-extend
+            return new ZeroRegisterDecodedInstruction(
+                    this, opcode, 0, null,
+                    baseAddress + target, 0L);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            int relativeTarget = insn.getTargetByte(out.cursor());
+            out.write(codeUnit(insn.getOpcode(), relativeTarget));
+        }
+    },
+
+    FORMAT_20T() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int baseAddress = in.cursor() - 1;
+            int opcode = byte0(opcodeUnit);
+            int literal = byte1(opcodeUnit); // should be zero
+            int target = (short) in.read(); // sign-extend
+            return new ZeroRegisterDecodedInstruction(
+                    this, opcode, 0, null,
+                    baseAddress + target, literal);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            short relativeTarget = insn.getTargetUnit(out.cursor());
+            out.write(insn.getOpcodeUnit(), relativeTarget);
+        }
+    },
+
+    FORMAT_20BC() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            // Note: We use the literal field to hold the decoded AA value.
+            int opcode = byte0(opcodeUnit);
+            int literal = byte1(opcodeUnit);
+            int index = in.read();
+            return new ZeroRegisterDecodedInstruction(
+                    this, opcode, index, IndexType.VARIES,
+                    0, literal);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            out.write(
+                    codeUnit(insn.getOpcode(), insn.getLiteralByte()),
+                    insn.getIndexUnit());
+        }
+    },
+
+    FORMAT_22X() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int opcode = byte0(opcodeUnit);
+            int a = byte1(opcodeUnit);
+            int b = in.read();
+            return new TwoRegisterDecodedInstruction(
+                    this, opcode, 0, null,
+                    0, 0L,
+                    a, b);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            out.write(
+                    codeUnit(insn.getOpcode(), insn.getA()),
+                    insn.getBUnit());
+        }
+    },
+
+    FORMAT_21T() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int baseAddress = in.cursor() - 1;
+            int opcode = byte0(opcodeUnit);
+            int a = byte1(opcodeUnit);
+            int target = (short) in.read(); // sign-extend
+            return new OneRegisterDecodedInstruction(
+                    this, opcode, 0, null,
+                    baseAddress + target, 0L,
+                    a);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            short relativeTarget = insn.getTargetUnit(out.cursor());
+            out.write(codeUnit(insn.getOpcode(), insn.getA()), relativeTarget);
+        }
+    },
+
+    FORMAT_21S() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int opcode = byte0(opcodeUnit);
+            int a = byte1(opcodeUnit);
+            int literal = (short) in.read(); // sign-extend
+            return new OneRegisterDecodedInstruction(
+                    this, opcode, 0, null,
+                    0, literal,
+                    a);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            out.write(
+                    codeUnit(insn.getOpcode(), insn.getA()),
+                    insn.getLiteralUnit());
+        }
+    },
+
+    FORMAT_21H() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int opcode = byte0(opcodeUnit);
+            int a = byte1(opcodeUnit);
+            long literal = (short) in.read(); // sign-extend
+
+            /*
+             * Format 21h decodes differently depending on the opcode,
+             * because the "signed hat" might represent either a 32-
+             * or 64-bit value.
+             */
+            literal <<= (opcode == Opcodes.CONST_HIGH16) ? 16 : 48;
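+            // For example, a const/high16 payload unit of 0x1234 becomes the
+            // literal 0x12340000 (value chosen purely for illustration).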
+
+            return new OneRegisterDecodedInstruction(
+                    this, opcode, 0, null,
+                    0, literal,
+                    a);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            // See above.
+            int opcode = insn.getOpcode();
+            int shift = (opcode == Opcodes.CONST_HIGH16) ? 16 : 48;
+            short literal = (short) (insn.getLiteral() >> shift);
+
+            out.write(codeUnit(opcode, insn.getA()), literal);
+        }
+    },
+
+    FORMAT_21C() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int opcode = byte0(opcodeUnit);
+            int a = byte1(opcodeUnit);
+            int index = in.read();
+            IndexType indexType = OpcodeInfo.getIndexType(opcode);
+            return new OneRegisterDecodedInstruction(
+                    this, opcode, index, indexType,
+                    0, 0L,
+                    a);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            out.write(
+                    codeUnit(insn.getOpcode(), insn.getA()),
+                    insn.getIndexUnit());
+        }
+    },
+
+    FORMAT_23X() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int opcode = byte0(opcodeUnit);
+            int a = byte1(opcodeUnit);
+            int bc = in.read();
+            int b = byte0(bc);
+            int c = byte1(bc);
+            return new ThreeRegisterDecodedInstruction(
+                    this, opcode, 0, null,
+                    0, 0L,
+                    a, b, c);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            out.write(
+                    codeUnit(insn.getOpcode(), insn.getA()),
+                    codeUnit(insn.getB(), insn.getC()));
+        }
+    },
+
+    FORMAT_22B() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int opcode = byte0(opcodeUnit);
+            int a = byte1(opcodeUnit);
+            int bc = in.read();
+            int b = byte0(bc);
+            int literal = (byte) byte1(bc); // sign-extend
+            return new TwoRegisterDecodedInstruction(
+                    this, opcode, 0, null,
+                    0, literal,
+                    a, b);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            out.write(
+                    codeUnit(insn.getOpcode(), insn.getA()),
+                    codeUnit(insn.getB(),
+                             insn.getLiteralByte()));
+        }
+    },
+
+    FORMAT_22T() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int baseAddress = in.cursor() - 1;
+            int opcode = byte0(opcodeUnit);
+            int a = nibble2(opcodeUnit);
+            int b = nibble3(opcodeUnit);
+            int target = (short) in.read(); // sign-extend
+            return new TwoRegisterDecodedInstruction(
+                    this, opcode, 0, null,
+                    baseAddress + target, 0L,
+                    a, b);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            short relativeTarget = insn.getTargetUnit(out.cursor());
+            out.write(
+                    codeUnit(insn.getOpcode(),
+                             makeByte(insn.getA(), insn.getB())),
+                    relativeTarget);
+        }
+    },
+
+    FORMAT_22S() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int opcode = byte0(opcodeUnit);
+            int a = nibble2(opcodeUnit);
+            int b = nibble3(opcodeUnit);
+            int literal = (short) in.read(); // sign-extend
+            return new TwoRegisterDecodedInstruction(
+                    this, opcode, 0, null,
+                    0, literal,
+                    a, b);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            out.write(
+                    codeUnit(insn.getOpcode(),
+                             makeByte(insn.getA(), insn.getB())),
+                    insn.getLiteralUnit());
+        }
+    },
+
+    FORMAT_22C() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int opcode = byte0(opcodeUnit);
+            int a = nibble2(opcodeUnit);
+            int b = nibble3(opcodeUnit);
+            int index = in.read();
+            IndexType indexType = OpcodeInfo.getIndexType(opcode);
+            return new TwoRegisterDecodedInstruction(
+                    this, opcode, index, indexType,
+                    0, 0L,
+                    a, b);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            out.write(
+                    codeUnit(insn.getOpcode(),
+                             makeByte(insn.getA(), insn.getB())),
+                    insn.getIndexUnit());
+        }
+    },
+
+    FORMAT_22CS() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int opcode = byte0(opcodeUnit);
+            int a = nibble2(opcodeUnit);
+            int b = nibble3(opcodeUnit);
+            int index = in.read();
+            return new TwoRegisterDecodedInstruction(
+                    this, opcode, index, IndexType.FIELD_OFFSET,
+                    0, 0L,
+                    a, b);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            out.write(
+                    codeUnit(insn.getOpcode(),
+                             makeByte(insn.getA(), insn.getB())),
+                    insn.getIndexUnit());
+        }
+    },
+
+    FORMAT_30T() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int baseAddress = in.cursor() - 1;
+            int opcode = byte0(opcodeUnit);
+            int literal = byte1(opcodeUnit); // should be zero
+            int target = in.readInt();
+            return new ZeroRegisterDecodedInstruction(
+                    this, opcode, 0, null,
+                    baseAddress + target, literal);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            int relativeTarget = insn.getTarget(out.cursor());
+            out.write(insn.getOpcodeUnit(),
+                    unit0(relativeTarget), unit1(relativeTarget));
+        }
+    },
+
+    FORMAT_32X() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int opcode = byte0(opcodeUnit);
+            int literal = byte1(opcodeUnit); // should be zero
+            int a = in.read();
+            int b = in.read();
+            return new TwoRegisterDecodedInstruction(
+                    this, opcode, 0, null,
+                    0, literal,
+                    a, b);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            out.write(insn.getOpcodeUnit(), insn.getAUnit(), insn.getBUnit());
+        }
+    },
+
+    FORMAT_31I() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int opcode = byte0(opcodeUnit);
+            int a = byte1(opcodeUnit);
+            int literal = in.readInt();
+            return new OneRegisterDecodedInstruction(
+                    this, opcode, 0, null,
+                    0, literal,
+                    a);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            int literal = insn.getLiteralInt();
+            out.write(
+                    codeUnit(insn.getOpcode(), insn.getA()),
+                    unit0(literal),
+                    unit1(literal));
+        }
+    },
+
+    FORMAT_31T() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int baseAddress = in.cursor() - 1;
+            int opcode = byte0(opcodeUnit);
+            int a = byte1(opcodeUnit);
+            int target = baseAddress + in.readInt();
+
+            /*
+             * Switch instructions need to "forward" their addresses to their
+             * payload target instructions.
+             */
+            switch (opcode) {
+                case Opcodes.PACKED_SWITCH:
+                case Opcodes.SPARSE_SWITCH: {
+                    in.setBaseAddress(target, baseAddress);
+                    break;
+                }
+            }
+
+            return new OneRegisterDecodedInstruction(
+                    this, opcode, 0, null,
+                    target, 0L,
+                    a);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            int relativeTarget = insn.getTarget(out.cursor());
+            out.write(
+                    codeUnit(insn.getOpcode(), insn.getA()),
+                    unit0(relativeTarget), unit1(relativeTarget));
+        }
+    },
+
+    FORMAT_31C() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int opcode = byte0(opcodeUnit);
+            int a = byte1(opcodeUnit);
+            int index = in.readInt();
+            IndexType indexType = OpcodeInfo.getIndexType(opcode);
+            return new OneRegisterDecodedInstruction(
+                    this, opcode, index, indexType,
+                    0, 0L,
+                    a);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            int index = insn.getIndex();
+            out.write(
+                    codeUnit(insn.getOpcode(), insn.getA()),
+                    unit0(index),
+                    unit1(index));
+        }
+    },
+
+    FORMAT_35C() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            return decodeRegisterList(this, opcodeUnit, in);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            encodeRegisterList(insn, out);
+        }
+    },
+
+    FORMAT_35MS() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            return decodeRegisterList(this, opcodeUnit, in);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            encodeRegisterList(insn, out);
+        }
+    },
+
+    FORMAT_35MI() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            return decodeRegisterList(this, opcodeUnit, in);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            encodeRegisterList(insn, out);
+        }
+    },
+
+    FORMAT_3RC() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            return decodeRegisterRange(this, opcodeUnit, in);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            encodeRegisterRange(insn, out);
+        }
+    },
+
+    FORMAT_3RMS() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            return decodeRegisterRange(this, opcodeUnit, in);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            encodeRegisterRange(insn, out);
+        }
+    },
+
+    FORMAT_3RMI() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            return decodeRegisterRange(this, opcodeUnit, in);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            encodeRegisterRange(insn, out);
+        }
+    },
+
+    FORMAT_51L() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int opcode = byte0(opcodeUnit);
+            int a = byte1(opcodeUnit);
+            long literal = in.readLong();
+            return new OneRegisterDecodedInstruction(
+                    this, opcode, 0, null,
+                    0, literal,
+                    a);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            long literal = insn.getLiteral();
+            out.write(
+                    codeUnit(insn.getOpcode(), insn.getA()),
+                    unit0(literal),
+                    unit1(literal),
+                    unit2(literal),
+                    unit3(literal));
+        }
+    },
+
+    FORMAT_33X() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int ab = in.read();
+            int a = byte0(ab);
+            int b = byte1(ab);
+            int c = in.read();
+            return new ThreeRegisterDecodedInstruction(
+                    this, opcodeUnit, 0, null,
+                    0, 0L,
+                    a, b, c);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            out.write(
+                    insn.getOpcodeUnit(),
+                    codeUnit(insn.getA(), insn.getB()),
+                    insn.getCUnit());
+        }
+    },
+
+    FORMAT_32S() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int ab = in.read();
+            int a = byte0(ab);
+            int b = byte1(ab);
+            int literal = (short) in.read(); // sign-extend
+            return new TwoRegisterDecodedInstruction(
+                    this, opcodeUnit, 0, null,
+                    0, literal,
+                    a, b);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            out.write(
+                    insn.getOpcodeUnit(),
+                    codeUnit(insn.getA(), insn.getB()),
+                    insn.getLiteralUnit());
+        }
+    },
+
+    FORMAT_40SC() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            // Note: We use the literal field to hold the decoded AA value.
+            int index = in.readInt();
+            int literal = in.read();
+            return new ZeroRegisterDecodedInstruction(
+                    this, opcodeUnit, index, IndexType.VARIES,
+                    0, literal);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            int index = insn.getIndex();
+            out.write(
+                    insn.getOpcodeUnit(),
+                    unit0(index),
+                    unit1(index),
+                    insn.getLiteralUnit());
+        }
+    },
+
+    FORMAT_41C() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int index = in.readInt();
+            int a = in.read();
+            IndexType indexType = OpcodeInfo.getIndexType(opcodeUnit);
+            return new OneRegisterDecodedInstruction(
+                    this, opcodeUnit, index, indexType,
+                    0, 0L,
+                    a);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            int index = insn.getIndex();
+            out.write(
+                    insn.getOpcodeUnit(),
+                    unit0(index),
+                    unit1(index),
+                    insn.getAUnit());
+        }
+    },
+
+    FORMAT_52C() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int index = in.readInt();
+            int a = in.read();
+            int b = in.read();
+            IndexType indexType = OpcodeInfo.getIndexType(opcodeUnit);
+            return new TwoRegisterDecodedInstruction(
+                    this, opcodeUnit, index, indexType,
+                    0, 0L,
+                    a, b);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            int index = insn.getIndex();
+            out.write(
+                    insn.getOpcodeUnit(),
+                    unit0(index),
+                    unit1(index),
+                    insn.getAUnit(),
+                    insn.getBUnit());
+        }
+    },
+
+    FORMAT_5RC() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int index = in.readInt();
+            int registerCount = in.read();
+            int a = in.read();
+            IndexType indexType = OpcodeInfo.getIndexType(opcodeUnit);
+            return new RegisterRangeDecodedInstruction(
+                    this, opcodeUnit, index, indexType,
+                    0, 0L,
+                    a, registerCount);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            int index = insn.getIndex();
+            out.write(
+                    insn.getOpcodeUnit(),
+                    unit0(index),
+                    unit1(index),
+                    insn.getRegisterCountUnit(),
+                    insn.getAUnit());
+        }
+    },
+
+    FORMAT_PACKED_SWITCH_PAYLOAD() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int baseAddress = in.baseAddressForCursor();
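+            // Stored targets are relative to the originating switch
+            // instruction; FORMAT_31T forwarded that instruction's address
+            // here via setBaseAddress(), so the targets can be made absolute.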
+            int size = in.read();
+            int firstKey = in.readInt();
+            int[] targets = new int[size];
+
+            for (int i = 0; i < size; i++) {
+                targets[i] = baseAddress + in.readInt();
+            }
+
+            return new PackedSwitchPayloadDecodedInstruction(
+                    this, opcodeUnit, firstKey, targets);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            PackedSwitchPayloadDecodedInstruction payload =
+                (PackedSwitchPayloadDecodedInstruction) insn;
+            int[] targets = payload.getTargets();
+            int baseAddress = out.baseAddressForCursor();
+
+            out.write(payload.getOpcodeUnit());
+            out.write(asUnsignedUnit(targets.length));
+            out.writeInt(payload.getFirstKey());
+
+            for (int target : targets) {
+                out.writeInt(target - baseAddress);
+            }
+        }
+    },
+
+    FORMAT_SPARSE_SWITCH_PAYLOAD() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int baseAddress = in.baseAddressForCursor();
+            int size = in.read();
+            int[] keys = new int[size];
+            int[] targets = new int[size];
+
+            for (int i = 0; i < size; i++) {
+                keys[i] = in.readInt();
+            }
+
+            for (int i = 0; i < size; i++) {
+                targets[i] = baseAddress + in.readInt();
+            }
+
+            return new SparseSwitchPayloadDecodedInstruction(
+                    this, opcodeUnit, keys, targets);
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            SparseSwitchPayloadDecodedInstruction payload =
+                (SparseSwitchPayloadDecodedInstruction) insn;
+            int[] keys = payload.getKeys();
+            int[] targets = payload.getTargets();
+            int baseAddress = out.baseAddressForCursor();
+
+            out.write(payload.getOpcodeUnit());
+            out.write(asUnsignedUnit(targets.length));
+
+            for (int key : keys) {
+                out.writeInt(key);
+            }
+
+            for (int target : targets) {
+                out.writeInt(target - baseAddress);
+            }
+        }
+    },
+
+    FORMAT_FILL_ARRAY_DATA_PAYLOAD() {
+        @Override public DecodedInstruction decode(int opcodeUnit,
+                CodeInput in) throws EOFException {
+            int elementWidth = in.read();
+            int size = in.readInt();
+
+            switch (elementWidth) {
+                case 1: {
+                    byte[] array = new byte[size];
+                    boolean even = true;
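+                    // Each code unit packs two bytes, low byte first; a
+                    // fresh unit is read on every other iteration.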
+                    for (int i = 0, value = 0; i < size; i++, even = !even) {
+                        if (even) {
+                            value = in.read();
+                        }
+                        array[i] = (byte) (value & 0xff);
+                        value >>= 8;
+                    }
+                    return new FillArrayDataPayloadDecodedInstruction(
+                            this, opcodeUnit, array);
+                }
+                case 2: {
+                    short[] array = new short[size];
+                    for (int i = 0; i < size; i++) {
+                        array[i] = (short) in.read();
+                    }
+                    return new FillArrayDataPayloadDecodedInstruction(
+                            this, opcodeUnit, array);
+                }
+                case 4: {
+                    int[] array = new int[size];
+                    for (int i = 0; i < size; i++) {
+                        array[i] = in.readInt();
+                    }
+                    return new FillArrayDataPayloadDecodedInstruction(
+                            this, opcodeUnit, array);
+                }
+                case 8: {
+                    long[] array = new long[size];
+                    for (int i = 0; i < size; i++) {
+                        array[i] = in.readLong();
+                    }
+                    return new FillArrayDataPayloadDecodedInstruction(
+                            this, opcodeUnit, array);
+                }
+            }
+
+            throw new DexException("bogus element_width: "
+                    + Hex.u2(elementWidth));
+        }
+
+        @Override public void encode(DecodedInstruction insn, CodeOutput out) {
+            FillArrayDataPayloadDecodedInstruction payload =
+                (FillArrayDataPayloadDecodedInstruction) insn;
+            short elementWidth = payload.getElementWidthUnit();
+            Object data = payload.getData();
+
+            out.write(payload.getOpcodeUnit());
+            out.write(elementWidth);
+            out.writeInt(payload.getSize());
+
+            switch (elementWidth) {
+                case 1: out.write((byte[]) data);  break;
+                case 2: out.write((short[]) data); break;
+                case 4: out.write((int[]) data);   break;
+                case 8: out.write((long[]) data);  break;
+                default: {
+                    throw new DexException("bogus element_width: "
+                            + Hex.u2(elementWidth));
+                }
+            }
+        }
+    };
+
+    /**
+     * Decodes an instruction specified by the given opcode unit, reading
+     * any required additional code units from the given input source.
+     */
+    public abstract DecodedInstruction decode(int opcodeUnit, CodeInput in)
+        throws EOFException;
+
+    /**
+     * Encodes the given instruction.
+     */
+    public abstract void encode(DecodedInstruction insn, CodeOutput out);
+
+    /**
+     * Helper method that decodes any of the register-list formats.
+     */
+    private static DecodedInstruction decodeRegisterList(
+            InstructionCodec format, int opcodeUnit, CodeInput in)
+            throws EOFException {
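+        // The opcode unit packs the register count (high nibble) and
+        // register E; the next unit is the index, and the final unit holds
+        // registers A through D, one nibble each (A in the low nibble).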
+        int opcode = byte0(opcodeUnit);
+        int e = nibble2(opcodeUnit);
+        int registerCount = nibble3(opcodeUnit);
+        int index = in.read();
+        int abcd = in.read();
+        int a = nibble0(abcd);
+        int b = nibble1(abcd);
+        int c = nibble2(abcd);
+        int d = nibble3(abcd);
+        IndexType indexType = OpcodeInfo.getIndexType(opcode);
+
+        // TODO: Having to switch like this is less than ideal.
+        switch (registerCount) {
+            case 0:
+                return new ZeroRegisterDecodedInstruction(
+                        format, opcode, index, indexType,
+                        0, 0L);
+            case 1:
+                return new OneRegisterDecodedInstruction(
+                        format, opcode, index, indexType,
+                        0, 0L,
+                        a);
+            case 2:
+                return new TwoRegisterDecodedInstruction(
+                        format, opcode, index, indexType,
+                        0, 0L,
+                        a, b);
+            case 3:
+                return new ThreeRegisterDecodedInstruction(
+                        format, opcode, index, indexType,
+                        0, 0L,
+                        a, b, c);
+            case 4:
+                return new FourRegisterDecodedInstruction(
+                        format, opcode, index, indexType,
+                        0, 0L,
+                        a, b, c, d);
+            case 5:
+                return new FiveRegisterDecodedInstruction(
+                        format, opcode, index, indexType,
+                        0, 0L,
+                        a, b, c, d, e);
+        }
+
+        throw new DexException("bogus registerCount: "
+                + Hex.uNibble(registerCount));
+    }
+
+    /**
+     * Helper method that encodes any of the register-list formats.
+     */
+    private static void encodeRegisterList(DecodedInstruction insn,
+            CodeOutput out) {
+        out.write(codeUnit(insn.getOpcode(),
+                        makeByte(insn.getE(), insn.getRegisterCount())),
+                insn.getIndexUnit(),
+                codeUnit(insn.getA(), insn.getB(), insn.getC(), insn.getD()));
+    }
+
+    /**
+     * Helper method that decodes any of the three-unit register-range formats.
+     */
+    private static DecodedInstruction decodeRegisterRange(
+            InstructionCodec format, int opcodeUnit, CodeInput in)
+            throws EOFException {
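+        // Range formats put the register count in the high byte of the
+        // opcode unit, followed by the index unit and the first register.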
+        int opcode = byte0(opcodeUnit);
+        int registerCount = byte1(opcodeUnit);
+        int index = in.read();
+        int a = in.read();
+        IndexType indexType = OpcodeInfo.getIndexType(opcode);
+        return new RegisterRangeDecodedInstruction(
+                format, opcode, index, indexType,
+                0, 0L,
+                a, registerCount);
+    }
+
+    /**
+     * Helper method that encodes any of the three-unit register-range formats.
+     */
+    private static void encodeRegisterRange(DecodedInstruction insn,
+            CodeOutput out) {
+        out.write(codeUnit(insn.getOpcode(), insn.getRegisterCount()),
+                insn.getIndexUnit(),
+                insn.getAUnit());
+    }
+
+    private static short codeUnit(int lowByte, int highByte) {
+        if ((lowByte & ~0xff) != 0) {
+            throw new IllegalArgumentException("bogus lowByte");
+        }
+
+        if ((highByte & ~0xff) != 0) {
+            throw new IllegalArgumentException("bogus highByte");
+        }
+
+        return (short) (lowByte | (highByte << 8));
+    }
+
+    private static short codeUnit(int nibble0, int nibble1, int nibble2,
+            int nibble3) {
+        if ((nibble0 & ~0xf) != 0) {
+            throw new IllegalArgumentException("bogus nibble0");
+        }
+
+        if ((nibble1 & ~0xf) != 0) {
+            throw new IllegalArgumentException("bogus nibble1");
+        }
+
+        if ((nibble2 & ~0xf) != 0) {
+            throw new IllegalArgumentException("bogus nibble2");
+        }
+
+        if ((nibble3 & ~0xf) != 0) {
+            throw new IllegalArgumentException("bogus nibble3");
+        }
+
+        return (short) (nibble0 | (nibble1 << 4)
+                | (nibble2 << 8) | (nibble3 << 12));
+    }
+
+    private static int makeByte(int lowNibble, int highNibble) {
+        if ((lowNibble & ~0xf) != 0) {
+            throw new IllegalArgumentException("bogus lowNibble");
+        }
+
+        if ((highNibble & ~0xf) != 0) {
+            throw new IllegalArgumentException("bogus highNibble");
+        }
+
+        return lowNibble | (highNibble << 4);
+    }
+
+    private static short asUnsignedUnit(int value) {
+        if ((value & ~0xffff) != 0) {
+            throw new IllegalArgumentException("bogus unsigned code unit");
+        }
+
+        return (short) value;
+    }
+
+    private static short unit0(int value) {
+        return (short) value;
+    }
+
+    private static short unit1(int value) {
+        return (short) (value >> 16);
+    }
+
+    private static short unit0(long value) {
+        return (short) value;
+    }
+
+    private static short unit1(long value) {
+        return (short) (value >> 16);
+    }
+
+    private static short unit2(long value) {
+        return (short) (value >> 32);
+    }
+
+    private static short unit3(long value) {
+        return (short) (value >> 48);
+    }
+
+    private static int byte0(int value) {
+        return value & 0xff;
+    }
+
+    private static int byte1(int value) {
+        return (value >> 8) & 0xff;
+    }
+
+    private static int byte2(int value) {
+        return (value >> 16) & 0xff;
+    }
+
+    private static int byte3(int value) {
+        return value >>> 24;
+    }
+
+    private static int nibble0(int value) {
+        return value & 0xf;
+    }
+
+    private static int nibble1(int value) {
+        return (value >> 4) & 0xf;
+    }
+
+    private static int nibble2(int value) {
+        return (value >> 8) & 0xf;
+    }
+
+    private static int nibble3(int value) {
+        return (value >> 12) & 0xf;
+    }
+}
diff --git a/dx/src/com/android/dx/io/instructions/OneRegisterDecodedInstruction.java b/dx/src/com/android/dx/io/instructions/OneRegisterDecodedInstruction.java
new file mode 100644
index 0000000..fd38e3b
--- /dev/null
+++ b/dx/src/com/android/dx/io/instructions/OneRegisterDecodedInstruction.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io.instructions;
+
+import com.android.dx.io.IndexType;
+
+/**
+ * A decoded Dalvik instruction which has one register argument.
+ */
+public final class OneRegisterDecodedInstruction extends DecodedInstruction {
+    /** register argument "A" */
+    private final int a;
+
+    /**
+     * Constructs an instance.
+     */
+    public OneRegisterDecodedInstruction(InstructionCodec format, int opcode,
+            int index, IndexType indexType, int target, long literal,
+            int a) {
+        super(format, opcode, index, indexType, target, literal);
+
+        this.a = a;
+    }
+
+    /** @inheritDoc */
+    public int getRegisterCount() {
+        return 1;
+    }
+
+    /** @inheritDoc */
+    public int getA() {
+        return a;
+    }
+
+    /** @inheritDoc */
+    public DecodedInstruction withIndex(int newIndex) {
+        return new OneRegisterDecodedInstruction(
+                getFormat(), getOpcode(), newIndex, getIndexType(),
+                getTarget(), getLiteral(), a);
+    }
+}
diff --git a/dx/src/com/android/dx/io/instructions/PackedSwitchPayloadDecodedInstruction.java b/dx/src/com/android/dx/io/instructions/PackedSwitchPayloadDecodedInstruction.java
new file mode 100644
index 0000000..c31d319
--- /dev/null
+++ b/dx/src/com/android/dx/io/instructions/PackedSwitchPayloadDecodedInstruction.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io.instructions;
+
+/**
+ * A decoded Dalvik instruction which contains the payload for
+ * a {@code packed-switch} instruction.
+ */
+public final class PackedSwitchPayloadDecodedInstruction
+        extends DecodedInstruction {
+    /** first key value */
+    private final int firstKey;
+
+    /**
+     * array of target addresses. These are absolute, not relative,
+     * addresses.
+     */
+    private final int[] targets;
+
+    /**
+     * Constructs an instance.
+     */
+    public PackedSwitchPayloadDecodedInstruction(InstructionCodec format,
+            int opcode, int firstKey, int[] targets) {
+        super(format, opcode, 0, null, 0, 0L);
+
+        this.firstKey = firstKey;
+        this.targets = targets;
+    }
+
+    /** @inheritDoc */
+    public int getRegisterCount() {
+        return 0;
+    }
+
+    public int getFirstKey() {
+        return firstKey;
+    }
+
+    public int[] getTargets() {
+        return targets;
+    }
+
+    /** @inheritDoc */
+    public DecodedInstruction withIndex(int newIndex) {
+        throw new UnsupportedOperationException("no index in instruction");
+    }
+}
diff --git a/dx/src/com/android/dx/io/instructions/RegisterRangeDecodedInstruction.java b/dx/src/com/android/dx/io/instructions/RegisterRangeDecodedInstruction.java
new file mode 100644
index 0000000..f294f63
--- /dev/null
+++ b/dx/src/com/android/dx/io/instructions/RegisterRangeDecodedInstruction.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io.instructions;
+
+import com.android.dx.io.IndexType;
+
+/**
+ * A decoded Dalvik instruction which has register range arguments (an
+ * "A" start register and a register count).
+ */
+public final class RegisterRangeDecodedInstruction extends DecodedInstruction {
+    /** register argument "A" */
+    private final int a;
+
+    /** register count */
+    private final int registerCount;
+
+    /**
+     * Constructs an instance.
+     */
+    public RegisterRangeDecodedInstruction(InstructionCodec format, int opcode,
+            int index, IndexType indexType, int target, long literal,
+            int a, int registerCount) {
+        super(format, opcode, index, indexType, target, literal);
+
+        this.a = a;
+        this.registerCount = registerCount;
+    }
+
+    /** @inheritDoc */
+    public int getRegisterCount() {
+        return registerCount;
+    }
+
+    /** @inheritDoc */
+    public int getA() {
+        return a;
+    }
+
+    /** @inheritDoc */
+    public DecodedInstruction withIndex(int newIndex) {
+        return new RegisterRangeDecodedInstruction(
+                getFormat(), getOpcode(), newIndex, getIndexType(),
+                getTarget(), getLiteral(), a, registerCount);
+    }
+}
diff --git a/dx/src/com/android/dx/io/instructions/ShortArrayCodeInput.java b/dx/src/com/android/dx/io/instructions/ShortArrayCodeInput.java
new file mode 100644
index 0000000..49ce473
--- /dev/null
+++ b/dx/src/com/android/dx/io/instructions/ShortArrayCodeInput.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io.instructions;
+
+import java.io.EOFException;
+
+/**
+ * Implementation of {@code CodeInput} that reads from a {@code short[]}.
+ */
+public final class ShortArrayCodeInput extends BaseCodeCursor
+        implements CodeInput {
+    /** source array to read from */
+    private final short[] array;
+
+    /**
+     * Constructs an instance.
+     */
+    public ShortArrayCodeInput(short[] array) {
+        if (array == null) {
+            throw new NullPointerException("array == null");
+        }
+
+        this.array = array;
+    }
+
+    /** @inheritDoc */
+    public boolean hasMore() {
+        return cursor() < array.length;
+    }
+
+    /** @inheritDoc */
+    public int read() throws EOFException {
+        try {
+            int value = array[cursor()];
+            advance(1);
+            return value & 0xffff;
+        } catch (ArrayIndexOutOfBoundsException ex) {
+            throw new EOFException();
+        }
+    }
+
+    /** @inheritDoc */
+    public int readInt() throws EOFException {
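+        // Code units combine little-endian: the first unit read supplies the
+        // low-order 16 bits (readLong below follows the same ordering).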
+        int short0 = read();
+        int short1 = read();
+
+        return short0 | (short1 << 16);
+    }
+
+    /** @inheritDoc */
+    public long readLong() throws EOFException {
+        long short0 = read();
+        long short1 = read();
+        long short2 = read();
+        long short3 = read();
+
+        return short0 | (short1 << 16) | (short2 << 32) | (short3 << 48);
+    }
+}
diff --git a/dx/src/com/android/dx/io/instructions/ShortArrayCodeOutput.java b/dx/src/com/android/dx/io/instructions/ShortArrayCodeOutput.java
new file mode 100644
index 0000000..efa7ddd
--- /dev/null
+++ b/dx/src/com/android/dx/io/instructions/ShortArrayCodeOutput.java
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io.instructions;
+
+/**
+ * Implementation of {@code CodeOutput} that writes to a {@code short[]}.
+ */
+public final class ShortArrayCodeOutput extends BaseCodeCursor
+        implements CodeOutput {
+    /** array to write to */
+    private final short[] array;
+
+    /**
+     * Constructs an instance.
+     *
+     * @param maxSize the maximum number of code units that will be written
+     */
+    public ShortArrayCodeOutput(int maxSize) {
+        if (maxSize < 0) {
+            throw new IllegalArgumentException("maxSize < 0");
+        }
+
+        this.array = new short[maxSize];
+    }
+
+    /**
+     * Gets the array. The returned array contains exactly the data
+     * written (i.e. no leftover space at the end).
+     */
+    public short[] getArray() {
+        int cursor = cursor();
+
+        if (cursor == array.length) {
+            return array;
+        }
+
+        short[] result = new short[cursor];
+        System.arraycopy(array, 0, result, 0, cursor);
+        return result;
+    }
+
+    /** @inheritDoc */
+    public void write(short codeUnit) {
+        array[cursor()] = codeUnit;
+        advance(1);
+    }
+
+    /** @inheritDoc */
+    public void write(short u0, short u1) {
+        write(u0);
+        write(u1);
+    }
+
+    /** @inheritDoc */
+    public void write(short u0, short u1, short u2) {
+        write(u0);
+        write(u1);
+        write(u2);
+    }
+
+    /** @inheritDoc */
+    public void write(short u0, short u1, short u2, short u3) {
+        write(u0);
+        write(u1);
+        write(u2);
+        write(u3);
+    }
+
+    /** @inheritDoc */
+    public void write(short u0, short u1, short u2, short u3, short u4) {
+        write(u0);
+        write(u1);
+        write(u2);
+        write(u3);
+        write(u4);
+    }
+
+    /** @inheritDoc */
+    public void writeInt(int value) {
+        write((short) value);
+        write((short) (value >> 16));
+    }
+
+    /** @inheritDoc */
+    public void writeLong(long value) {
+        write((short) value);
+        write((short) (value >> 16));
+        write((short) (value >> 32));
+        write((short) (value >> 48));
+    }
+
+    /** @inheritDoc */
+    public void write(byte[] data) {
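+        // Pack each pair of bytes into one little-endian code unit; a
+        // trailing odd byte ends up in the low half of a final unit.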
+        int value = 0;
+        boolean even = true;
+        for (byte b : data) {
+            if (even) {
+                value = b & 0xff;
+                even = false;
+            } else {
+                value |= b << 8;
+                write((short) value);
+                even = true;
+            }
+        }
+
+        if (!even) {
+            write((short) value);
+        }
+    }
+
+    /** @inheritDoc */
+    public void write(short[] data) {
+        for (short unit : data) {
+            write(unit);
+        }
+    }
+
+    /** @inheritDoc */
+    public void write(int[] data) {
+        for (int i : data) {
+            writeInt(i);
+        }
+    }
+
+    /** @inheritDoc */
+    public void write(long[] data) {
+        for (long l : data) {
+            writeLong(l);
+        }
+    }
+}
diff --git a/dx/src/com/android/dx/io/instructions/SparseSwitchPayloadDecodedInstruction.java b/dx/src/com/android/dx/io/instructions/SparseSwitchPayloadDecodedInstruction.java
new file mode 100644
index 0000000..bfc47c9
--- /dev/null
+++ b/dx/src/com/android/dx/io/instructions/SparseSwitchPayloadDecodedInstruction.java
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io.instructions;
+
+/**
+ * A decoded Dalvik instruction which contains the payload for
+ * a {@code sparse-switch} instruction.
+ */
+public final class SparseSwitchPayloadDecodedInstruction
+        extends DecodedInstruction {
+    /** array of key values */
+    private final int[] keys;
+
+    /**
+     * array of target addresses. These are absolute, not relative,
+     * addresses.
+     */
+    private final int[] targets;
+
+    /**
+     * Constructs an instance.
+     */
+    public SparseSwitchPayloadDecodedInstruction(InstructionCodec format,
+            int opcode, int[] keys, int[] targets) {
+        super(format, opcode, 0, null, 0, 0L);
+
+        if (keys.length != targets.length) {
+            throw new IllegalArgumentException("keys/targets length mismatch");
+        }
+
+        this.keys = keys;
+        this.targets = targets;
+    }
+
+    /** @inheritDoc */
+    public int getRegisterCount() {
+        return 0;
+    }
+
+    public int[] getKeys() {
+        return keys;
+    }
+
+    public int[] getTargets() {
+        return targets;
+    }
+
+    /** @inheritDoc */
+    public DecodedInstruction withIndex(int newIndex) {
+        throw new UnsupportedOperationException("no index in instruction");
+    }
+}
diff --git a/dx/src/com/android/dx/io/instructions/ThreeRegisterDecodedInstruction.java b/dx/src/com/android/dx/io/instructions/ThreeRegisterDecodedInstruction.java
new file mode 100644
index 0000000..a463677
--- /dev/null
+++ b/dx/src/com/android/dx/io/instructions/ThreeRegisterDecodedInstruction.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io.instructions;
+
+import com.android.dx.io.IndexType;
+
+/**
+ * A decoded Dalvik instruction which has three register arguments.
+ */
+public final class ThreeRegisterDecodedInstruction extends DecodedInstruction {
+    /** register argument "A" */
+    private final int a;
+
+    /** register argument "B" */
+    private final int b;
+
+    /** register argument "C" */
+    private final int c;
+
+    /**
+     * Constructs an instance.
+     */
+    public ThreeRegisterDecodedInstruction(InstructionCodec format, int opcode,
+            int index, IndexType indexType, int target, long literal,
+            int a, int b, int c) {
+        super(format, opcode, index, indexType, target, literal);
+
+        this.a = a;
+        this.b = b;
+        this.c = c;
+    }
+
+    /** @inheritDoc */
+    public int getRegisterCount() {
+        return 3;
+    }
+
+    /** @inheritDoc */
+    public int getA() {
+        return a;
+    }
+
+    /** @inheritDoc */
+    public int getB() {
+        return b;
+    }
+
+    /** @inheritDoc */
+    public int getC() {
+        return c;
+    }
+
+    /** @inheritDoc */
+    public DecodedInstruction withIndex(int newIndex) {
+        return new ThreeRegisterDecodedInstruction(
+                getFormat(), getOpcode(), newIndex, getIndexType(),
+                getTarget(), getLiteral(), a, b, c);
+    }
+}
diff --git a/dx/src/com/android/dx/io/instructions/TwoRegisterDecodedInstruction.java b/dx/src/com/android/dx/io/instructions/TwoRegisterDecodedInstruction.java
new file mode 100644
index 0000000..acb77ba
--- /dev/null
+++ b/dx/src/com/android/dx/io/instructions/TwoRegisterDecodedInstruction.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io.instructions;
+
+import com.android.dx.io.IndexType;
+
+/**
+ * A decoded Dalvik instruction which has two register arguments.
+ */
+public final class TwoRegisterDecodedInstruction extends DecodedInstruction {
+    /** register argument "A" */
+    private final int a;
+
+    /** register argument "B" */
+    private final int b;
+
+    /**
+     * Constructs an instance.
+     */
+    public TwoRegisterDecodedInstruction(InstructionCodec format, int opcode,
+            int index, IndexType indexType, int target, long literal,
+            int a, int b) {
+        super(format, opcode, index, indexType, target, literal);
+
+        this.a = a;
+        this.b = b;
+    }
+
+    /** @inheritDoc */
+    public int getRegisterCount() {
+        return 2;
+    }
+
+    /** @inheritDoc */
+    public int getA() {
+        return a;
+    }
+
+    /** @inheritDoc */
+    public int getB() {
+        return b;
+    }
+
+    /** @inheritDoc */
+    public DecodedInstruction withIndex(int newIndex) {
+        return new TwoRegisterDecodedInstruction(
+                getFormat(), getOpcode(), newIndex, getIndexType(),
+                getTarget(), getLiteral(), a, b);
+    }
+}
diff --git a/dx/src/com/android/dx/io/instructions/ZeroRegisterDecodedInstruction.java b/dx/src/com/android/dx/io/instructions/ZeroRegisterDecodedInstruction.java
new file mode 100644
index 0000000..172caa4
--- /dev/null
+++ b/dx/src/com/android/dx/io/instructions/ZeroRegisterDecodedInstruction.java
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.io.instructions;
+
+import com.android.dx.io.IndexType;
+
+/**
+ * A decoded Dalvik instruction which has no register arguments.
+ */
+public final class ZeroRegisterDecodedInstruction extends DecodedInstruction {
+    /**
+     * Constructs an instance.
+     */
+    public ZeroRegisterDecodedInstruction(InstructionCodec format, int opcode,
+            int index, IndexType indexType, int target, long literal) {
+        super(format, opcode, index, indexType, target, literal);
+    }
+
+    /** @inheritDoc */
+    public int getRegisterCount() {
+        return 0;
+    }
+
+    /** @inheritDoc */
+    public DecodedInstruction withIndex(int newIndex) {
+        return new ZeroRegisterDecodedInstruction(
+                getFormat(), getOpcode(), newIndex, getIndexType(),
+                getTarget(), getLiteral());
+    }
+}
diff --git a/dx/src/com/android/dx/merge/DexMerger.java b/dx/src/com/android/dx/merge/DexMerger.java
new file mode 100644
index 0000000..1fe0ff5
--- /dev/null
+++ b/dx/src/com/android/dx/merge/DexMerger.java
@@ -0,0 +1,869 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.merge;
+
+import com.android.dx.dex.SizeOf;
+import com.android.dx.dex.TableOfContents;
+import com.android.dx.io.ClassData;
+import com.android.dx.io.ClassDef;
+import com.android.dx.io.Code;
+import com.android.dx.io.DexBuffer;
+import com.android.dx.io.DexHasher;
+import com.android.dx.io.FieldId;
+import com.android.dx.io.MethodId;
+import com.android.dx.io.ProtoId;
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+
+/**
+ * Combine two dex files into one.
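+ *
+ * <p>A minimal usage sketch (mirroring {@code main} below; file names are
+ * illustrative only):
+ *
+ * <pre>
+ *   DexBuffer dexA = new DexBuffer();
+ *   dexA.loadFrom(new File("a.dex"));
+ *   DexBuffer dexB = new DexBuffer();
+ *   dexB.loadFrom(new File("b.dex"));
+ *   new DexMerger(dexA, dexB).merge().writeTo(new File("merged.dex"));
+ * </pre>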
+ */
+public final class DexMerger {
+    private final WriterSizes writerSizes;
+    private final DexBuffer dexOut = new DexBuffer();
+    private final DexBuffer.Section headerOut;
+    /** All IDs and definitions sections */
+    private final DexBuffer.Section idsDefsOut;
+    private final DexBuffer.Section mapListOut;
+    private final DexBuffer.Section typeListOut;
+    private final DexBuffer.Section classDataOut;
+    private final DexBuffer.Section codeOut;
+    private final DexBuffer.Section stringDataOut;
+    private final DexBuffer.Section debugInfoOut;
+    private final DexBuffer.Section encodedArrayOut;
+
+    /** annotations directory on a type */
+    private final DexBuffer.Section annotationsDirectoryOut;
+    /** sets of annotations on a member, parameter or type */
+    private final DexBuffer.Section annotationSetOut;
+    /** parameter lists */
+    private final DexBuffer.Section annotationSetRefListOut;
+    /** individual annotations, each containing zero or more fields */
+    private final DexBuffer.Section annotationOut;
+
+    private final TableOfContents contentsOut;
+    private final DexBuffer dexA;
+    private final DexBuffer dexB;
+    private final IndexMap aIndexMap;
+    private final IndexMap bIndexMap;
+    private final InstructionTransformer aInstructionTransformer;
+    private final InstructionTransformer bInstructionTransformer;
+
+    /** minimum number of wasted bytes before it's worthwhile to compact the result */
+    private int compactWasteThreshold = 1024 * 1024; // 1MiB
+    /** minimum number of wasted bytes before it's worthwhile to emit a warning. */
+    private final int warnWasteThreshold = 100 * 1024; // 100KiB
+
+    public DexMerger(DexBuffer dexA, DexBuffer dexB) throws IOException {
+        this(dexA, dexB, new WriterSizes(dexA, dexB));
+    }
+
+    private DexMerger(DexBuffer dexA, DexBuffer dexB, WriterSizes writerSizes) throws IOException {
+        this.dexA = dexA;
+        this.dexB = dexB;
+        this.writerSizes = writerSizes;
+
+        TableOfContents aContents = dexA.getTableOfContents();
+        TableOfContents bContents = dexB.getTableOfContents();
+        aIndexMap = new IndexMap(dexOut, aContents);
+        bIndexMap = new IndexMap(dexOut, bContents);
+        aInstructionTransformer = new InstructionTransformer(aIndexMap);
+        bInstructionTransformer = new InstructionTransformer(bIndexMap);
+
+        headerOut = dexOut.appendSection(writerSizes.header, "header");
+        idsDefsOut = dexOut.appendSection(writerSizes.idsDefs, "ids defs");
+
+        contentsOut = dexOut.getTableOfContents();
+        contentsOut.dataOff = dexOut.getLength();
+
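+        /*
+         * Everything after the ids/defs lives in the data region. For each
+         * data section, record its offset in the output's table of contents
+         * before reserving its (estimated) space.
+         */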
+        contentsOut.mapList.off = dexOut.getLength();
+        contentsOut.mapList.size = 1;
+        mapListOut = dexOut.appendSection(writerSizes.mapList, "map list");
+
+        contentsOut.typeLists.off = dexOut.getLength();
+        contentsOut.typeLists.size = 0;
+        typeListOut = dexOut.appendSection(writerSizes.typeList, "type list");
+
+        contentsOut.annotationSetRefLists.off = dexOut.getLength();
+        contentsOut.annotationSetRefLists.size = 0;
+        annotationSetRefListOut = dexOut.appendSection(
+                writerSizes.annotationsSetRefList, "annotation set ref list");
+
+        contentsOut.annotationSets.off = dexOut.getLength();
+        contentsOut.annotationSets.size = 0;
+        annotationSetOut = dexOut.appendSection(
+                writerSizes.annotationsSet, "annotation sets");
+
+        contentsOut.classDatas.off = dexOut.getLength();
+        contentsOut.classDatas.size = 0;
+        classDataOut = dexOut.appendSection(writerSizes.classData, "class data");
+
+        contentsOut.codes.off = dexOut.getLength();
+        contentsOut.codes.size = 0;
+        codeOut = dexOut.appendSection(writerSizes.code, "code");
+
+        contentsOut.stringDatas.off = dexOut.getLength();
+        contentsOut.stringDatas.size = 0;
+        stringDataOut = dexOut.appendSection(writerSizes.stringData, "string data");
+
+        contentsOut.debugInfos.off = dexOut.getLength();
+        contentsOut.debugInfos.size = 0;
+        debugInfoOut = dexOut.appendSection(writerSizes.debugInfo, "debug info");
+
+        contentsOut.annotations.off = dexOut.getLength();
+        contentsOut.annotations.size = 0;
+        annotationOut = dexOut.appendSection(writerSizes.annotation, "annotation");
+
+        contentsOut.encodedArrays.off = dexOut.getLength();
+        contentsOut.encodedArrays.size = 0;
+        encodedArrayOut = dexOut.appendSection(writerSizes.encodedArray, "encoded array");
+
+        contentsOut.annotationsDirectories.off = dexOut.getLength();
+        contentsOut.annotationsDirectories.size = 0;
+        annotationsDirectoryOut = dexOut.appendSection(
+                writerSizes.annotationsDirectory, "annotations directory");
+
+        dexOut.noMoreSections();
+        contentsOut.dataSize = dexOut.getLength() - contentsOut.dataOff;
+    }
+
+    public void setCompactWasteThreshold(int compactWasteThreshold) {
+        this.compactWasteThreshold = compactWasteThreshold;
+    }
+
+    private DexBuffer mergeDexBuffers() throws IOException {
+        mergeStringIds();
+        mergeTypeIds();
+        mergeTypeLists();
+        mergeProtoIds();
+        mergeFieldIds();
+        mergeMethodIds();
+        unionAnnotations();
+        mergeClassDefs();
+
+        // write the header
+        contentsOut.header.off = 0;
+        contentsOut.header.size = 1;
+        contentsOut.fileSize = dexOut.getLength();
+        contentsOut.computeSizesFromOffsets();
+        contentsOut.writeHeader(headerOut);
+        contentsOut.writeMap(mapListOut);
+
+        // generate and write the hashes
+        new DexHasher().writeHashes(dexOut);
+
+        return dexOut;
+    }
+
+    public DexBuffer merge() throws IOException {
+        long start = System.nanoTime();
+        DexBuffer result = mergeDexBuffers();
+
+        /*
+         * We use pessimistic sizes when merging dex files. If those sizes
+         * result in too many bytes wasted, compact the result. To compact,
+         * simply merge the result with itself.
+         */
+        WriterSizes compactedSizes = writerSizes.clone();
+        compactedSizes.minusWaste(this);
+        int wastedByteCount = writerSizes.size() - compactedSizes.size();
+        if (wastedByteCount > compactWasteThreshold) {
+            DexMerger compacter = new DexMerger(dexOut, dexOut, compactedSizes);
+            result = compacter.mergeDexBuffers();
+            System.out.printf("Result compacted from %.1fKiB to %.1fKiB to save %.1fKiB%n",
+                    dexOut.getLength() / 1024f,
+                    result.getLength() / 1024f,
+                    wastedByteCount / 1024f);
+        } else if (wastedByteCount >= warnWasteThreshold) {
+            System.out.printf("Result includes %.1fKiB of wasted space",
+                    wastedByteCount / 1024f);
+        }
+
+        long elapsed = System.nanoTime() - start;
+        System.out.printf("Merged dex A (%d defs/%.1fKiB) with dex B "
+                + "(%d defs/%.1fKiB). Result is %d defs/%.1fKiB. Took %.1fs%n",
+                dexA.getTableOfContents().classDefs.size,
+                dexA.getLength() / 1024f,
+                dexB.getTableOfContents().classDefs.size,
+                dexB.getLength() / 1024f,
+                result.getTableOfContents().classDefs.size,
+                result.getLength() / 1024f,
+                elapsed / 1000000000f);
+
+        return result;
+    }
+
+    /**
+     * Reads an IDs section of two dex files and writes an IDs section of a
+     * merged dex file. Populates maps from old to new indices in the process.
+     */
+    abstract class IdMerger<T extends Comparable<T>> {
+        private final DexBuffer.Section out;
+
+        protected IdMerger(DexBuffer.Section out) {
+            this.out = out;
+        }
+
+        public final void merge() {
+            TableOfContents.Section aSection = getSection(dexA.getTableOfContents());
+            TableOfContents.Section bSection = getSection(dexB.getTableOfContents());
+            getSection(contentsOut).off = out.getPosition();
+
+            DexBuffer.Section inA = aSection.exists() ? dexA.open(aSection.off) : null;
+            DexBuffer.Section inB = bSection.exists() ? dexB.open(bSection.off) : null;
+            int aIndex = 0;
+            int bIndex = 0;
+            int outCount = 0;
+            T a = null;
+            T b = null;
+
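+            /*
+             * Sorted merge: pull the next unconsumed item from each input,
+             * emit the smaller one (an equal pair is written only once), and
+             * record each old index's new position as we go.
+             */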
+            while (true) {
+                int aOffset = -1;
+                if (a == null && aIndex < aSection.size) {
+                    aOffset = inA.getPosition();
+                    a = read(inA, aIndexMap, aIndex);
+                }
+                int bOffset = -1;
+                if (b == null && bIndex < bSection.size) {
+                    bOffset = inB.getPosition();
+                    b = read(inB, bIndexMap, bIndex);
+                }
+
+                // Write the smaller of a and b. If they're equal, write only once
+                boolean advanceA;
+                boolean advanceB;
+                if (a != null && b != null) {
+                    int compare = a.compareTo(b);
+                    advanceA = compare <= 0;
+                    advanceB = compare >= 0;
+                } else {
+                    advanceA = (a != null);
+                    advanceB = (b != null);
+                }
+
+                T toWrite = null;
+                if (advanceA) {
+                    toWrite = a;
+                    updateIndex(aOffset, aIndexMap, aIndex++, outCount);
+                    a = null;
+                }
+                if (advanceB) {
+                    toWrite = b;
+                    updateIndex(bOffset, bIndexMap, bIndex++, outCount);
+                    b = null;
+                }
+                if (toWrite == null) {
+                    break; // advanceA == false && advanceB == false
+                }
+                write(toWrite);
+                outCount++;
+            }
+
+            getSection(contentsOut).size = outCount;
+        }
+
+        abstract TableOfContents.Section getSection(TableOfContents tableOfContents);
+        abstract T read(DexBuffer.Section in, IndexMap indexMap, int index);
+        abstract void updateIndex(int offset, IndexMap indexMap, int oldIndex, int newIndex);
+        abstract void write(T value);
+    }
+
+    private void mergeStringIds() {
+        new IdMerger<String>(idsDefsOut) {
+            @Override TableOfContents.Section getSection(TableOfContents tableOfContents) {
+                return tableOfContents.stringIds;
+            }
+
+            @Override String read(DexBuffer.Section in, IndexMap indexMap, int index) {
+                return in.readString();
+            }
+
+            @Override void updateIndex(int offset, IndexMap indexMap, int oldIndex, int newIndex) {
+                indexMap.stringIds[oldIndex] = newIndex;
+            }
+
+            @Override void write(String value) {
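+                // A string_id_item is just an offset; the string data itself
+                // goes into the string_data section.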
+                contentsOut.stringDatas.size++;
+                idsDefsOut.writeInt(stringDataOut.getPosition());
+                stringDataOut.writeStringData(value);
+            }
+        }.merge();
+    }
+
+    private void mergeTypeIds() {
+        new IdMerger<Integer>(idsDefsOut) {
+            @Override TableOfContents.Section getSection(TableOfContents tableOfContents) {
+                return tableOfContents.typeIds;
+            }
+
+            @Override Integer read(DexBuffer.Section in, IndexMap indexMap, int index) {
+                int stringIndex = in.readInt();
+                return indexMap.adjustString(stringIndex);
+            }
+
+            @Override void updateIndex(int offset, IndexMap indexMap, int oldIndex, int newIndex) {
+                indexMap.typeIds[oldIndex] = (short) newIndex;
+            }
+
+            @Override void write(Integer value) {
+                idsDefsOut.writeInt(value);
+            }
+        }.merge();
+    }
+
+    private void mergeTypeLists() {
+        new IdMerger<TypeList>(typeListOut) {
+            @Override TableOfContents.Section getSection(TableOfContents tableOfContents) {
+                return tableOfContents.typeLists;
+            }
+
+            @Override TypeList read(DexBuffer.Section in, IndexMap indexMap, int index) {
+                return indexMap.adjustTypeList(in.readTypeList());
+            }
+
+            @Override void updateIndex(int offset, IndexMap indexMap, int oldIndex, int newIndex) {
+                indexMap.typeListOffsets.put(offset, typeListOut.getPosition());
+            }
+
+            @Override void write(TypeList value) {
+                typeListOut.writeTypeList(value);
+            }
+        }.merge();
+    }
+
+    private void mergeProtoIds() {
+        new IdMerger<ProtoId>(idsDefsOut) {
+            @Override TableOfContents.Section getSection(TableOfContents tableOfContents) {
+                return tableOfContents.protoIds;
+            }
+
+            @Override ProtoId read(DexBuffer.Section in, IndexMap indexMap, int index) {
+                return indexMap.adjust(in.readProtoId());
+            }
+
+            @Override void updateIndex(int offset, IndexMap indexMap, int oldIndex, int newIndex) {
+                indexMap.protoIds[oldIndex] = (short) newIndex;
+            }
+
+            @Override void write(ProtoId value) {
+                value.writeTo(idsDefsOut);
+            }
+        }.merge();
+    }
+
+    private void mergeFieldIds() {
+        new IdMerger<FieldId>(idsDefsOut) {
+            @Override TableOfContents.Section getSection(TableOfContents tableOfContents) {
+                return tableOfContents.fieldIds;
+            }
+
+            @Override FieldId read(DexBuffer.Section in, IndexMap indexMap, int index) {
+                return indexMap.adjust(in.readFieldId());
+            }
+
+            @Override void updateIndex(int offset, IndexMap indexMap, int oldIndex, int newIndex) {
+                indexMap.fieldIds[oldIndex] = (short) newIndex;
+            }
+
+            @Override void write(FieldId value) {
+                value.writeTo(idsDefsOut);
+            }
+        }.merge();
+    }
+
+    private void mergeMethodIds() {
+        new IdMerger<MethodId>(idsDefsOut) {
+            @Override TableOfContents.Section getSection(TableOfContents tableOfContents) {
+                return tableOfContents.methodIds;
+            }
+
+            @Override MethodId read(DexBuffer.Section in, IndexMap indexMap, int index) {
+                return indexMap.adjust(in.readMethodId());
+            }
+
+            @Override void updateIndex(int offset, IndexMap indexMap, int oldIndex, int newIndex) {
+                indexMap.methodIds[oldIndex] = (short) newIndex;
+            }
+
+            @Override void write(MethodId methodId) {
+                methodId.writeTo(idsDefsOut);
+            }
+        }.merge();
+    }
+
+    private void mergeClassDefs() {
+        SortableType[] types = getSortedTypes();
+        contentsOut.classDefs.off = idsDefsOut.getPosition();
+        contentsOut.classDefs.size = types.length;
+
+        for (SortableType type : types) {
+            DexBuffer in = type.getBuffer();
+            IndexMap indexMap = (in == dexA) ? aIndexMap : bIndexMap;
+            transformClassDef(in, type.getClassDef(), indexMap);
+        }
+    }
+
+    /**
+     * Returns the union of classes from both files, sorted in order such that
+     * a class is always preceded by its supertype and implemented interfaces.
+     */
+    private SortableType[] getSortedTypes() {
+        // size is pessimistic; doesn't include arrays
+        SortableType[] sortableTypes = new SortableType[contentsOut.typeIds.size];
+        readSortableTypes(sortableTypes, dexA, aIndexMap);
+        readSortableTypes(sortableTypes, dexB, bIndexMap);
+
+        /*
+         * Populate the depths of each sortable type. This makes D iterations
+         * through all N types, where 'D' is the depth of the deepest type. For
+         * example, the deepest class in libcore is Xalan's KeyIterator, which
+         * is 11 types deep.
+         */
+        while (true) {
+            boolean allDone = true;
+            for (SortableType sortableType : sortableTypes) {
+                if (sortableType != null && !sortableType.isDepthAssigned()) {
+                    allDone &= sortableType.tryAssignDepth(sortableTypes);
+                }
+            }
+            if (allDone) {
+                break;
+            }
+        }
+
+        // Now that all types have depth information, the result can be sorted
+        Arrays.sort(sortableTypes, SortableType.NULLS_LAST_ORDER);
+
+        // Strip nulls from the end
+        int firstNull = Arrays.asList(sortableTypes).indexOf(null);
+        return firstNull != -1
+                ? Arrays.copyOfRange(sortableTypes, 0, firstNull)
+                : sortableTypes;
+    }
+
+    /**
+     * Reads just enough data on each class so that we can sort it and then find
+     * it later.
+     */
+    private void readSortableTypes(SortableType[] sortableTypes, DexBuffer buffer,
+            IndexMap indexMap) {
+        for (ClassDef classDef : buffer.classDefs()) {
+            SortableType sortableType = indexMap.adjust(new SortableType(buffer, classDef));
+            int t = sortableType.getTypeIndex();
+            if (sortableTypes[t] == null) {
+                sortableTypes[t] = sortableType;
+            }
+        }
+    }
+
+    /**
+     * Copy annotation sets from each input to the output.
+     *
+     * TODO: this may write multiple copies of the same annotation.
+     * This should shrink the output by merging rather than unioning
+     */
+    private void unionAnnotations() {
+        transformAnnotationSets(dexA, aIndexMap);
+        transformAnnotationSets(dexB, bIndexMap);
+        transformAnnotationDirectories(dexA, aIndexMap);
+        transformAnnotationDirectories(dexB, bIndexMap);
+    }
+
+    private void transformAnnotationSets(DexBuffer in, IndexMap indexMap) {
+        TableOfContents.Section section = in.getTableOfContents().annotationSets;
+        if (section.exists()) {
+            DexBuffer.Section setIn = in.open(section.off);
+            for (int i = 0; i < section.size; i++) {
+                transformAnnotationSet(in, indexMap, setIn);
+            }
+        }
+    }
+
+    private void transformAnnotationDirectories(DexBuffer in, IndexMap indexMap) {
+        TableOfContents.Section section = in.getTableOfContents().annotationsDirectories;
+        if (section.exists()) {
+            DexBuffer.Section directoryIn = in.open(section.off);
+            for (int i = 0; i < section.size; i++) {
+                transformAnnotationDirectory(in, directoryIn, indexMap);
+            }
+        }
+    }
+
+    /**
+     * Reads a class_def_item beginning at {@code in} and writes the index and
+     * data.
+     */
+    private void transformClassDef(DexBuffer in, ClassDef classDef, IndexMap indexMap) {
+        idsDefsOut.assertFourByteAligned();
+        idsDefsOut.writeInt(classDef.getTypeIndex());
+        idsDefsOut.writeInt(classDef.getAccessFlags());
+        idsDefsOut.writeInt(classDef.getSupertypeIndex());
+        idsDefsOut.writeInt(classDef.getInterfacesOffset());
+
+        int sourceFileIndex = indexMap.adjustString(classDef.getSourceFileIndex());
+        idsDefsOut.writeInt(sourceFileIndex);
+
+        int annotationsOff = classDef.getAnnotationsOffset();
+        idsDefsOut.writeInt(indexMap.adjustAnnotationDirectory(annotationsOff));
+
+        int classDataOff = classDef.getClassDataOffset();
+        if (classDataOff == 0) {
+            idsDefsOut.writeInt(0);
+        } else {
+            idsDefsOut.writeInt(classDataOut.getPosition());
+            ClassData classData = in.readClassData(classDef);
+            transformClassData(in, classData, indexMap);
+        }
+
+        int staticValuesOff = classDef.getStaticValuesOffset();
+        if (staticValuesOff == 0) {
+            idsDefsOut.writeInt(0);
+        } else {
+            DexBuffer.Section staticValuesIn = in.open(staticValuesOff);
+            idsDefsOut.writeInt(encodedArrayOut.getPosition());
+            transformStaticValues(staticValuesIn, indexMap);
+        }
+    }
+
+    /**
+     * Transform all annotations on a class.
+     */
+    private void transformAnnotationDirectory(
+            DexBuffer in, DexBuffer.Section directoryIn, IndexMap indexMap) {
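+        // annotations_directory_item: class annotation set offset, the three
+        // element counts, then the field, method and parameter lists; every
+        // index and offset is remapped as it is copied.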
+        contentsOut.annotationsDirectories.size++;
+        annotationsDirectoryOut.assertFourByteAligned();
+        indexMap.annotationDirectoryOffsets.put(
+                directoryIn.getPosition(), annotationsDirectoryOut.getPosition());
+
+        int classAnnotationsOffset = indexMap.adjustAnnotationSet(directoryIn.readInt());
+        annotationsDirectoryOut.writeInt(classAnnotationsOffset);
+
+        int fieldsSize = directoryIn.readInt();
+        annotationsDirectoryOut.writeInt(fieldsSize);
+
+        int methodsSize = directoryIn.readInt();
+        annotationsDirectoryOut.writeInt(methodsSize);
+
+        int parameterListSize = directoryIn.readInt();
+        annotationsDirectoryOut.writeInt(parameterListSize);
+
+        for (int i = 0; i < fieldsSize; i++) {
+            // field index
+            annotationsDirectoryOut.writeInt(indexMap.adjustField(directoryIn.readInt()));
+
+            // annotations offset
+            annotationsDirectoryOut.writeInt(indexMap.adjustAnnotationSet(directoryIn.readInt()));
+        }
+
+        for (int i = 0; i < methodsSize; i++) {
+            // method index
+            annotationsDirectoryOut.writeInt(indexMap.adjustMethod(directoryIn.readInt()));
+
+            // annotation set offset
+            annotationsDirectoryOut.writeInt(
+                    indexMap.adjustAnnotationSet(directoryIn.readInt()));
+        }
+
+        for (int i = 0; i < parameterListSize; i++) {
+            contentsOut.annotationSetRefLists.size++;
+            annotationSetRefListOut.assertFourByteAligned();
+
+            // method index
+            annotationsDirectoryOut.writeInt(indexMap.adjustMethod(directoryIn.readInt()));
+
+            // annotations offset
+            annotationsDirectoryOut.writeInt(annotationSetRefListOut.getPosition());
+            DexBuffer.Section refListIn = in.open(directoryIn.readInt());
+
+            // parameters
+            int parameterCount = refListIn.readInt();
+            annotationSetRefListOut.writeInt(parameterCount);
+            for (int p = 0; p < parameterCount; p++) {
+                annotationSetRefListOut.writeInt(indexMap.adjustAnnotationSet(refListIn.readInt()));
+            }
+        }
+    }
+
+    /**
+     * Transform all annotations on a single type, member or parameter.
+     */
+    private void transformAnnotationSet(DexBuffer in, IndexMap indexMap, DexBuffer.Section setIn) {
+        contentsOut.annotationSets.size++;
+        annotationSetOut.assertFourByteAligned();
+        indexMap.annotationSetOffsets.put(setIn.getPosition(), annotationSetOut.getPosition());
+
+        int size = setIn.readInt();
+        annotationSetOut.writeInt(size);
+
+        for (int j = 0; j < size; j++) {
+            // annotation offset
+            annotationSetOut.writeInt(annotationOut.getPosition());
+            transformAnnotation(in.open(setIn.readInt()), indexMap);
+        }
+    }
+
+    /**
+     * Transform one annotation, which may have multiple fields.
+     */
+    private void transformAnnotation(DexBuffer.Section in, IndexMap indexMap) {
+        contentsOut.annotations.size++;
+
+        // visibility
+        annotationOut.writeByte(in.readByte());
+
+        // type index
+        annotationOut.writeUleb128((int) indexMap.adjustType(in.readUleb128()));
+
+        // size
+        int size = in.readUleb128();
+        annotationOut.writeUleb128(size);
+
+        // elements
+        for (int i = 0; i < size; i++) {
+            annotationOut.writeUleb128(indexMap.adjustString(in.readUleb128())); // name
+            new EncodedValueTransformer(in, indexMap, annotationOut).readValue(); // value
+        }
+    }
+
+    private void transformClassData(DexBuffer in, ClassData classData, IndexMap indexMap) {
+        contentsOut.classDatas.size++;
+
+        ClassData.Field[] staticFields = classData.getStaticFields();
+        ClassData.Field[] instanceFields = classData.getInstanceFields();
+        ClassData.Method[] directMethods = classData.getDirectMethods();
+        ClassData.Method[] virtualMethods = classData.getVirtualMethods();
+
+        classDataOut.writeUleb128(staticFields.length);
+        classDataOut.writeUleb128(instanceFields.length);
+        classDataOut.writeUleb128(directMethods.length);
+        classDataOut.writeUleb128(virtualMethods.length);
+
+        transformFields(indexMap, staticFields);
+        transformFields(indexMap, instanceFields);
+        transformMethods(in, indexMap, directMethods);
+        transformMethods(in, indexMap, virtualMethods);
+    }
+
+    private void transformFields(IndexMap indexMap, ClassData.Field[] fields) {
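+        // class_data_item stores member indices as deltas from the previous
+        // entry, so write the difference from the last emitted index.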
+        int lastOutFieldIndex = 0;
+        for (ClassData.Field field : fields) {
+            int outFieldIndex = indexMap.adjustField(field.getFieldIndex());
+            classDataOut.writeUleb128(outFieldIndex - lastOutFieldIndex);
+            lastOutFieldIndex = outFieldIndex;
+            classDataOut.writeUleb128(field.getAccessFlags());
+        }
+    }
+
+    private void transformMethods(DexBuffer in, IndexMap indexMap, ClassData.Method[] methods) {
+        int lastOutMethodIndex = 0;
+        for (ClassData.Method method : methods) {
+            int outMethodIndex = indexMap.adjustMethod(method.getMethodIndex());
+            classDataOut.writeUleb128(outMethodIndex - lastOutMethodIndex);
+            lastOutMethodIndex = outMethodIndex;
+
+            classDataOut.writeUleb128(method.getAccessFlags());
+
+            if (method.getCodeOffset() == 0) {
+                classDataOut.writeUleb128(0);
+            } else {
+                codeOut.alignToFourBytes();
+                classDataOut.writeUleb128(codeOut.getPosition());
+                transformCode(in, in.readCode(method), indexMap);
+            }
+        }
+    }
+
+    private void transformCode(DexBuffer in, Code code, IndexMap indexMap) {
+        contentsOut.codes.size++;
+        codeOut.assertFourByteAligned();
+
+        codeOut.writeShort(code.getRegistersSize());
+        codeOut.writeShort(code.getInsSize());
+        codeOut.writeShort(code.getOutsSize());
+
+        Code.Try[] tries = code.getTries();
+        codeOut.writeShort((short) tries.length);
+
+        // TODO: retain debug info
+        // code.getDebugInfoOffset();
+        codeOut.writeInt(0);
+
+        short[] instructions = code.getInstructions();
+        InstructionTransformer transformer = (in == dexA)
+                ? aInstructionTransformer
+                : bInstructionTransformer;
+        short[] newInstructions = transformer.transform(instructions);
+        codeOut.writeInt(newInstructions.length);
+        codeOut.write(newInstructions);
+
+        if (tries.length > 0) {
+            if (newInstructions.length % 2 == 1) {
+                codeOut.writeShort((short) 0); // padding
+            }
+            for (Code.Try tryItem : tries) {
+                codeOut.writeInt(tryItem.getStartAddress());
+                codeOut.writeShort(tryItem.getInstructionCount());
+                codeOut.writeShort(tryItem.getHandlerOffset());
+            }
+            Code.CatchHandler[] catchHandlers = code.getCatchHandlers();
+            codeOut.writeUleb128(catchHandlers.length);
+            for (Code.CatchHandler catchHandler : catchHandlers) {
+                transformEncodedCatchHandler(catchHandler, indexMap);
+            }
+        }
+    }
+
+    private void transformEncodedCatchHandler(Code.CatchHandler catchHandler, IndexMap indexMap) {
+        int catchAllAddress = catchHandler.getCatchAllAddress();
+        int[] typeIndexes = catchHandler.getTypeIndexes();
+        int[] addresses = catchHandler.getAddresses();
+
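+        // encoded_catch_handler: a negative handler count signals that a
+        // catch-all address follows the typed handlers.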
+        if (catchAllAddress != -1) {
+            codeOut.writeSleb128(-typeIndexes.length);
+        } else {
+            codeOut.writeSleb128(typeIndexes.length);
+        }
+
+        for (int i = 0; i < typeIndexes.length; i++) {
+            codeOut.writeUleb128(indexMap.adjustType(typeIndexes[i]));
+            codeOut.writeUleb128(addresses[i]);
+        }
+
+        if (catchAllAddress != -1) {
+            codeOut.writeUleb128(catchAllAddress);
+        }
+    }
+
+    private void transformStaticValues(DexBuffer.Section in, IndexMap indexMap) {
+        contentsOut.encodedArrays.size++;
+        new EncodedValueTransformer(in, indexMap, encodedArrayOut).readArray();
+    }
+
+    /**
+     * Byte counts for the sections written when creating a dex. Target sizes
+     * are defined in one of two ways:
+     * <ul>
+     * <li>By pessimistically guessing how large the union of dex files will be.
+     *     We're pessimistic because we can't predict the amount of duplication
+     *     between dex files, nor can we predict the length of ULEB-encoded
+     *     offsets or indices.
+     * <li>By exactly measuring an existing dex.
+     * </ul>
+     */
+    private static class WriterSizes implements Cloneable {
+        private int header = SizeOf.HEADER_ITEM;
+        private int idsDefs;
+        private int mapList;
+        private int typeList;
+        private int classData;
+        private int code;
+        private int stringData;
+        private int debugInfo;
+        private int encodedArray;
+        private int annotationsDirectory;
+        private int annotationsSet;
+        private int annotationsSetRefList;
+        private int annotation;
+
+        /**
+         * Compute sizes for merging a and b.
+         */
+        public WriterSizes(DexBuffer a, DexBuffer b) {
+            plus(a.getTableOfContents(), false);
+            plus(b.getTableOfContents(), false);
+        }
+
+        @Override public WriterSizes clone() {
+            try {
+                return (WriterSizes) super.clone();
+            } catch (CloneNotSupportedException e) {
+                throw new AssertionError();
+            }
+        }
+
+        public void plus(TableOfContents contents, boolean exact) {
+            idsDefs += contents.stringIds.size * SizeOf.STRING_ID_ITEM
+                    + contents.typeIds.size * SizeOf.TYPE_ID_ITEM
+                    + contents.protoIds.size * SizeOf.PROTO_ID_ITEM
+                    + contents.fieldIds.size * SizeOf.MEMBER_ID_ITEM
+                    + contents.methodIds.size * SizeOf.MEMBER_ID_ITEM
+                    + contents.classDefs.size * SizeOf.CLASS_DEF_ITEM;
+            mapList = SizeOf.UINT + (contents.sections.length * SizeOf.MAP_ITEM);
+            typeList += contents.typeLists.byteCount;
+            code += contents.codes.byteCount;
+            stringData += contents.stringDatas.byteCount;
+            debugInfo += contents.debugInfos.byteCount;
+            annotationsDirectory += contents.annotationsDirectories.byteCount;
+            annotationsSet += contents.annotationSets.byteCount;
+            annotationsSetRefList += contents.annotationSetRefLists.byteCount;
+
+            if (exact) {
+                classData += contents.classDatas.byteCount;
+                encodedArray += contents.encodedArrays.byteCount;
+                annotation += contents.annotations.byteCount;
+            } else {
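+                // These sections are ULEB128-heavy, so remapped indices and
+                // offsets may take more bytes than in the inputs; pad the
+                // estimates to be safe.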
+                classData += (int) Math.ceil(contents.classDatas.byteCount * 1.34);
+                encodedArray += contents.encodedArrays.byteCount * 2;
+                annotation += contents.annotations.byteCount * 2;
+            }
+        }
+
+        public void minusWaste(DexMerger dexMerger) {
+            header -= dexMerger.headerOut.remaining();
+            idsDefs -= dexMerger.idsDefsOut.remaining();
+            mapList -= dexMerger.mapListOut.remaining();
+            typeList -= dexMerger.typeListOut.remaining();
+            classData -= dexMerger.classDataOut.remaining();
+            code -= dexMerger.codeOut.remaining();
+            stringData -= dexMerger.stringDataOut.remaining();
+            debugInfo -= dexMerger.debugInfoOut.remaining();
+            encodedArray -= dexMerger.encodedArrayOut.remaining();
+            annotationsDirectory -= dexMerger.annotationsDirectoryOut.remaining();
+            annotationsSet -= dexMerger.annotationSetOut.remaining();
+            annotationsSetRefList -= dexMerger.annotationSetRefListOut.remaining();
+            annotation -= dexMerger.annotationOut.remaining();
+        }
+
+        public int size() {
+            return header + idsDefs + mapList + typeList + classData + code + stringData + debugInfo
+                    + encodedArray + annotationsDirectory + annotationsSet + annotationsSetRefList
+                    + annotation;
+        }
+    }
+
+    public static void main(String[] args) throws IOException {
+        if (args.length != 3) {
+            printUsage();
+            return;
+        }
+
+        DexBuffer dexA = new DexBuffer();
+        dexA.loadFrom(new File(args[1]));
+        DexBuffer dexB = new DexBuffer();
+        dexB.loadFrom(new File(args[2]));
+
+        DexBuffer merged = new DexMerger(dexA, dexB).merge();
+        merged.writeTo(new File(args[0]));
+    }
+
+    private static void printUsage() {
+        System.out.println("Usage: DexMerger <out.dex> <a.dex> <b.dex>");
+        System.out.println();
+        System.out.println("If both a and b define the same classes, a's copy will be used.");
+    }
+}
diff --git a/dx/src/com/android/dx/merge/EncodedValueTransformer.java b/dx/src/com/android/dx/merge/EncodedValueTransformer.java
new file mode 100644
index 0000000..0959f15
--- /dev/null
+++ b/dx/src/com/android/dx/merge/EncodedValueTransformer.java
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.merge;
+
+import com.android.dx.io.DexBuffer;
+import com.android.dx.io.EncodedValueReader;
+import com.android.dx.util.Unsigned;
+
+public final class EncodedValueTransformer extends EncodedValueReader {
+    private final IndexMap indexMap;
+    private final DexBuffer.Section out;
+
+    public EncodedValueTransformer(DexBuffer.Section in, IndexMap indexMap, DexBuffer.Section out) {
+        super(in);
+        this.indexMap = indexMap;
+        this.out = out;
+    }
+
+    protected void visitArray(int size) {
+        out.writeUleb128(size);
+    }
+
+    protected void visitAnnotation(int typeIndex, int size) {
+        out.writeUleb128(indexMap.adjustType(typeIndex));
+        out.writeUleb128(size);
+    }
+
+    protected void visitAnnotationName(int index) {
+        out.writeUleb128(indexMap.adjustString(index));
+    }
+
+    protected void visitPrimitive(int argAndType, int type, int arg, int size) {
+        out.writeByte(argAndType);
+        copyBytes(in, out, size);
+    }
+
+    protected void visitString(int type, int index) {
+        writeTypeAndSizeAndIndex(type, indexMap.adjustString(index), out);
+    }
+
+    protected void visitType(int type, int index) {
+        writeTypeAndSizeAndIndex(type, indexMap.adjustType(index), out);
+    }
+
+    protected void visitField(int type, int index) {
+        writeTypeAndSizeAndIndex(type, indexMap.adjustField(index), out);
+    }
+
+    protected void visitMethod(int type, int index) {
+        writeTypeAndSizeAndIndex(type, indexMap.adjustMethod(index), out);
+    }
+
+    protected void visitArrayValue(int argAndType) {
+        out.writeByte(argAndType);
+    }
+
+    protected void visitAnnotationValue(int argAndType) {
+        out.writeByte(argAndType);
+    }
+
+    protected void visitEncodedBoolean(int argAndType) {
+        out.writeByte(argAndType);
+    }
+
+    protected void visitEncodedNull(int argAndType) {
+        out.writeByte(argAndType);
+    }
+
+    private void writeTypeAndSizeAndIndex(int type, int index, DexBuffer.Section out) {
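+        // encoded_value header: (size - 1) in the top three bits, value type
+        // in the low five; the index follows little-endian, using only as
+        // many bytes as needed.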
+        int byteCount;
+        if (Unsigned.compare(index, 0xff) <= 0) {
+            byteCount = 1;
+        } else if (Unsigned.compare(index, 0xffff) <= 0) {
+            byteCount = 2;
+        } else if (Unsigned.compare(index, 0xffffff) <= 0) {
+            byteCount = 3;
+        } else {
+            byteCount = 4;
+        }
+        int argAndType = ((byteCount - 1) << 5) | type;
+        out.writeByte(argAndType);
+
+        for (int i = 0; i < byteCount; i++) {
+            out.writeByte(index & 0xff);
+            index >>>= 8;
+        }
+    }
+
+    private void copyBytes(DexBuffer.Section in, DexBuffer.Section out, int size) {
+        for (int i = 0; i < size; i++) {
+            out.writeByte(in.readByte());
+        }
+    }
+}
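
As a side note on the writeTypeAndSizeAndIndex helper above: it emits the dex encoded_value header, where the low five bits of the first byte carry the value type and the high three bits carry (byte count - 1), followed by the index in little-endian order. A minimal standalone sketch of the same packing, with an illustrative method name and return type:

    // Pack a value type and an unsigned index the way writeTypeAndSizeAndIndex
    // does, returning the raw bytes instead of writing them to a section.
    static byte[] packTypeAndIndex(int type, int index) {
        int byteCount = 1;
        while (byteCount < 4 && (index >>> (8 * byteCount)) != 0) {
            byteCount++;                                   // widen until the unsigned index fits
        }
        byte[] out = new byte[1 + byteCount];
        out[0] = (byte) (((byteCount - 1) << 5) | type);   // value_arg | value_type
        for (int i = 0; i < byteCount; i++) {
            out[1 + i] = (byte) (index >>> (8 * i));       // little-endian payload
        }
        return out;
    }
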
diff --git a/dx/src/com/android/dx/merge/IndexMap.java b/dx/src/com/android/dx/merge/IndexMap.java
new file mode 100644
index 0000000..26e87ae
--- /dev/null
+++ b/dx/src/com/android/dx/merge/IndexMap.java
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.merge;
+
+import com.android.dx.dex.TableOfContents;
+import com.android.dx.io.ClassDef;
+import com.android.dx.io.DexBuffer;
+import com.android.dx.io.FieldId;
+import com.android.dx.io.MethodId;
+import com.android.dx.io.ProtoId;
+import java.util.HashMap;
+
+/**
+ * Maps indices and offsets from one dex file to those in another. For
+ * example, if you have string #5 in the old dex file, its index in the new
+ * dex file is {@code stringIds[5]}.
+ */
+public final class IndexMap {
+    private final DexBuffer target;
+    public final int[] stringIds;
+    public final short[] typeIds;
+    public final short[] protoIds;
+    public final short[] fieldIds;
+    public final short[] methodIds;
+    public final HashMap<Integer, Integer> typeListOffsets;
+    public final HashMap<Integer, Integer> annotationSetOffsets;
+    public final HashMap<Integer, Integer> annotationDirectoryOffsets;
+
+    public IndexMap(DexBuffer target, TableOfContents tableOfContents) {
+        this.target = target;
+        this.stringIds = new int[tableOfContents.stringIds.size];
+        this.typeIds = new short[tableOfContents.typeIds.size];
+        this.protoIds = new short[tableOfContents.protoIds.size];
+        this.fieldIds = new short[tableOfContents.fieldIds.size];
+        this.methodIds = new short[tableOfContents.methodIds.size];
+        this.typeListOffsets = new HashMap<Integer, Integer>();
+        this.annotationSetOffsets = new HashMap<Integer, Integer>();
+        this.annotationDirectoryOffsets = new HashMap<Integer, Integer>();
+
+        /*
+         * A type list, annotation set, or annotation directory at offset 0 is
+         * always empty. Always map offset 0 to 0.
+         */
+        this.typeListOffsets.put(0, 0);
+        this.annotationSetOffsets.put(0, 0);
+        this.annotationDirectoryOffsets.put(0, 0);
+    }
+
+    public int adjustString(int stringIndex) {
+        return stringIndex == ClassDef.NO_INDEX ? ClassDef.NO_INDEX : stringIds[stringIndex];
+    }
+
+    public short adjustType(int typeIndex) {
+        return (typeIndex == ClassDef.NO_INDEX) ? ClassDef.NO_INDEX : typeIds[typeIndex];
+    }
+
+    public TypeList adjustTypeList(TypeList typeList) {
+        if (typeList == TypeList.EMPTY) {
+            return typeList;
+        }
+        short[] types = typeList.getTypes().clone();
+        for (int i = 0; i < types.length; i++) {
+            types[i] = adjustType(types[i]);
+        }
+        return new TypeList(target, types);
+    }
+
+    public short adjustProto(int protoIndex) {
+        return protoIds[protoIndex];
+    }
+
+    public short adjustField(int fieldIndex) {
+        return fieldIds[fieldIndex];
+    }
+
+    public short adjustMethod(int methodIndex) {
+        return methodIds[methodIndex];
+    }
+
+    public int adjustTypeListOffset(int typeListOffset) {
+        return typeListOffsets.get(typeListOffset);
+    }
+
+    public int adjustAnnotationSet(int annotationSetOffset) {
+        return annotationSetOffsets.get(annotationSetOffset);
+    }
+
+    public int adjustAnnotationDirectory(int annotationDirectoryOffset) {
+        return annotationDirectoryOffsets.get(annotationDirectoryOffset);
+    }
+
+    public MethodId adjust(MethodId methodId) {
+        return new MethodId(target,
+                adjustType(methodId.getDeclaringClassIndex()),
+                adjustProto(methodId.getProtoIndex()),
+                adjustString(methodId.getNameIndex()));
+    }
+
+    public FieldId adjust(FieldId fieldId) {
+        return new FieldId(target,
+                adjustType(fieldId.getDeclaringClassIndex()),
+                adjustType(fieldId.getTypeIndex()),
+                adjustString(fieldId.getNameIndex()));
+
+    }
+
+    public ProtoId adjust(ProtoId protoId) {
+        return new ProtoId(target,
+                adjustString(protoId.getShortyIndex()),
+                adjustType(protoId.getReturnTypeIndex()),
+                adjustTypeListOffset(protoId.getParametersOffset()));
+    }
+
+    public ClassDef adjust(ClassDef classDef) {
+        return new ClassDef(target, classDef.getOffset(), adjustType(classDef.getTypeIndex()),
+                classDef.getAccessFlags(), adjustType(classDef.getSupertypeIndex()),
+                adjustTypeListOffset(classDef.getInterfacesOffset()), classDef.getSourceFileIndex(),
+                classDef.getAnnotationsOffset(), classDef.getClassDataOffset(),
+                classDef.getStaticValuesOffset());
+    }
+
+    public SortableType adjust(SortableType sortableType) {
+        return new SortableType(sortableType.getBuffer(), adjust(sortableType.getClassDef()));
+    }
+}
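
The arrays above are plain translation tables: slot i holds the index that item i of the source dex receives in the merged dex, and adjustTypeList applies the type table to a copied list. A standalone sketch of that remapping, treating the 16-bit type indexes as unsigned (names are illustrative):

    // Remap every entry of an old type list through the per-type table.
    static short[] remapTypes(short[] oldTypes, short[] typeIds) {
        short[] result = oldTypes.clone();
        for (int i = 0; i < result.length; i++) {
            // typeIds[old] holds the index assigned in the merged dex
            result[i] = typeIds[result[i] & 0xffff];
        }
        return result;
    }
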
diff --git a/dx/src/com/android/dx/merge/InstructionTransformer.java b/dx/src/com/android/dx/merge/InstructionTransformer.java
new file mode 100644
index 0000000..62c3a49
--- /dev/null
+++ b/dx/src/com/android/dx/merge/InstructionTransformer.java
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.merge;
+
+import com.android.dx.io.CodeReader;
+import com.android.dx.io.instructions.DecodedInstruction;
+import com.android.dx.io.instructions.ShortArrayCodeOutput;
+import com.android.dx.util.DexException;
+
+final class InstructionTransformer {
+    private final IndexMap indexMap;
+    private final CodeReader reader;
+    private DecodedInstruction[] mappedInstructions;
+    private int mappedAt;
+
+    public InstructionTransformer(IndexMap indexMap) {
+        this.indexMap = indexMap;
+        this.reader = new CodeReader();
+        this.reader.setAllVisitors(new GenericVisitor());
+        this.reader.setStringVisitor(new StringVisitor());
+        this.reader.setTypeVisitor(new TypeVisitor());
+        this.reader.setFieldVisitor(new FieldVisitor());
+        this.reader.setMethodVisitor(new MethodVisitor());
+    }
+
+    public short[] transform(short[] encodedInstructions) throws DexException {
+        DecodedInstruction[] decodedInstructions =
+            DecodedInstruction.decodeAll(encodedInstructions);
+        int size = decodedInstructions.length;
+
+        mappedInstructions = new DecodedInstruction[size];
+        mappedAt = 0;
+        reader.visitAll(decodedInstructions);
+
+        ShortArrayCodeOutput out = new ShortArrayCodeOutput(size);
+        for (DecodedInstruction instruction : mappedInstructions) {
+            if (instruction != null) {
+                instruction.encode(out);
+            }
+        }
+
+        return out.getArray();
+    }
+
+    private class GenericVisitor implements CodeReader.Visitor {
+        public void visit(DecodedInstruction[] all, DecodedInstruction one) {
+            mappedInstructions[mappedAt++] = one;
+        }
+    }
+
+    private class StringVisitor implements CodeReader.Visitor {
+        public void visit(DecodedInstruction[] all, DecodedInstruction one) {
+            int stringId = one.getIndex();
+            int mappedId = indexMap.adjustString(stringId);
+            jumboCheck(stringId, mappedId);
+            mappedInstructions[mappedAt++] = one.withIndex(mappedId);
+        }
+    }
+
+    private class FieldVisitor implements CodeReader.Visitor {
+        public void visit(DecodedInstruction[] all, DecodedInstruction one) {
+            int fieldId = one.getIndex();
+            int mappedId = indexMap.adjustField(fieldId);
+            jumboCheck(fieldId, mappedId);
+            mappedInstructions[mappedAt++] = one.withIndex(mappedId);
+        }
+    }
+
+    private class TypeVisitor implements CodeReader.Visitor {
+        public void visit(DecodedInstruction[] all, DecodedInstruction one) {
+            int typeId = one.getIndex();
+            int mappedId = indexMap.adjustType(typeId);
+            jumboCheck(typeId, mappedId);
+            mappedInstructions[mappedAt++] = one.withIndex(mappedId);
+        }
+    }
+
+    private class MethodVisitor implements CodeReader.Visitor {
+        public void visit(DecodedInstruction[] all, DecodedInstruction one) {
+            int methodId = one.getIndex();
+            int mappedId = indexMap.adjustMethod(methodId);
+            jumboCheck(methodId, mappedId);
+            mappedInstructions[mappedAt++] = one.withIndex(mappedId);
+        }
+    }
+
+    private static void jumboCheck(int oldIndex, int newIndex) {
+        if ((oldIndex <= 0xffff) && (newIndex > 0xffff)) {
+            throw new DexException("Cannot handle conversion to jumbo index!");
+        }
+    }
+}
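
Taken together, the visitors above implement a decode / remap / re-encode pass over a method's bytecode. A hedged usage sketch; indexMap and code are assumed to come from the surrounding merge logic, which is not part of this hunk:

    // Rewrite a code array so its string/type/field/method indexes refer
    // to the merged dex rather than the original one.
    short[] rewritten = new InstructionTransformer(indexMap).transform(code);
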
diff --git a/dx/src/com/android/dx/merge/SortableType.java b/dx/src/com/android/dx/merge/SortableType.java
new file mode 100644
index 0000000..838ea28
--- /dev/null
+++ b/dx/src/com/android/dx/merge/SortableType.java
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.merge;
+
+import com.android.dx.io.ClassDef;
+import com.android.dx.io.DexBuffer;
+import java.util.Comparator;
+
+/**
+ * Name and structure of a type. Used to order types such that each type is
+ * preceded by its supertype and implemented interfaces.
+ */
+final class SortableType {
+    public static final Comparator<SortableType> NULLS_LAST_ORDER = new Comparator<SortableType>() {
+        public int compare(SortableType a, SortableType b) {
+            if (a == b) {
+                return 0;
+            }
+            if (b == null) {
+                return -1;
+            }
+            if (a == null) {
+                return 1;
+            }
+            if (a.depth != b.depth) {
+                return a.depth - b.depth;
+            }
+            return a.getTypeIndex() - b.getTypeIndex();
+        }
+    };
+
+    private final DexBuffer buffer;
+    private ClassDef classDef;
+    private int depth = -1;
+
+    public SortableType(DexBuffer buffer, ClassDef classDef) {
+        this.buffer = buffer;
+        this.classDef = classDef;
+    }
+
+    public DexBuffer getBuffer() {
+        return buffer;
+    }
+
+    public ClassDef getClassDef() {
+        return classDef;
+    }
+
+    public int getTypeIndex() {
+        return classDef.getTypeIndex();
+    }
+
+    /**
+     * Assigns this type's depth if the depths of its supertype and implemented
+     * interfaces are known. Returns false if the depth couldn't be computed
+     * yet.
+     */
+    public boolean tryAssignDepth(SortableType[] types) {
+        int max;
+        if (classDef.getSupertypeIndex() == ClassDef.NO_INDEX) {
+            max = 0; // this is Object.class or an interface
+        } else {
+            SortableType sortableSupertype = types[classDef.getSupertypeIndex()];
+            if (sortableSupertype == null) {
+                max = 1; // unknown, so assume it's a root.
+            } else if (sortableSupertype.depth == -1) {
+                return false;
+            } else {
+                max = sortableSupertype.depth;
+            }
+        }
+
+        for (short interfaceIndex : classDef.getInterfaces()) {
+            SortableType implemented = types[interfaceIndex];
+            if (implemented == null) {
+                max = Math.max(max, 1); // unknown, so assume it's a root.
+            } else if (implemented.depth == -1) {
+                return false;
+            } else {
+                max = Math.max(max, implemented.depth);
+            }
+        }
+
+        depth = max + 1;
+        return true;
+    }
+
+    public boolean isDepthAssigned() {
+        return depth != -1;
+    }
+}
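
tryAssignDepth only succeeds once the depths of the supertype and implemented interfaces are known, so a caller has to iterate to a fixed point. A minimal sketch of such a driver loop (the types array and its population are assumptions, not part of this file):

    // Repeatedly sweep until every known type has a depth, i.e. until a
    // full pass makes no further progress.
    boolean progress = true;
    while (progress) {
        progress = false;
        for (SortableType type : types) {
            if (type != null && !type.isDepthAssigned()) {
                progress |= type.tryAssignDepth(types);
            }
        }
    }
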
diff --git a/dx/src/com/android/dx/merge/TypeList.java b/dx/src/com/android/dx/merge/TypeList.java
new file mode 100644
index 0000000..1619f19
--- /dev/null
+++ b/dx/src/com/android/dx/merge/TypeList.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.merge;
+
+import com.android.dx.io.DexBuffer;
+import com.android.dx.util.Unsigned;
+import java.util.Arrays;
+
+public final class TypeList implements Comparable<TypeList> {
+
+    public static final TypeList EMPTY = new TypeList(null, new short[0]);
+
+    private final DexBuffer buffer;
+    private final short[] types;
+
+    public TypeList(DexBuffer buffer, short[] types) {
+        this.buffer = buffer;
+        this.types = types;
+    }
+
+    public short[] getTypes() {
+        return types;
+    }
+
+    public int compareTo(TypeList other) {
+        for (int i = 0; i < types.length && i < other.types.length; i++) {
+            if (types[i] != other.types[i]) {
+                return Unsigned.compare(types[i], other.types[i]);
+            }
+        }
+        return Unsigned.compare(types.length, other.types.length);
+    }
+
+    @Override public String toString() {
+        if (buffer == null) {
+            return Arrays.toString(types);
+        }
+
+        StringBuilder result = new StringBuilder();
+        result.append("[");
+        for (int i = 0, typesLength = types.length; i < typesLength; i++) {
+            short parameter = types[i];
+            if (i > 0) {
+                result.append(", ");
+            }
+            result.append(buffer.typeNames().get(parameter));
+        }
+        result.append("]");
+        return result.toString();
+    }
+}
diff --git a/dx/src/com/android/dx/rop/code/DexTranslationAdvice.java b/dx/src/com/android/dx/rop/code/DexTranslationAdvice.java
index 8dbc00b..35ce2f2 100644
--- a/dx/src/com/android/dx/rop/code/DexTranslationAdvice.java
+++ b/dx/src/com/android/dx/rop/code/DexTranslationAdvice.java
@@ -60,13 +60,20 @@
             return false;
         }
 
+        // Return false if second source isn't a constant
         if (! (sourceB.getTypeBearer() instanceof CstInteger)) {
-            return false;
+            // Except for rsub-int (reverse sub) where first source is constant
+            if (sourceA.getTypeBearer() instanceof CstInteger &&
+                    opcode.getOpcode() == RegOps.SUB) {
+                CstInteger cst = (CstInteger) sourceA.getTypeBearer();
+                return cst.fitsIn16Bits();
+            } else {
+                return false;
+            }
         }
 
         CstInteger cst = (CstInteger) sourceB.getTypeBearer();
 
-        // TODO handle rsub
         switch (opcode.getOpcode()) {
             // These have 8 and 16 bit cst representations
             case RegOps.REM:
@@ -82,6 +89,10 @@
             case RegOps.SHR:
             case RegOps.USHR:
                 return cst.fitsIn8Bits();
+            // No sub-const insn, so check if equivalent add-const fits
+            case RegOps.SUB:
+                CstInteger cst2 = CstInteger.make(-cst.getValue());
+                return cst2.fitsIn16Bits();
             default:
                 return false;
         }
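
The new SUB handling above treats the two constant positions differently: c - x maps onto rsub-int, while x - c has no sub-with-literal form and is emitted as an add of the negated literal, which is why the negation is what must fit in 16 bits. A tiny sketch of that fit check (illustrative only; the real code goes through CstInteger.fitsIn16Bits):

    // r = a - c is emitted as add-int/lit16 r, a, -c; the literal that has
    // to fit in the instruction is -c, not c.
    static boolean subLiteralFits(int c) {
        long negated = -(long) c;               // avoid overflow at Integer.MIN_VALUE
        return negated >= Short.MIN_VALUE && negated <= Short.MAX_VALUE;
    }
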
diff --git a/dx/src/com/android/dx/rop/code/LocalVariableInfo.java b/dx/src/com/android/dx/rop/code/LocalVariableInfo.java
index 99a10ee..5d2b995 100644
--- a/dx/src/com/android/dx/rop/code/LocalVariableInfo.java
+++ b/dx/src/com/android/dx/rop/code/LocalVariableInfo.java
@@ -115,7 +115,11 @@
         }
 
         RegisterSpecSet newStart = start.mutableCopy();
-        newStart.intersect(specs, true);
+        if (start.size() != 0) {
+            newStart.intersect(specs, true);
+        } else {
+            newStart = specs.mutableCopy();
+        }
 
         if (start.equals(newStart)) {
             return false;
diff --git a/dx/src/com/android/dx/rop/code/RegisterSpecList.java b/dx/src/com/android/dx/rop/code/RegisterSpecList.java
index e900787..3d891fd 100644
--- a/dx/src/com/android/dx/rop/code/RegisterSpecList.java
+++ b/dx/src/com/android/dx/rop/code/RegisterSpecList.java
@@ -20,6 +20,8 @@
 import com.android.dx.rop.type.TypeList;
 import com.android.dx.util.FixedSizeList;
 
+import java.util.BitSet;
+
 /**
  * List of {@link RegisterSpec} instances.
  */
@@ -291,6 +293,39 @@
     }
 
     /**
+     * Returns a new instance, which contains a subset of the elements
+     * specified by the given BitSet. Indexes that are clear in the BitSet
+     * are included, while indexes that are set are excluded. Mutability
+     * of the result is inherited from the original.
+     *
+     * @param exclusionSet {@code non-null;} set of registers to exclude
+     * @return {@code non-null;} an appropriately-constructed instance
+     */
+    public RegisterSpecList subset(BitSet exclusionSet) {
+        int newSize = size() - exclusionSet.cardinality();
+
+        if (newSize == 0) {
+            return EMPTY;
+        }
+
+        RegisterSpecList result = new RegisterSpecList(newSize);
+
+        int newIndex = 0;
+        for (int oldIndex = 0; oldIndex < size(); oldIndex++) {
+            if (!exclusionSet.get(oldIndex)) {
+                result.set0(newIndex, get0(oldIndex));
+                newIndex++;
+            }
+        }
+
+        if (isImmutable()) {
+            result.setImmutable();
+        }
+
+        return result;
+    }
+
+    /**
      * Returns an instance that is identical to this one, except that
      * all register numbers are offset by the given amount. Mutability
      * of the result is inherited from the original.
@@ -324,15 +359,20 @@
 
     /**
      * Returns an instance that is identical to this one, except that
-     * all register numbers are renumbered sequentially from the given
-     * base, with the first number duplicated if indicated.
+     * all incompatible register numbers are renumbered sequentially from
+     * the given base, with the first number duplicated if indicated. If
+     * a null BitSet is given, it indicates all registers are compatible.
      *
      * @param base the base register number
      * @param duplicateFirst whether to duplicate the first number
+     * @param compatRegs {@code null-ok;} either a {@code non-null} set of
+     * compatible registers, or {@code null} to indicate all registers are
+     * compatible
      * @return {@code non-null;} an appropriately-constructed instance
      */
-    public RegisterSpecList withSequentialRegisters(int base,
-                                                    boolean duplicateFirst) {
+    public RegisterSpecList withExpandedRegisters(int base,
+                                                  boolean duplicateFirst,
+                                                  BitSet compatRegs) {
         int sz = size();
 
         if (sz == 0) {
@@ -344,11 +384,19 @@
 
         for (int i = 0; i < sz; i++) {
             RegisterSpec one = (RegisterSpec) get0(i);
-            result.set0(i, one.withReg(base));
+            boolean replace = (compatRegs == null) ? true : !compatRegs.get(i);
+
+            if (replace) {
+                result.set0(i, one.withReg(base));
+                if (!duplicateFirst) {
+                    base += one.getCategory();
+                }
+            } else {
+                result.set0(i, one);
+            }
+
             if (duplicateFirst) {
                 duplicateFirst = false;
-            } else {
-                base += one.getCategory();
             }
         }
 
@@ -358,5 +406,4 @@
 
         return result;
     }
-
 }
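
A short usage sketch of the new subset method, under the convention documented above that set bits mark exclusions (regs is an assumed four-element list):

    BitSet exclude = new BitSet();
    exclude.set(1);
    exclude.set(3);
    RegisterSpecList trimmed = regs.subset(exclude);   // keeps elements 0 and 2
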
diff --git a/dx/src/com/android/dx/ssa/ConstCollector.java b/dx/src/com/android/dx/ssa/ConstCollector.java
index bc8d137..cf6c019 100644
--- a/dx/src/com/android/dx/ssa/ConstCollector.java
+++ b/dx/src/com/android/dx/ssa/ConstCollector.java
@@ -172,7 +172,7 @@
         for (int i = 0; i < regSz; i++) {
             SsaInsn insn = ssaMeth.getDefinitionForRegister(i);
 
-            if (insn == null) continue;
+            if (insn == null || insn.getOpcode() == null) continue;
 
             RegisterSpec result = insn.getResult();
             TypeBearer typeBearer = result.getTypeBearer();
diff --git a/dx/src/com/android/dx/ssa/DeadCodeRemover.java b/dx/src/com/android/dx/ssa/DeadCodeRemover.java
index 2a29050..07fb553 100644
--- a/dx/src/com/android/dx/ssa/DeadCodeRemover.java
+++ b/dx/src/com/android/dx/ssa/DeadCodeRemover.java
@@ -78,7 +78,9 @@
      * Runs the dead code remover.
      */
     private void run() {
-        HashSet<SsaInsn> deletedInsns = (HashSet<SsaInsn>) new HashSet();
+        pruneDeadInstructions();
+
+        HashSet<SsaInsn> deletedInsns = new HashSet<SsaInsn>();
 
         ssaMeth.forEachInsn(new NoSideEffectVisitor(worklist));
 
@@ -125,6 +127,49 @@
     }
 
     /**
+     * Removes all instructions from every unreachable block.
+     */
+    private void pruneDeadInstructions() {
+        HashSet<SsaInsn> deletedInsns = new HashSet<SsaInsn>();
+
+        ssaMeth.computeReachability();
+
+        for (SsaBasicBlock block : ssaMeth.getBlocks()) {
+            if (block.isReachable()) continue;
+
+            // Prune instructions from unreachable blocks
+            for (int i = 0; i < block.getInsns().size(); i++) {
+                SsaInsn insn = block.getInsns().get(i);
+                RegisterSpecList sources = insn.getSources();
+                int sourcesSize = sources.size();
+
+                // Delete this instruction completely if it has sources
+                if (sourcesSize != 0) {
+                    deletedInsns.add(insn);
+                }
+
+                // Delete this instruction from all usage lists.
+                for (int j = 0; j < sourcesSize; j++) {
+                    RegisterSpec source = sources.get(j);
+                    useList[source.getReg()].remove(insn);
+                }
+
+                // Remove this instruction result from the sources of any phis
+                RegisterSpec result = insn.getResult();
+                if (result == null) continue;
+                for (SsaInsn use : useList[result.getReg()]) {
+                    if (use instanceof PhiInsn) {
+                        PhiInsn phiUse = (PhiInsn) use;
+                        phiUse.removePhiRegister(result);
+                    }
+                }
+            }
+        }
+
+        ssaMeth.deleteInsns(deletedInsns);
+    }
+
+    /**
      * Returns true if the only uses of this register form a circle of
      * operations with no side effects.
      *
diff --git a/dx/src/com/android/dx/ssa/LiteralOpUpgrader.java b/dx/src/com/android/dx/ssa/LiteralOpUpgrader.java
index 01d818d..e36c6ff 100644
--- a/dx/src/com/android/dx/ssa/LiteralOpUpgrader.java
+++ b/dx/src/com/android/dx/ssa/LiteralOpUpgrader.java
@@ -16,6 +16,7 @@
 
 package com.android.dx.ssa;
 
+import com.android.dx.rop.code.PlainCstInsn;
 import com.android.dx.rop.code.TranslationAdvice;
 import com.android.dx.rop.code.RegisterSpecList;
 import com.android.dx.rop.code.Insn;
@@ -24,9 +25,12 @@
 import com.android.dx.rop.code.PlainInsn;
 import com.android.dx.rop.code.Rops;
 import com.android.dx.rop.code.RegOps;
+import com.android.dx.rop.cst.Constant;
 import com.android.dx.rop.cst.CstLiteralBits;
+import com.android.dx.rop.type.Type;
 import com.android.dx.rop.type.TypeBearer;
 
+import java.util.ArrayList;
 import java.util.List;
 
 /**
@@ -93,6 +97,9 @@
                 Rop opcode = originalRopInsn.getOpcode();
                 RegisterSpecList sources = insn.getSources();
 
+                // Replace insns with constant results with const insns
+                if (tryReplacingWithConstant(insn)) return;
+
                 if (sources.size() != 2 ) {
                     // We're only dealing with two-source insns here.
                     return;
@@ -104,10 +111,10 @@
                      */
                     if (isConstIntZeroOrKnownNull(sources.get(0))) {
                         replacePlainInsn(insn, sources.withoutFirst(),
-                                RegOps.flippedIfOpcode(opcode.getOpcode()));
+                              RegOps.flippedIfOpcode(opcode.getOpcode()), null);
                     } else if (isConstIntZeroOrKnownNull(sources.get(1))) {
                         replacePlainInsn(insn, sources.withoutLast(),
-                                opcode.getOpcode());
+                              opcode.getOpcode(), null);
                     }
                 } else if (advice.hasConstantOperation(
                         opcode, sources.get(0), sources.get(1))) {
@@ -130,26 +137,64 @@
     }
 
     /**
+     * Tries to replace an instruction with a const instruction. The given
+     * instruction must have a constant result for it to be replaced.
+     *
+     * @param insn {@code non-null;} instruction to try to replace
+     * @return true if the instruction was replaced
+     */
+    private boolean tryReplacingWithConstant(NormalSsaInsn insn) {
+        Insn originalRopInsn = insn.getOriginalRopInsn();
+        Rop opcode = originalRopInsn.getOpcode();
+
+        if (insn.getResult() != null && opcode.getOpcode() != RegOps.CONST) {
+            TypeBearer type = insn.getResult().getTypeBearer();
+            if (type.isConstant() && type.getBasicType() == Type.BT_INT) {
+                // Replace the instruction with a constant
+                replacePlainInsn(insn, RegisterSpecList.EMPTY,
+                        RegOps.CONST, (Constant) type);
+
+                // Remove the source as well if this is a move-result-pseudo
+                if (opcode.getOpcode() == RegOps.MOVE_RESULT_PSEUDO) {
+                    int pred = insn.getBlock().getPredecessors().nextSetBit(0);
+                    ArrayList<SsaInsn> predInsns =
+                            ssaMeth.getBlocks().get(pred).getInsns();
+                    NormalSsaInsn sourceInsn =
+                            (NormalSsaInsn) predInsns.get(predInsns.size()-1);
+                    replacePlainInsn(sourceInsn, RegisterSpecList.EMPTY,
+                            RegOps.GOTO, null);
+                }
+                return true;
+            }
+        }
+        return false;
+    }
+
+    /**
      * Replaces an SsaInsn containing a PlainInsn with a new PlainInsn. The
-     * new PlainInsn is contructed with a new RegOp and new sources.
+     * new PlainInsn is constructed with a new RegOp and new sources.
      *
      * TODO move this somewhere else.
      *
      * @param insn {@code non-null;} an SsaInsn containing a PlainInsn
      * @param newSources {@code non-null;} new sources list for new insn
      * @param newOpcode A RegOp from {@link RegOps}
+     * @param cst {@code null-ok;} constant for new instruction, if any
      */
     private void replacePlainInsn(NormalSsaInsn insn,
-            RegisterSpecList newSources, int newOpcode) {
+            RegisterSpecList newSources, int newOpcode, Constant cst) {
 
         Insn originalRopInsn = insn.getOriginalRopInsn();
-        Rop newRop = Rops.ropFor(newOpcode,
-                insn.getResult(), newSources, null);
-        Insn newRopInsn = new PlainInsn(newRop,
-                originalRopInsn.getPosition(), insn.getResult(),
-                newSources);
-        NormalSsaInsn newInsn
-                = new NormalSsaInsn(newRopInsn, insn.getBlock());
+        Rop newRop = Rops.ropFor(newOpcode, insn.getResult(), newSources, cst);
+        Insn newRopInsn;
+        if (cst == null) {
+            newRopInsn = new PlainInsn(newRop, originalRopInsn.getPosition(),
+                    insn.getResult(), newSources);
+        } else {
+            newRopInsn = new PlainCstInsn(newRop, originalRopInsn.getPosition(),
+                    insn.getResult(), newSources, cst);
+        }
+        NormalSsaInsn newInsn = new NormalSsaInsn(newRopInsn, insn.getBlock());
 
         List<SsaInsn> insns = insn.getBlock().getInsns();
 
diff --git a/dx/src/com/android/dx/ssa/Optimizer.java b/dx/src/com/android/dx/ssa/Optimizer.java
index 06ed138..42ae166 100644
--- a/dx/src/com/android/dx/ssa/Optimizer.java
+++ b/dx/src/com/android/dx/ssa/Optimizer.java
@@ -158,6 +158,8 @@
 
         if (steps.contains(OptionalStep.SCCP)) {
             SCCP.process(ssaMeth);
+            DeadCodeRemover.process(ssaMeth);
+            needsDeadCodeRemover = false;
         }
 
         if (steps.contains(OptionalStep.LITERAL_UPGRADE)) {
diff --git a/dx/src/com/android/dx/ssa/PhiInsn.java b/dx/src/com/android/dx/ssa/PhiInsn.java
index 3ea876f..bc9c4b0 100644
--- a/dx/src/com/android/dx/ssa/PhiInsn.java
+++ b/dx/src/com/android/dx/ssa/PhiInsn.java
@@ -73,6 +73,7 @@
     }
 
     /** {@inheritDoc} */
+    @Override
     public PhiInsn clone() {
         throw new UnsupportedOperationException("can't clone phi");
     }
@@ -134,6 +135,25 @@
     }
 
     /**
+     * Removes all operand uses of a register from this phi instruction.
+     *
+     * @param registerSpec register spec, including type and reg of operand
+     */
+    public void removePhiRegister(RegisterSpec registerSpec) {
+        ArrayList<Operand> operandsToRemove = new ArrayList<Operand>();
+        for (Operand o : operands) {
+            if (o.regSpec.getReg() == registerSpec.getReg()) {
+                operandsToRemove.add(o);
+            }
+        }
+
+        operands.removeAll(operandsToRemove);
+
+        // Un-cache sources, in case someone has already called getSources().
+        sources = null;
+    }
+
+    /**
      * Gets the index of the pred block associated with the RegisterSpec
      * at the particular getSources() index.
      *
@@ -180,6 +200,7 @@
      *
      * @return {@code non-null;} sources list
      */
+    @Override
     public RegisterSpecList getSources() {
         if (sources != null) {
             return sources;
diff --git a/dx/src/com/android/dx/ssa/SCCP.java b/dx/src/com/android/dx/ssa/SCCP.java
index 42abbb2..3e101e3 100644
--- a/dx/src/com/android/dx/ssa/SCCP.java
+++ b/dx/src/com/android/dx/ssa/SCCP.java
@@ -18,10 +18,12 @@
 
 import com.android.dx.rop.code.CstInsn;
 import com.android.dx.rop.code.Insn;
+import com.android.dx.rop.code.PlainInsn;
 import com.android.dx.rop.code.RegOps;
 import com.android.dx.rop.code.RegisterSpecList;
 import com.android.dx.rop.code.Rop;
 import com.android.dx.rop.code.RegisterSpec;
+import com.android.dx.rop.code.Rops;
 import com.android.dx.rop.cst.Constant;
 import com.android.dx.rop.cst.CstInteger;
 import com.android.dx.rop.cst.TypedConstant;
@@ -50,6 +52,8 @@
     private Constant[] latticeConstants;
     /** Worklist of basic blocks to be processed */
     private ArrayList<SsaBasicBlock> cfgWorklist;
+    /** Worklist of executed basic blocks with phis to be processed */
+    private ArrayList<SsaBasicBlock> cfgPhiWorklist;
     /** Bitset containing bits for each block that has been found executable */
     private BitSet executableBlocks;
     /** Worklist for SSA edges.  This is a list of registers to process */
@@ -60,6 +64,8 @@
      * possible.
      */
     private ArrayList<SsaInsn> varyingWorklist;
+    /** Worklist of potential branches to convert to gotos */
+    private ArrayList<SsaInsn> branchWorklist;
 
     private SCCP(SsaMethod ssaMeth) {
         this.ssaMeth = ssaMeth;
@@ -67,9 +73,11 @@
         this.latticeValues = new int[this.regCount];
         this.latticeConstants = new Constant[this.regCount];
         this.cfgWorklist = new ArrayList<SsaBasicBlock>();
+        this.cfgPhiWorklist = new ArrayList<SsaBasicBlock>();
         this.executableBlocks = new BitSet(ssaMeth.getBlocks().size());
         this.ssaWorklist = new ArrayList<SsaInsn>();
         this.varyingWorklist = new ArrayList<SsaInsn>();
+        this.branchWorklist = new ArrayList<SsaInsn>();
         for (int i = 0; i < this.regCount; i++) {
             latticeValues[i] = TOP;
             latticeConstants[i] = null;
@@ -85,13 +93,16 @@
     }
 
     /**
-     * Add a new SSA basic block to the CFG worklist
+     * Adds an SSA basic block to the CFG worklist if it's unexecuted, or
+     * to the CFG phi worklist if it's already executed.
      * @param ssaBlock Block to add
      */
     private void addBlockToWorklist(SsaBasicBlock ssaBlock) {
         if (!executableBlocks.get(ssaBlock.getIndex())) {
             cfgWorklist.add(ssaBlock);
             executableBlocks.set(ssaBlock.getIndex());
+        } else {
+            cfgPhiWorklist.add(ssaBlock);
         }
     }
 
@@ -139,7 +150,7 @@
 
     /**
      * Simulates a PHI node and set the lattice for the result
-     * to the approriate value.
+     * to the appropriate value.
      * Meet values:
      * TOP x anything = anything
      * VARYING x anything = VARYING
@@ -200,6 +211,21 @@
             }
         }
     }
+
+    /**
+     * Simulates the phis in a block and notes the results in the lattice.
+     * @param block Block to visit
+     */
+    private void simulatePhiBlock(SsaBasicBlock block) {
+        for (SsaInsn insn : block.getInsns()) {
+            if (insn instanceof PhiInsn) {
+                simulatePhi((PhiInsn) insn);
+            } else {
+                return;
+            }
+        }
+    }
+
     private static String latticeValName(int latticeVal) {
         switch (latticeVal) {
             case TOP: return "TOP";
@@ -210,21 +236,132 @@
     }
 
     /**
-     * Simplifies a jump statement.
-     * @param insn jump to simplify
-     * @return an instruction representing the simplified jump.
+     * Simulates branch insns, if possible. Adds reachable successor blocks
+     * to the CFG worklists.
+     * @param insn branch to simulate
      */
-    private Insn simplifyJump (Insn insn) {
-        return insn;
+    private void simulateBranch(SsaInsn insn) {
+        Rop opcode = insn.getOpcode();
+        RegisterSpecList sources = insn.getSources();
+
+        boolean constantBranch = false;
+        boolean constantSuccessor = false;
+
+        // Check if the insn is a branch with a constant condition
+        if (opcode.getBranchingness() == Rop.BRANCH_IF) {
+            Constant cA = null;
+            Constant cB = null;
+
+            int regA = sources.get(0).getReg();
+            if (latticeValues[regA] == CONSTANT) {
+                cA = latticeConstants[regA];
+            }
+
+            if (sources.size() == 2) {
+                int regB = sources.get(1).getReg();
+                if (latticeValues[regB] == CONSTANT) {
+                    cB = latticeConstants[regB];
+                }
+            }
+
+            // Calculate the result of the condition
+            if (cA != null && sources.size() == 1) {
+                switch (((TypedConstant) cA).getBasicType()) {
+                    case Type.BT_INT:
+                        constantBranch = true;
+                        int vA = ((CstInteger) cA).getValue();
+                        switch (opcode.getOpcode()) {
+                            case RegOps.IF_EQ:
+                                constantSuccessor = (vA == 0);
+                                break;
+                            case RegOps.IF_NE:
+                                constantSuccessor = (vA != 0);
+                                break;
+                            case RegOps.IF_LT:
+                                constantSuccessor = (vA < 0);
+                                break;
+                            case RegOps.IF_GE:
+                                constantSuccessor = (vA >= 0);
+                                break;
+                            case RegOps.IF_LE:
+                                constantSuccessor = (vA <= 0);
+                                break;
+                            case RegOps.IF_GT:
+                                constantSuccessor = (vA > 0);
+                                break;
+                            default:
+                                throw new RuntimeException("Unexpected op");
+                        }
+                        break;
+                    default:
+                        // not yet supported
+                }
+            } else if (cA != null && cB != null) {
+                switch (((TypedConstant) cA).getBasicType()) {
+                    case Type.BT_INT:
+                        constantBranch = true;
+                        int vA = ((CstInteger) cA).getValue();
+                        int vB = ((CstInteger) cB).getValue();
+                        switch (opcode.getOpcode()) {
+                            case RegOps.IF_EQ:
+                                constantSuccessor = (vA == vB);
+                                break;
+                            case RegOps.IF_NE:
+                                constantSuccessor = (vA != vB);
+                                break;
+                            case RegOps.IF_LT:
+                                constantSuccessor = (vA < vB);
+                                break;
+                            case RegOps.IF_GE:
+                                constantSuccessor = (vA >= vB);
+                                break;
+                            case RegOps.IF_LE:
+                                constantSuccessor = (vA <= vB);
+                                break;
+                            case RegOps.IF_GT:
+                                constantSuccessor = (vA > vB);
+                                break;
+                            default:
+                                throw new RuntimeException("Unexpected op");
+                        }
+                        break;
+                    default:
+                        // not yet supported
+                }
+            }
+        }
+
+        /*
+         * If the condition is constant, add only the target block to the
+         * worklist. Otherwise, add all successors to the worklist.
+         */
+        SsaBasicBlock block = insn.getBlock();
+
+        if (constantBranch) {
+            int successorBlock;
+            if (constantSuccessor) {
+                successorBlock = block.getSuccessorList().get(1);
+            } else {
+                successorBlock = block.getSuccessorList().get(0);
+            }
+            addBlockToWorklist(ssaMeth.getBlocks().get(successorBlock));
+            branchWorklist.add(insn);
+        } else {
+            for (int i = 0; i < block.getSuccessorList().size(); i++) {
+                int successorBlock = block.getSuccessorList().get(i);
+                addBlockToWorklist(ssaMeth.getBlocks().get(successorBlock));
+            }
+        }
     }
 
     /**
      * Simulates math insns, if possible.
      *
      * @param insn non-null insn to simulate
+     * @param resultType basic type of the result
      * @return constant result or null if not simulatable.
      */
-    private Constant simulateMath(SsaInsn insn) {
+    private Constant simulateMath(SsaInsn insn, int resultType) {
         Insn ropInsn = insn.getOriginalRopInsn();
         int opcode = insn.getOpcode().getOpcode();
         RegisterSpecList sources = insn.getSources();
@@ -255,7 +392,7 @@
             return null;
         }
 
-        switch (insn.getResult().getBasicType()) {
+        switch (resultType) {
             case Type.BT_INT:
                 int vR;
                 boolean skip=false;
@@ -268,7 +405,12 @@
                         vR = vA + vB;
                         break;
                     case RegOps.SUB:
-                        vR = vA - vB;
+                        // 1 source for reverse sub, 2 sources for regular sub
+                        if (sources.size() == 1) {
+                            vR = vB - vA;
+                        } else {
+                            vR = vA - vB;
+                        }
                         break;
                     case RegOps.MUL:
                         vR = vA * vB;
@@ -300,7 +442,12 @@
                         vR = vA >>> vB;
                         break;
                     case RegOps.REM:
-                        vR = vA % vB;
+                        if (vB == 0) {
+                            skip = true;
+                            vR = 0; // just to hide a warning
+                        } else {
+                            vR = vA % vB;
+                        }
                         break;
                     default:
                         throw new RuntimeException("Unexpected op");
@@ -322,25 +469,24 @@
         Insn ropInsn = insn.getOriginalRopInsn();
         if (ropInsn.getOpcode().getBranchingness() != Rop.BRANCH_NONE
                 || ropInsn.getOpcode().isCallLike()) {
-            ropInsn = simplifyJump (ropInsn);
-            /* TODO: If jump becomes constant, only take true edge. */
-            SsaBasicBlock block = insn.getBlock();
-            int successorSize = block.getSuccessorList().size();
-            for (int i = 0; i < successorSize; i++) {
-                int successor = block.getSuccessorList().get(i);
-                addBlockToWorklist(ssaMeth.getBlocks().get(successor));
-            }
+            simulateBranch(insn);
         }
 
-        if (insn.getResult() == null) {
-            return;
+        int opcode = insn.getOpcode().getOpcode();
+        RegisterSpec result = insn.getResult();
+
+        // Find corresponding move-result-pseudo result for div and rem
+        if (opcode == RegOps.DIV || opcode == RegOps.REM) {
+            SsaBasicBlock succ = insn.getBlock().getPrimarySuccessor();
+            result = succ.getInsns().get(0).getResult();
         }
 
-        /* TODO: Simplify statements when possible using the constants. */
-        int resultReg = insn.getResult().getReg();
+        if (result == null) return;
+
+        int resultReg = result.getReg();
         int resultValue = VARYING;
         Constant resultConstant = null;
-        int opcode = insn.getOpcode().getOpcode();
+
         switch (opcode) {
             case RegOps.CONST: {
                 CstInsn cstInsn = (CstInsn)ropInsn;
@@ -356,7 +502,6 @@
                 }
                 break;
             }
-
             case RegOps.ADD:
             case RegOps.SUB:
             case RegOps.MUL:
@@ -367,18 +512,22 @@
             case RegOps.SHL:
             case RegOps.SHR:
             case RegOps.USHR:
-            case RegOps.REM:
-
-                resultConstant = simulateMath(insn);
-
-                if (resultConstant == null) {
-                    resultValue = VARYING;
-                } else {
+            case RegOps.REM: {
+                resultConstant = simulateMath(insn, result.getBasicType());
+                if (resultConstant != null) {
                     resultValue = CONSTANT;
                 }
-            break;
-            /* TODO: Handle non-int arithmetic.
-               TODO: Eliminate check casts that we can prove the type of. */
+                break;
+            }
+            case RegOps.MOVE_RESULT_PSEUDO: {
+                if (latticeValues[resultReg] == CONSTANT) {
+                    resultValue = latticeValues[resultReg];
+                    resultConstant = latticeConstants[resultReg];
+                }
+                break;
+            }
+            // TODO: Handle non-int arithmetic.
+            // TODO: Eliminate check casts that we can prove the type of.
             default: {}
         }
         if (setLatticeValueTo(resultReg, resultValue, resultConstant)) {
@@ -392,6 +541,7 @@
 
         /* Empty all the worklists by propagating our values */
         while (!cfgWorklist.isEmpty()
+                || !cfgPhiWorklist.isEmpty()
                 || !ssaWorklist.isEmpty()
                 || !varyingWorklist.isEmpty()) {
             while (!cfgWorklist.isEmpty()) {
@@ -399,6 +549,13 @@
                 SsaBasicBlock block = cfgWorklist.remove(listSize);
                 simulateBlock(block);
             }
+
+            while (!cfgPhiWorklist.isEmpty()) {
+                int listSize = cfgPhiWorklist.size() - 1;
+                SsaBasicBlock block = cfgPhiWorklist.remove(listSize);
+                simulatePhiBlock(block);
+            }
+
             while (!varyingWorklist.isEmpty()) {
                 int listSize = varyingWorklist.size() - 1;
                 SsaInsn insn = varyingWorklist.remove(listSize);
@@ -430,6 +587,7 @@
         }
 
         replaceConstants();
+        replaceBranches();
     }
 
     /**
@@ -458,6 +616,12 @@
                 continue;
             }
 
+            // Update the destination RegisterSpec with the constant value
+            RegisterSpec dest = defn.getResult();
+            RegisterSpec newDest
+                    = dest.withType((TypedConstant)latticeConstants[reg]);
+            defn.setResult(newDest);
+
             /*
              * Update the sources RegisterSpec's of all non-move uses.
              * These will be used in later steps.
@@ -480,4 +644,34 @@
             }
         }
     }
+
+    /**
+     * Replaces branches that have constant conditions with gotos.
+     */
+    private void replaceBranches() {
+        for (SsaInsn insn : branchWorklist) {
+            // Find a successor block that is never executed, if any
+            int oldSuccessor = -1;
+            SsaBasicBlock block = insn.getBlock();
+            int successorSize = block.getSuccessorList().size();
+            for (int i = 0; i < successorSize; i++) {
+                int successorBlock = block.getSuccessorList().get(i);
+                if (!executableBlocks.get(successorBlock)) {
+                    oldSuccessor = successorBlock;
+                }
+            }
+
+            /*
+             * Prune branches that have already been handled and ones that no
+             * longer have constant conditions (no nonexecutable successors)
+             */
+            if (successorSize != 2 || oldSuccessor == -1) continue;
+
+            // Replace branch with goto
+            Insn originalRopInsn = insn.getOriginalRopInsn();
+            block.replaceLastInsn(new PlainInsn(Rops.GOTO,
+                originalRopInsn.getPosition(), null, RegisterSpecList.EMPTY));
+            block.removeSuccessor(oldSuccessor);
+        }
+    }
 }
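
For reference, the phi meet rules quoted in the context above (TOP x anything = anything, VARYING x anything = VARYING, constants meet to a constant only when equal) amount to the following, with constants modeled as plain Integers for brevity; this is a sketch, not the patch's own representation:

    static final int TOP = 0, CONSTANT = 1, VARYING = 2;

    static int meet(int aVal, Integer aCst, int bVal, Integer bCst) {
        if (aVal == TOP) return bVal;
        if (bVal == TOP) return aVal;
        if (aVal == VARYING || bVal == VARYING) return VARYING;
        return aCst.equals(bCst) ? CONSTANT : VARYING;   // both CONSTANT
    }
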
diff --git a/dx/src/com/android/dx/ssa/back/FirstFitLocalCombiningAllocator.java b/dx/src/com/android/dx/ssa/back/FirstFitLocalCombiningAllocator.java
index 0cffcfa..ce4dcca 100644
--- a/dx/src/com/android/dx/ssa/back/FirstFitLocalCombiningAllocator.java
+++ b/dx/src/com/android/dx/ssa/back/FirstFitLocalCombiningAllocator.java
@@ -52,6 +52,9 @@
     /** list of invoke-range instructions seen in this method */
     private final ArrayList<NormalSsaInsn> invokeRangeInsns;
 
+    /** list of phi instructions seen in this method */
+    private final ArrayList<PhiInsn> phiInsns;
+
     /** indexed by SSA reg; the set of SSA regs we've mapped */
     private final BitSet ssaRegsMapped;
 
@@ -104,6 +107,7 @@
         localVariables = new TreeMap<LocalItem, ArrayList<RegisterSpec>>();
         moveResultPseudoInsns = new ArrayList<NormalSsaInsn>();
         invokeRangeInsns = new ArrayList<NormalSsaInsn>();
+        phiInsns = new ArrayList<PhiInsn>();
     }
 
     /** {@inheritDoc} */
@@ -139,6 +143,9 @@
         if (DEBUG) System.out.println("--->Mapping check-cast results");
         handleCheckCastResults();
 
+        if (DEBUG) System.out.println("--->Mapping phis");
+        handlePhiInsns();
+
         if (DEBUG) System.out.println("--->Mapping others");
         handleNormalUnassociated();
 
@@ -449,8 +456,9 @@
     }
 
     /**
-     * Handles check cast results to reuse the same source register if
-     * possible.
+     * Handles check cast results to reuse the same source register.
+     * Inserts a move if it can't map the same register to both and the
+     * check cast is not caught.
      */
     private void handleCheckCastResults() {
         for (NormalSsaInsn insn : moveResultPseudoInsns) {
@@ -479,28 +487,59 @@
             RegisterSpec checkRegSpec = checkCastInsn.getSources().get(0);
             int checkReg = checkRegSpec.getReg();
 
-            // Assume none of the register is mapped yet
-            int ropReg = 0;
-
             /**
              * See if either register is already mapped. Most likely the move
              * result will be mapped already since the cast result is stored
              * in a local variable.
              */
-            if (ssaRegsMapped.get(moveReg)) {
-                ropReg = mapper.oldToNew(moveReg);
-            } else if (ssaRegsMapped.get(checkReg)) {
-                ropReg = mapper.oldToNew(checkReg);
-            }
-
-            ArrayList<RegisterSpec> ssaRegs = new ArrayList<RegisterSpec>(2);
-            ssaRegs.add(moveRegSpec);
-            ssaRegs.add(checkRegSpec);
             int category = checkRegSpec.getCategory();
-
-            while (!tryMapRegs(ssaRegs, ropReg, category, false)) {
-                ropReg = findNextUnreservedRopReg(ropReg + 1, category);
+            boolean moveMapped = ssaRegsMapped.get(moveReg);
+            boolean checkMapped = ssaRegsMapped.get(checkReg);
+            if (moveMapped & !checkMapped) {
+                int moveRopReg = mapper.oldToNew(moveReg);
+                checkMapped = tryMapReg(checkRegSpec, moveRopReg, category);
             }
+            if (checkMapped & !moveMapped) {
+                int checkRopReg = mapper.oldToNew(checkReg);
+                moveMapped = tryMapReg(moveRegSpec, checkRopReg, category);
+            }
+
+            // Map any unmapped registers to anything available
+            if (!moveMapped || !checkMapped) {
+                int ropReg = paramRangeEnd;
+                ArrayList<RegisterSpec> ssaRegs =
+                    new ArrayList<RegisterSpec>(2);
+                ssaRegs.add(moveRegSpec);
+                ssaRegs.add(checkRegSpec);
+
+                while (!tryMapRegs(ssaRegs, ropReg, category, false)) {
+                    ropReg = findNextUnreservedRopReg(ropReg + 1, category);
+                }
+            }
+
+            /*
+             * If source and result have a different mapping, insert a move so
+             * they can have the same mapping. Don't do this if the check cast
+             * is caught, since it will overwrite a potentially live value.
+             */
+            boolean hasExceptionHandlers =
+                checkCastInsn.getOriginalRopInsn().getCatches().size() != 0;
+            int moveRopReg = mapper.oldToNew(moveReg);
+            int checkRopReg = mapper.oldToNew(checkReg);
+            if (moveRopReg != checkRopReg && !hasExceptionHandlers) {
+                ((NormalSsaInsn) checkCastInsn).changeOneSource(0,
+                        insertMoveBefore(checkCastInsn, checkRegSpec));
+                addMapping(checkCastInsn.getSources().get(0), moveRopReg);
+            }
+        }
+    }
+
+    /**
+     * Handles all phi instructions, trying to map them to a common register.
+     */
+    private void handlePhiInsns() {
+        for (PhiInsn insn : phiInsns) {
+            processPhiInsn(insn);
         }
     }
 
@@ -522,7 +561,7 @@
 
             int category = ssaSpec.getCategory();
             // Find a rop reg that does not interfere
-            int ropReg = findNextUnreservedRopReg(0, category);
+            int ropReg = paramRangeEnd;
             while (!canMapReg(ssaSpec, ropReg)) {
                 ropReg = findNextUnreservedRopReg(ropReg + 1, category);
             }
@@ -623,6 +662,8 @@
                             insn.getSources())) {
                         invokeRangeInsns.add((NormalSsaInsn) insn);
                     }
+                } else if (insn instanceof PhiInsn) {
+                    phiInsns.add((PhiInsn) insn);
                 }
 
             }
@@ -955,4 +996,131 @@
 
         return null;
     }
+
+    /**
+     * Attempts to map the sources and result of a phi to a common register.
+     * Will try existing mappings first, from most to least common. If none
+     * of the registers have mappings yet, a new mapping is created.
+     */
+    private void processPhiInsn(PhiInsn insn) {
+        RegisterSpec result = insn.getResult();
+        int resultReg = result.getReg();
+        int category = result.getCategory();
+
+        RegisterSpecList sources = insn.getSources();
+        int sourcesSize = sources.size();
+
+        // List of phi sources / result that need mapping
+        ArrayList<RegisterSpec> ssaRegs = new ArrayList<RegisterSpec>();
+
+        // Track how many times a particular mapping is found
+        Multiset mapSet = new Multiset(sourcesSize + 1);
+
+        /*
+         * If the result of the phi has an existing mapping, get it.
+         * Otherwise, add it to the list of regs that need mapping.
+         */
+        if (ssaRegsMapped.get(resultReg)) {
+            mapSet.add(mapper.oldToNew(resultReg));
+        } else {
+            ssaRegs.add(result);
+        }
+
+        for (int i = 0; i < sourcesSize; i++) {
+            RegisterSpec source = sources.get(i);
+            int sourceReg = source.getReg();
+
+            /*
+             * If a source of the phi has an existing mapping, get it.
+             * Otherwise, add it to the list of regs that need mapping.
+             */
+            if (ssaRegsMapped.get(sourceReg)) {
+                mapSet.add(mapper.oldToNew(sourceReg));
+            } else {
+                ssaRegs.add(source);
+            }
+        }
+
+        // Try all existing mappings, with the most common ones first
+        for (int i = 0; i < mapSet.getSize(); i++) {
+            int maxReg = mapSet.getAndRemoveHighestCount();
+            tryMapRegs(ssaRegs, maxReg, category, false);
+        }
+
+        // Map any remaining unmapped regs with whatever fits
+        int mapReg = findNextUnreservedRopReg(0, category);
+        while (!tryMapRegs(ssaRegs, mapReg, category, false)) {
+            mapReg = findNextUnreservedRopReg(mapReg + 1, category);
+        }
+    }
+
+    /** A set that tracks how often elements are added to it. */
+    private static class Multiset {
+        private final int[] reg;
+        private final int[] count;
+        private int size;
+
+        /**
+         * Constructs an instance.
+         *
+         * @param maxSize the maximum distinct elements the set may have
+         */
+        public Multiset(int maxSize) {
+            reg = new int[maxSize];
+            count = new int[maxSize];
+            size = 0;
+        }
+
+        /**
+         * Adds an element to the set.
+         *
+         * @param element element to add
+         */
+        public void add(int element) {
+            for (int i = 0; i < size; i++) {
+                if (reg[i] == element) {
+                    count[i]++;
+                    return;
+                }
+            }
+
+            reg[size] = element;
+            count[size] = 1;
+            size++;
+        }
+
+        /**
+         * Searches the set for the element that has been added the most.
+         * In the case of a tie, the element that was added first is returned.
+         * Then, it clears the count on that element. The size of the set
+         * remains unchanged.
+         *
+         * @return element with the highest count
+         */
+        public int getAndRemoveHighestCount() {
+            int maxIndex = -1;
+            int maxReg = -1;
+            int maxCount = 0;
+
+            for (int i = 0; i < size; i++) {
+                if (maxCount < count[i]) {
+                    maxIndex = i;
+                    maxReg = reg[i];
+                    maxCount = count[i];
+                }
+            }
+
+            count[maxIndex] = 0;
+            return maxReg;
+        }
+
+        /**
+         * Gets the number of distinct elements in the set.
+         *
+         * @return size of the set
+         */
+        public int getSize() {
+            return size;
+        }
+    }
 }
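A minimal sketch of the register-selection order that processPhiInsn() relies on: existing rop-register mappings are tried from most to least common before a fresh register is allocated. The private Multiset above tracks counts with parallel arrays; the sketch below uses a plain HashMap purely for illustration, and the register numbers are made up.

    // Illustrative only: mirrors the "most common existing mapping first" order
    // used by processPhiInsn(); a HashMap stands in for the private Multiset.
    import java.util.HashMap;
    import java.util.Map;

    public class PhiMappingOrderSketch {
        public static void main(String[] args) {
            // Hypothetical rop registers of already-mapped phi operands.
            int[] existingMappings = { 5, 7, 5, 9, 5, 7 };
            Map<Integer, Integer> counts = new HashMap<Integer, Integer>();
            for (int reg : existingMappings) {
                Integer old = counts.get(reg);
                counts.put(reg, old == null ? 1 : old + 1);
            }
            // Pick the highest count each round, as getAndRemoveHighestCount() does:
            // v5 (3 uses), then v7 (2 uses), then v9 (1 use).
            while (!counts.isEmpty()) {
                int best = -1;
                int bestCount = 0;
                for (Map.Entry<Integer, Integer> e : counts.entrySet()) {
                    if (e.getValue() > bestCount) {
                        best = e.getKey();
                        bestCount = e.getValue();
                    }
                }
                System.out.println("try rop reg v" + best + " (" + bestCount + " uses)");
                counts.remove(best);
            }
        }
    }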
diff --git a/dx/src/com/android/dx/ssa/back/SsaToRop.java b/dx/src/com/android/dx/ssa/back/SsaToRop.java
index d9d2c45..0e30250 100644
--- a/dx/src/com/android/dx/ssa/back/SsaToRop.java
+++ b/dx/src/com/android/dx/ssa/back/SsaToRop.java
@@ -18,31 +18,25 @@
 
 import com.android.dx.rop.code.BasicBlock;
 import com.android.dx.rop.code.BasicBlockList;
-import com.android.dx.rop.code.CstInsn;
 import com.android.dx.rop.code.InsnList;
-import com.android.dx.rop.code.RegOps;
 import com.android.dx.rop.code.RegisterSpec;
 import com.android.dx.rop.code.RegisterSpecList;
+import com.android.dx.rop.code.Rop;
 import com.android.dx.rop.code.RopMethod;
 import com.android.dx.rop.code.Rops;
-import com.android.dx.rop.code.Rop;
-import com.android.dx.rop.cst.CstInteger;
-import com.android.dx.ssa.NormalSsaInsn;
 import com.android.dx.ssa.BasicRegisterMapper;
 import com.android.dx.ssa.PhiInsn;
 import com.android.dx.ssa.RegisterMapper;
 import com.android.dx.ssa.SsaBasicBlock;
 import com.android.dx.ssa.SsaInsn;
 import com.android.dx.ssa.SsaMethod;
-import com.android.dx.util.IntList;
 import com.android.dx.util.Hex;
+import com.android.dx.util.IntList;
 
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.BitSet;
 import java.util.Comparator;
-import java.util.HashMap;
-import java.util.List;
 
 /**
  * Converts a method in SSA form to ROP form.
@@ -254,8 +248,8 @@
         ssaMeth.computeReachability();
         int ropBlockCount = ssaMeth.getCountReachableBlocks();
 
-        // Don't count the exit block, if it exists.
-        ropBlockCount -= (exitBlock == null) ? 0 : 1;
+        // Don't count the exit block, if it exists and is reachable.
+        ropBlockCount -= (exitBlock != null && exitBlock.isReachable()) ? 1 : 0;
 
         BasicBlockList result = new BasicBlockList(ropBlockCount);
 
diff --git a/dx/src/com/android/dx/util/ByteArrayAnnotatedOutput.java b/dx/src/com/android/dx/util/ByteArrayAnnotatedOutput.java
index 6d0615e..def49a6 100644
--- a/dx/src/com/android/dx/util/ByteArrayAnnotatedOutput.java
+++ b/dx/src/com/android/dx/util/ByteArrayAnnotatedOutput.java
@@ -224,39 +224,23 @@
     }
 
     /** {@inheritDoc} */
-    public int writeUnsignedLeb128(int value) {
-        int remaining = value >> 7;
-        int count = 0;
-
-        while (remaining != 0) {
-            writeByte((value & 0x7f) | 0x80);
-            value = remaining;
-            remaining >>= 7;
-            count++;
+    public int writeUleb128(int value) {
+        if (stretchy) {
+            ensureCapacity(cursor + 5); // pessimistic
         }
-
-        writeByte(value & 0x7f);
-        return count + 1;
+        int byteCount = Leb128Utils.writeUnsignedLeb128(data, cursor, value);
+        cursor += byteCount;
+        return byteCount;
     }
 
     /** {@inheritDoc} */
-    public int writeSignedLeb128(int value) {
-        int remaining = value >> 7;
-        int count = 0;
-        boolean hasMore = true;
-        int end = ((value & Integer.MIN_VALUE) == 0) ? 0 : -1;
-
-        while (hasMore) {
-            hasMore = (remaining != end)
-                || ((remaining & 1) != ((value >> 6) & 1));
-
-            writeByte((value & 0x7f) | (hasMore ? 0x80 : 0));
-            value = remaining;
-            remaining >>= 7;
-            count++;
+    public int writeSleb128(int value) {
+        if (stretchy) {
+            ensureCapacity(cursor + 5); // pessimistic
         }
-
-        return count;
+        int byteCount = Leb128Utils.writeSignedLeb128(data, cursor, value);
+        cursor += byteCount;
+        return byteCount;
     }
 
     /** {@inheritDoc} */
diff --git a/dx/src/com/android/dx/util/DexException.java b/dx/src/com/android/dx/util/DexException.java
new file mode 100644
index 0000000..527b0b9
--- /dev/null
+++ b/dx/src/com/android/dx/util/DexException.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.util;
+
+/**
+ * Thrown when there's a format problem reading, writing, or generally
+ * processing a dex file.
+ */
+public final class DexException extends ExceptionWithContext {
+    public DexException(String message) {
+        super(message);
+    }
+
+    public DexException(Throwable cause) {
+        super(cause);
+    }
+}
diff --git a/dx/src/com/android/dx/util/FileUtils.java b/dx/src/com/android/dx/util/FileUtils.java
index 098c5ab..bcf6729 100644
--- a/dx/src/com/android/dx/util/FileUtils.java
+++ b/dx/src/com/android/dx/util/FileUtils.java
@@ -89,4 +89,13 @@
 
         return result;
     }
+
+    /**
+     * Returns true if {@code fileName} names a .zip, .jar, or .apk.
+     */
+    public static boolean hasArchiveSuffix(String fileName) {
+        return fileName.endsWith(".zip")
+                || fileName.endsWith(".jar")
+                || fileName.endsWith(".apk");
+    }
 }
diff --git a/dx/src/com/android/dx/util/Leb128Utils.java b/dx/src/com/android/dx/util/Leb128Utils.java
index 5d450ea..73f7d32 100644
--- a/dx/src/com/android/dx/util/Leb128Utils.java
+++ b/dx/src/com/android/dx/util/Leb128Utils.java
@@ -16,8 +16,12 @@
 
 package com.android.dx.util;
 
+import java.io.DataInput;
+import java.io.IOException;
+
 /**
- * LEB128 (little-endian base 128) utilities.
+ * Reads and writes DWARFv3 LEB 128 signed and unsigned integers. See DWARF v3
+ * section 7.6.
  */
 public final class Leb128Utils {
     /**
@@ -74,4 +78,95 @@
 
         return count;
     }
+
+    /**
+     * Reads a signed integer from {@code in}.
+     */
+    public static int readSignedLeb128(DataInput in) throws IOException {
+        int result = 0;
+        int cur;
+        int count = 0;
+        int signBits = -1;
+
+        do {
+            cur = in.readByte() & 0xff;
+            result |= (cur & 0x7f) << (count * 7);
+            signBits <<= 7;
+            count++;
+        } while (((cur & 0x80) == 0x80) && count < 5);
+
+        if ((cur & 0x80) == 0x80) {
+            throw new IOException("invalid LEB128 sequence");
+        }
+
+        // Sign extend if appropriate
+        if (((signBits >> 1) & result) != 0) {
+            result |= signBits;
+        }
+
+        return result;
+    }
+
+    /**
+     * Reads an unsigned integer from {@code in}.
+     */
+    public static int readUnsignedLeb128(DataInput in) throws IOException {
+        int result = 0;
+        int cur;
+        int count = 0;
+
+        do {
+            cur = in.readByte() & 0xff;
+            result |= (cur & 0x7f) << (count * 7);
+            count++;
+        } while (((cur & 0x80) == 0x80) && count < 5);
+
+        if ((cur & 0x80) == 0x80) {
+            throw new IOException("invalid LEB128 sequence");
+        }
+
+        return result;
+    }
+
+    /**
+     * Writes {@code value} as an unsigned integer to {@code out}, starting at
+     * {@code offset}. Returns the number of bytes written.
+     */
+    public static int writeUnsignedLeb128(byte[] out, int offset, int value) {
+        int remaining = value >> 7;
+        int count = 0;
+
+        while (remaining != 0) {
+            out[offset + count] = (byte) ((value & 0x7f) | 0x80);
+            value = remaining;
+            remaining >>= 7;
+            count++;
+        }
+
+        out[offset + count] = (byte) (value & 0x7f);
+        return count + 1;
+    }
+
+    /**
+     * Writes {@code value} as a signed integer to {@code out}, starting at
+     * {@code offset}. Returns the number of bytes written.
+     */
+    public static int writeSignedLeb128(byte[] out, int offset, int value) {
+        int remaining = value >> 7;
+        int count = 0;
+        boolean hasMore = true;
+        int end = ((value & Integer.MIN_VALUE) == 0) ? 0 : -1;
+
+        while (hasMore) {
+            hasMore = (remaining != end)
+                    || ((remaining & 1) != ((value >> 6) & 1));
+
+            out[offset + count] = (byte) ((value & 0x7f) | (hasMore ? 0x80 : 0));
+            value = remaining;
+            remaining >>= 7;
+            count++;
+        }
+
+        return count;
+    }
 }
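A minimal round-trip sketch for the array-based writers and DataInput readers added above. The 5-byte buffer matches the pessimistic bound writeUleb128() uses, and 16256 is the same two-byte case the unit test later in this change checks.

    import java.io.ByteArrayInputStream;
    import java.io.DataInputStream;

    import com.android.dx.util.Leb128Utils;

    public class Leb128RoundTripSketch {
        public static void main(String[] args) throws Exception {
            byte[] buffer = new byte[5]; // a uleb128-encoded int is at most 5 bytes
            int length = Leb128Utils.writeUnsignedLeb128(buffer, 0, 16256);
            // 16256 encodes as 0x80 0x7f, so length is 2.
            DataInputStream in = new DataInputStream(
                    new ByteArrayInputStream(buffer, 0, length));
            int decoded = Leb128Utils.readUnsignedLeb128(in);
            System.out.println(length + " bytes -> " + decoded); // 2 bytes -> 16256
        }
    }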
diff --git a/dx/src/com/android/dx/util/Mutf8.java b/dx/src/com/android/dx/util/Mutf8.java
new file mode 100644
index 0000000..ffa43a5
--- /dev/null
+++ b/dx/src/com/android/dx/util/Mutf8.java
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.util;
+
+import java.io.DataInput;
+import java.io.IOException;
+import java.io.UTFDataFormatException;
+
+/**
+ * Modified UTF-8 as described in the dex file format spec.
+ *
+ * <p>Derived from libcore's MUTF-8 encoder at java.nio.charset.ModifiedUtf8.
+ */
+public final class Mutf8 {
+    private Mutf8() {}
+
+    /**
+     * Decodes bytes from {@code in} into {@code out} until a delimiter 0x00 is
+     * encountered. Returns a new string containing the decoded characters.
+     */
+    public static String decode(DataInput in, char[] out) throws IOException {
+        int s = 0;
+        while (true) {
+            char a = (char) (in.readByte() & 0xff);
+            if (a == 0) {
+                return new String(out, 0, s);
+            }
+            out[s] = a;
+            if (a < '\u0080') {
+                s++;
+            } else if ((a & 0xe0) == 0xc0) {
+                int b = in.readByte() & 0xff;
+                if ((b & 0xC0) != 0x80) {
+                    throw new UTFDataFormatException("bad second byte");
+                }
+                out[s++] = (char) (((a & 0x1F) << 6) | (b & 0x3F));
+            } else if ((a & 0xf0) == 0xe0) {
+                int b = in.readByte() & 0xff;
+                int c = in.readByte() & 0xff;
+                if (((b & 0xC0) != 0x80) || ((c & 0xC0) != 0x80)) {
+                    throw new UTFDataFormatException("bad second or third byte");
+                }
+                out[s++] = (char) (((a & 0x0F) << 12) | ((b & 0x3F) << 6) | (c & 0x3F));
+            } else {
+                throw new UTFDataFormatException("bad byte");
+            }
+        }
+    }
+
+    /**
+     * Returns the number of bytes the modified UTF-8 representation of {@code s} would take.
+     */
+    private static long countBytes(String s, boolean shortLength) throws UTFDataFormatException {
+        long result = 0;
+        final int length = s.length();
+        for (int i = 0; i < length; ++i) {
+            char ch = s.charAt(i);
+            if (ch != 0 && ch <= 127) { // U+0000 uses two bytes.
+                ++result;
+            } else if (ch <= 2047) {
+                result += 2;
+            } else {
+                result += 3;
+            }
+            if (shortLength && result > 65535) {
+                throw new UTFDataFormatException("String more than 65535 UTF bytes long");
+            }
+        }
+        return result;
+    }
+
+    /**
+     * Encodes the modified UTF-8 bytes corresponding to {@code s} into {@code
+     * dst}, starting at {@code offset}.
+     */
+    public static void encode(byte[] dst, int offset, String s) {
+        final int length = s.length();
+        for (int i = 0; i < length; i++) {
+            char ch = s.charAt(i);
+            if (ch != 0 && ch <= 127) { // U+0000 uses two bytes.
+                dst[offset++] = (byte) ch;
+            } else if (ch <= 2047) {
+                dst[offset++] = (byte) (0xc0 | (0x1f & (ch >> 6)));
+                dst[offset++] = (byte) (0x80 | (0x3f & ch));
+            } else {
+                dst[offset++] = (byte) (0xe0 | (0x0f & (ch >> 12)));
+                dst[offset++] = (byte) (0x80 | (0x3f & (ch >> 6)));
+                dst[offset++] = (byte) (0x80 | (0x3f & ch));
+            }
+        }
+    }
+
+    /**
+     * Returns an array containing the <i>modified UTF-8</i> form of {@code s}.
+     */
+    public static byte[] encode(String s) throws UTFDataFormatException {
+        int utfCount = (int) countBytes(s, true);
+        byte[] result = new byte[utfCount];
+        encode(result, 0, s);
+        return result;
+    }
+}
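A small round-trip sketch using the public Mutf8 API above. Note that encode() does not emit the 0x00 delimiter that decode() stops at, so the sketch appends it by hand; the sample string is made up.

    import java.io.ByteArrayInputStream;
    import java.io.DataInputStream;
    import java.util.Arrays;

    import com.android.dx.util.Mutf8;

    public class Mutf8RoundTripSketch {
        public static void main(String[] args) throws Exception {
            String original = "Blort\u20ac"; // mixes one-byte and three-byte forms
            byte[] encoded = Mutf8.encode(original);
            byte[] delimited = Arrays.copyOf(encoded, encoded.length + 1); // trailing 0x00
            DataInputStream in = new DataInputStream(new ByteArrayInputStream(delimited));
            String decoded = Mutf8.decode(in, new char[original.length()]);
            System.out.println(original.equals(decoded)); // true
        }
    }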
diff --git a/dx/src/com/android/dx/util/Output.java b/dx/src/com/android/dx/util/Output.java
index 402fa83..12eaa4c 100644
--- a/dx/src/com/android/dx/util/Output.java
+++ b/dx/src/com/android/dx/util/Output.java
@@ -75,7 +75,7 @@
      * @param value value to write, treated as an unsigned value
      * @return {@code 1..5;} the number of bytes actually written
      */
-    public int writeUnsignedLeb128(int value);
+    public int writeUleb128(int value);
 
     /**
      * Writes a DWARFv3-style unsigned LEB128 integer. For details,
@@ -85,7 +85,7 @@
      * @param value value to write
      * @return {@code 1..5;} the number of bytes actually written
      */
-    public int writeSignedLeb128(int value);
+    public int writeSleb128(int value);
 
     /**
      * Writes a {@link ByteArray} to this instance.
diff --git a/dx/src/com/android/dx/util/Uint.java b/dx/src/com/android/dx/util/Uint.java
new file mode 100644
index 0000000..039756a
--- /dev/null
+++ b/dx/src/com/android/dx/util/Uint.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.util;
+
+/**
+ * An unsigned integer.
+ */
+public final class Uint implements Comparable<Uint> {
+    public final int intValue;
+
+    public Uint(int value) {
+        this.intValue = value;
+    }
+
+    public int compareTo(Uint uint) {
+        return Unsigned.compare(intValue, uint.intValue);
+    }
+}
diff --git a/dx/src/com/android/dx/util/Unsigned.java b/dx/src/com/android/dx/util/Unsigned.java
new file mode 100644
index 0000000..f15bd86
--- /dev/null
+++ b/dx/src/com/android/dx/util/Unsigned.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.util;
+
+/**
+ * Unsigned arithmetic over Java's signed types.
+ */
+public final class Unsigned {
+    private Unsigned() {}
+
+    public static int compare(short ushortA, short ushortB) {
+        if (ushortA == ushortB) {
+            return 0;
+        }
+        int a = ushortA & 0xFFFF;
+        int b = ushortB & 0xFFFF;
+        return a < b ? -1 : 1;
+    }
+
+    public static int compare(int uintA, int uintB) {
+        if (uintA == uintB) {
+            return 0;
+        }
+        long a = uintA & 0xFFFFFFFFL;
+        long b = uintB & 0xFFFFFFFFL;
+        return a < b ? -1 : 1;
+    }
+}
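A two-line sketch of why the masking in Unsigned.compare() matters: as a signed int, 0xFFFFFFFF is -1 and sorts below 1, but viewed as an unsigned value it is 4294967295 and sorts above it.

    import com.android.dx.util.Unsigned;

    public class UnsignedCompareSketch {
        public static void main(String[] args) {
            int a = 0xFFFFFFFF; // 4294967295 when treated as unsigned
            int b = 1;
            System.out.println(a < b);                  // true (signed view)
            System.out.println(Unsigned.compare(a, b)); // 1    (unsigned view)
        }
    }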
diff --git a/dx/tests/045-dex-switch-ops/expected.txt b/dx/tests/045-dex-switch-ops/expected.txt
index 46476ea..80bc808 100644
--- a/dx/tests/045-dex-switch-ops/expected.txt
+++ b/dx/tests/045-dex-switch-ops/expected.txt
@@ -20,7 +20,7 @@
   0013: move v0, v2
   0014: goto 0008 // -000c
   0015: nop // spacer
-  0016: packed-switch-data // for switch @ 0003
+  0016: packed-switch-payload // for switch @ 0003
           1: 00000009 // +00000006
           2: 0000000c // +00000009
           3: 0000000f // +0000000c
@@ -46,7 +46,7 @@
   0012: const/16 v2, #int 50 // #0032
   0014: move v0, v2
   0015: goto 0008 // -000d
-  0016: sparse-switch-data // for switch @ 0003
+  0016: sparse-switch-payload // for switch @ 0003
           1: 00000009 // +00000006
           10: 0000000c // +00000009
           100: 0000000f // +0000000c
diff --git a/dx/tests/067-dex-switch-and-try/expected.txt b/dx/tests/067-dex-switch-and-try/expected.txt
index 5e55bf4..0a101d5 100644
--- a/dx/tests/067-dex-switch-and-try/expected.txt
+++ b/dx/tests/067-dex-switch-and-try/expected.txt
@@ -14,7 +14,7 @@
   000f: move v1, v3
   0010: goto 0006 // -000a
   0011: nop // spacer
-  0012: packed-switch-data // for switch @ 0003
+  0012: packed-switch-payload // for switch @ 0003
           1: 00000009 // +00000006
           2: 0000000d // +0000000a
 Blort.test2:(I)I:
@@ -37,7 +37,7 @@
   0015: move-object v2, v3
   0016: goto 0006 // -0010
   0017: nop // spacer
-  0018: packed-switch-data // for switch @ 0003
+  0018: packed-switch-payload // for switch @ 0003
           1: 00000009 // +00000006
           2: 00000010 // +0000000d
   catches
@@ -64,7 +64,7 @@
   0015: move v1, v3
   0016: goto 0006 // -0010
   0017: nop // spacer
-  0018: packed-switch-data // for switch @ 0003
+  0018: packed-switch-payload // for switch @ 0003
           1: 00000009 // +00000006
           2: 00000013 // +00000010
   catches
@@ -91,7 +91,7 @@
   0015: move v1, v3
   0016: goto 0006 // -0010
   0017: nop // spacer
-  0018: packed-switch-data // for switch @ 0003
+  0018: packed-switch-payload // for switch @ 0003
           1: 00000009 // +00000006
           2: 00000013 // +00000010
   catches
diff --git a/dx/tests/069-dex-source-position/expected.txt b/dx/tests/069-dex-source-position/expected.txt
index 853ee65..cd45497 100644
--- a/dx/tests/069-dex-source-position/expected.txt
+++ b/dx/tests/069-dex-source-position/expected.txt
@@ -8,30 +8,30 @@
   0006: return v0
   0007: move v2, v0
   0008: const/4 v3, #int 1 // #1
-  0009: sub-int/2addr v2, v3
-  000a: invoke-static {v2}, Blort.test:(I)I
-  000d: move-result v2
-  000e: move v0, v2
-  000f: move v2, v0
-  0010: move v3, v0
-  0011: const/4 v4, #int 2 // #2
-  0012: sub-int/2addr v3, v4
-  0013: invoke-static {v3}, Blort.test:(I)I
-  0016: move-result v3
-  0017: add-int/2addr v2, v3
-  0018: move v0, v2
-  0019: move v2, v0
+  0009: add-int/lit8 v2, v2, #int -1 // #ff
+  000b: invoke-static {v2}, Blort.test:(I)I
+  000e: move-result v2
+  000f: move v0, v2
+  0010: move v2, v0
+  0011: move v3, v0
+  0012: const/4 v4, #int 2 // #2
+  0013: add-int/lit8 v3, v3, #int -2 // #fe
+  0015: invoke-static {v3}, Blort.test:(I)I
+  0018: move-result v3
+  0019: add-int/2addr v2, v3
   001a: move v0, v2
-  001b: goto 0006 // -0015
-  001c: move-exception v2
-  001d: move-object v1, v2
-  001e: const/4 v2, #int 2 // #2
-  001f: move v0, v2
-  0020: goto 0006 // -001a
+  001b: move v2, v0
+  001c: move v0, v2
+  001d: goto 0006 // -0017
+  001e: move-exception v2
+  001f: move-object v1, v2
+  0020: const/4 v2, #int 2 // #2
+  0021: move v0, v2
+  0022: goto 0006 // -001c
   catches
     tries:
-      try 000a..000d
-      catch java.lang.RuntimeException -> 001c
+      try 000b..000e
+      catch java.lang.RuntimeException -> 001e
 Blort.test:(I)I:
 regs: 0006; ins: 0001; outs: 0001
   0000: move v0, v5
@@ -42,30 +42,30 @@
   0006: return v0
   0007: move v2, v0
   0008: const/4 v3, #int 1 // #1
-  0009: sub-int/2addr v2, v3
-  000a: invoke-static {v2}, Blort.test:(I)I
-  000d: move-result v2
-  000e: move v0, v2
-  000f: move v2, v0
-  0010: move v3, v0
-  0011: const/4 v4, #int 2 // #2
-  0012: sub-int/2addr v3, v4
-  0013: invoke-static {v3}, Blort.test:(I)I
-  0016: move-result v3
-  0017: add-int/2addr v2, v3
-  0018: move v0, v2
-  0019: move v2, v0
+  0009: add-int/lit8 v2, v2, #int -1 // #ff
+  000b: invoke-static {v2}, Blort.test:(I)I
+  000e: move-result v2
+  000f: move v0, v2
+  0010: move v2, v0
+  0011: move v3, v0
+  0012: const/4 v4, #int 2 // #2
+  0013: add-int/lit8 v3, v3, #int -2 // #fe
+  0015: invoke-static {v3}, Blort.test:(I)I
+  0018: move-result v3
+  0019: add-int/2addr v2, v3
   001a: move v0, v2
-  001b: goto 0006 // -0015
-  001c: move-exception v2
-  001d: move-object v1, v2
-  001e: const/4 v2, #int 2 // #2
-  001f: move v0, v2
-  0020: goto 0006 // -001a
+  001b: move v2, v0
+  001c: move v0, v2
+  001d: goto 0006 // -0017
+  001e: move-exception v2
+  001f: move-object v1, v2
+  0020: const/4 v2, #int 2 // #2
+  0021: move v0, v2
+  0022: goto 0006 // -001c
   catches
     tries:
-      try 000a..000d
-      catch java.lang.RuntimeException -> 001c
+      try 000b..000e
+      catch java.lang.RuntimeException -> 001e
   debug info
     line_start: 20
     parameters_size: 0001
@@ -76,10 +76,10 @@
     0006: line 29
     line = 24
     0007: line 24
-    000f: line 28
-    0019: line 29
-    001c: line 25
-    001e: line 26
+    0010: line 28
+    001b: line 29
+    001e: line 25
+    0020: line 26
     end sequence
   source file: "Blort.java"
 Blort.test:(I)I:
@@ -92,30 +92,30 @@
   0006: return v0
   0007: move v2, v0
   0008: const/4 v3, #int 1 // #1
-  0009: sub-int/2addr v2, v3
-  000a: invoke-static {v2}, Blort.test:(I)I
-  000d: move-result v2
-  000e: move v0, v2
-  000f: move v2, v0
-  0010: move v3, v0
-  0011: const/4 v4, #int 2 // #2
-  0012: sub-int/2addr v3, v4
-  0013: invoke-static {v3}, Blort.test:(I)I
-  0016: move-result v3
-  0017: add-int/2addr v2, v3
-  0018: move v0, v2
-  0019: move v2, v0
+  0009: add-int/lit8 v2, v2, #int -1 // #ff
+  000b: invoke-static {v2}, Blort.test:(I)I
+  000e: move-result v2
+  000f: move v0, v2
+  0010: move v2, v0
+  0011: move v3, v0
+  0012: const/4 v4, #int 2 // #2
+  0013: add-int/lit8 v3, v3, #int -2 // #fe
+  0015: invoke-static {v3}, Blort.test:(I)I
+  0018: move-result v3
+  0019: add-int/2addr v2, v3
   001a: move v0, v2
-  001b: goto 0006 // -0015
-  001c: move-exception v2
-  001d: move-object v1, v2
-  001e: const/4 v2, #int 2 // #2
-  001f: move v0, v2
-  0020: goto 0006 // -001a
+  001b: move v2, v0
+  001c: move v0, v2
+  001d: goto 0006 // -0017
+  001e: move-exception v2
+  001f: move-object v1, v2
+  0020: const/4 v2, #int 2 // #2
+  0021: move v0, v2
+  0022: goto 0006 // -001c
   catches
     tries:
-      try 000a..000d
-      catch java.lang.RuntimeException -> 001c
+      try 000b..000e
+      catch java.lang.RuntimeException -> 001e
   debug info
     line_start: 20
     parameters_size: 0001
@@ -126,9 +126,9 @@
     0006: line 29
     line = 24
     0007: line 24
-    000f: line 28
-    0019: line 29
-    001c: line 25
-    001e: line 26
+    0010: line 28
+    001b: line 29
+    001e: line 25
+    0020: line 26
     end sequence
   source file: "Blort.java"
diff --git a/dx/tests/072-dex-switch-edge-cases/expected.txt b/dx/tests/072-dex-switch-edge-cases/expected.txt
index 6659284..38a467a 100644
--- a/dx/tests/072-dex-switch-edge-cases/expected.txt
+++ b/dx/tests/072-dex-switch-edge-cases/expected.txt
@@ -18,7 +18,7 @@
   0009: const/4 v2, #int 0 // #0
   000a: move v0, v2
   000b: goto 0008 // -0003
-  000c: packed-switch-data // for switch @ 0003
+  000c: packed-switch-payload // for switch @ 0003
           0: 00000009 // +00000006
 Blort.test3:(I)I:
 regs: 0005; ins: 0002; outs: 0000
@@ -32,7 +32,7 @@
   0009: const/4 v2, #int 0 // #0
   000a: move v0, v2
   000b: goto 0008 // -0003
-  000c: packed-switch-data // for switch @ 0003
+  000c: packed-switch-payload // for switch @ 0003
           -2147483648: 00000009 // +00000006
 Blort.test4:(I)I:
 regs: 0005; ins: 0002; outs: 0000
@@ -46,7 +46,7 @@
   0009: const/4 v2, #int 0 // #0
   000a: move v0, v2
   000b: goto 0008 // -0003
-  000c: packed-switch-data // for switch @ 0003
+  000c: packed-switch-payload // for switch @ 0003
           2147483647: 00000009 // +00000006
 Blort.test5:(I)I:
 regs: 0005; ins: 0002; outs: 0000
@@ -64,7 +64,7 @@
   000d: move v0, v2
   000e: goto 0008 // -0006
   000f: nop // spacer
-  0010: sparse-switch-data // for switch @ 0003
+  0010: sparse-switch-payload // for switch @ 0003
           -2147483648: 0000000c // +00000009
           0: 00000009 // +00000006
 Blort.test6:(I)I:
@@ -83,7 +83,7 @@
   000d: move v0, v2
   000e: goto 0008 // -0006
   000f: nop // spacer
-  0010: sparse-switch-data // for switch @ 0003
+  0010: sparse-switch-payload // for switch @ 0003
           0: 00000009 // +00000006
           2147483647: 0000000c // +00000009
 Blort.test7:(I)I:
@@ -102,7 +102,7 @@
   000d: move v0, v2
   000e: goto 0008 // -0006
   000f: nop // spacer
-  0010: sparse-switch-data // for switch @ 0003
+  0010: sparse-switch-payload // for switch @ 0003
           -2147483648: 00000009 // +00000006
           2147483647: 0000000c // +00000009
 Blort.test8:(I)I:
@@ -121,6 +121,6 @@
   000d: move v0, v2
   000e: goto 0008 // -0006
   000f: nop // spacer
-  0010: sparse-switch-data // for switch @ 0003
+  0010: sparse-switch-payload // for switch @ 0003
           0: 00000009 // +00000006
           1288490184: 0000000c // +00000009
diff --git a/dx/tests/084-dex-high-register-moves/expected.txt b/dx/tests/084-dex-high-register-moves/expected.txt
index 33466c4..061e7d3 100644
--- a/dx/tests/084-dex-high-register-moves/expected.txt
+++ b/dx/tests/084-dex-high-register-moves/expected.txt
@@ -49,12 +49,10 @@
   005a: move-wide/from16 v22, v0
   005c: sput-wide v22, Blort.l:J
   005e: move-object/from16 v22, v21
-  0060: move-object/from16 v0, v22
-  0062: instance-of v0, v0, java.lang.String
-  0064: move/from16 v22, v0
-  0066: if-eqz v22, 006d // +0007
-  0068: const/16 v22, #int 0 // #0000
-  006a: sput v22, Blort.i:I
-  006c: return-void
-  006d: const/16 v22, #int 1 // #0001
-  006f: goto 006a // -0005
+  0060: instance-of/jumbo v22, v22, java.lang.String
+  0065: if-eqz v22, 006c // +0007
+  0067: const/16 v22, #int 0 // #0000
+  0069: sput v22, Blort.i:I
+  006b: return-void
+  006c: const/16 v22, #int 1 // #0001
+  006e: goto 0069 // -0005
diff --git a/dx/tests/091-ssa-const-collector/expected.txt b/dx/tests/091-ssa-const-collector/expected.txt
index 3427cba..b1c855a 100644
--- a/dx/tests/091-ssa-const-collector/expected.txt
+++ b/dx/tests/091-ssa-const-collector/expected.txt
@@ -98,7 +98,7 @@
   pred 006c
   Blort.java:24@000b: Rop{invoke-virtual . <- Ljava/lang/StringBuilder; Ljava/l
   ang/String; call throws <any>}(java.lang.StringBuilder.append:(Ljava/lang/Str
-  ing;)Ljava/lang/StringBuilder; catch) . <- v0:Ljava/lang/StringBuilder; v2:Lj
+  ing;)Ljava/lang/StringBuilder; catch) . <- v0:Ljava/lang/StringBuilder; v1:Lj
   ava/lang/String;="foo"
   next 000e
 block 000e
@@ -109,7 +109,7 @@
   pred 006e
   Blort.java:25@0012: Rop{invoke-virtual . <- Ljava/lang/StringBuilder; Ljava/l
   ang/String; call throws <any>}(java.lang.StringBuilder.append:(Ljava/lang/Str
-  ing;)Ljava/lang/StringBuilder; catch) . <- v0:Ljava/lang/StringBuilder; v2:Lj
+  ing;)Ljava/lang/StringBuilder; catch) . <- v0:Ljava/lang/StringBuilder; v1:Lj
   ava/lang/String;="foo"
   next 0015
 block 0015
@@ -120,7 +120,7 @@
   pred 0070
   Blort.java:26@0019: Rop{invoke-virtual . <- Ljava/lang/StringBuilder; Ljava/l
   ang/String; call throws <any>}(java.lang.StringBuilder.append:(Ljava/lang/Str
-  ing;)Ljava/lang/StringBuilder; catch) . <- v0:Ljava/lang/StringBuilder; v2:Lj
+  ing;)Ljava/lang/StringBuilder; catch) . <- v0:Ljava/lang/StringBuilder; v1:Lj
   ava/lang/String;="foo"
   next 001c
 block 001c
@@ -131,7 +131,7 @@
   pred 0072
   Blort.java:27@0020: Rop{invoke-virtual . <- Ljava/lang/StringBuilder; Ljava/l
   ang/String; call throws <any>}(java.lang.StringBuilder.append:(Ljava/lang/Str
-  ing;)Ljava/lang/StringBuilder; catch) . <- v0:Ljava/lang/StringBuilder; v2:Lj
+  ing;)Ljava/lang/StringBuilder; catch) . <- v0:Ljava/lang/StringBuilder; v1:Lj
   ava/lang/String;="foo"
   next 0023
 block 0023
@@ -142,7 +142,7 @@
   pred 0074
   Blort.java:28@0027: Rop{invoke-virtual . <- Ljava/lang/StringBuilder; Ljava/l
   ang/String; call throws <any>}(java.lang.StringBuilder.append:(Ljava/lang/Str
-  ing;)Ljava/lang/StringBuilder; catch) . <- v0:Ljava/lang/StringBuilder; v2:Lj
+  ing;)Ljava/lang/StringBuilder; catch) . <- v0:Ljava/lang/StringBuilder; v1:Lj
   ava/lang/String;="foo"
   next 002a
 block 002a
@@ -153,12 +153,12 @@
   pred 0076
   Blort.java:29@002e: Rop{invoke-virtual . <- Ljava/lang/StringBuilder; Ljava/l
   ang/String; call throws <any>}(java.lang.StringBuilder.append:(Ljava/lang/Str
-  ing;)Ljava/lang/StringBuilder; catch) . <- v0:Ljava/lang/StringBuilder; v2:Lj
+  ing;)Ljava/lang/StringBuilder; catch) . <- v0:Ljava/lang/StringBuilder; v1:Lj
   ava/lang/String;="foo"
   next 0065
 block 0064
-  pred 007b
-  Blort.java:22@0000: move-param-object(0) v3:"this"LBlort; <- .
+  pred 0078
+  Blort.java:22@0000: move-param-object(0) v2:"this"LBlort; <- .
   Blort.java:22@0000: goto . <- .
   next 0000
 block 0065
@@ -209,16 +209,6 @@
   next 002e
 block 0078
   @????: goto . <- .
-  next 007a
-block 007a
-  pred 0078
-  @????: const-object("foo" catch) . <- .
-  next 007b
-block 007b
-  pred 007a
-  @????: Rop{move-result-pseudo Ljava/lang/String; <- . flows} v2:Ljava/lang/St
-  ring;="foo" <- .
-  @????: goto . <- .
   next 0064
 
 method testCaughtStrings ()V
@@ -242,7 +232,7 @@
   pred 0086
   Blort.java:35@000b: Rop{invoke-virtual . <- Ljava/lang/StringBuilder; Ljava/l
   ang/String; call throws <any>}(java.lang.StringBuilder.append:(Ljava/lang/Str
-  ing;)Ljava/lang/StringBuilder; catch) . <- v0:Ljava/lang/StringBuilder; v4:Lj
+  ing;)Ljava/lang/StringBuilder; catch) . <- v0:Ljava/lang/StringBuilder; v2:Lj
   ava/lang/String;="foo"
   next 000e
 block 000e
@@ -253,7 +243,7 @@
   pred 0088
   Blort.java:36@0012: Rop{invoke-virtual . <- Ljava/lang/StringBuilder; Ljava/l
   ang/String; call throws <any>}(java.lang.StringBuilder.append:(Ljava/lang/Str
-  ing;)Ljava/lang/StringBuilder; catch) . <- v0:Ljava/lang/StringBuilder; v4:Lj
+  ing;)Ljava/lang/StringBuilder; catch) . <- v0:Ljava/lang/StringBuilder; v2:Lj
   ava/lang/String;="foo"
   next 0015
 block 0015
@@ -264,7 +254,7 @@
   pred 008a
   Blort.java:37@0019: Rop{invoke-virtual . <- Ljava/lang/StringBuilder; Ljava/l
   ang/String; call throws <any>}(java.lang.StringBuilder.append:(Ljava/lang/Str
-  ing;)Ljava/lang/StringBuilder; catch) . <- v0:Ljava/lang/StringBuilder; v4:Lj
+  ing;)Ljava/lang/StringBuilder; catch) . <- v0:Ljava/lang/StringBuilder; v2:Lj
   ava/lang/String;="foo"
   next 001d
 block 001d
@@ -320,11 +310,11 @@
   pred 0093
   Blort.java:43@003b: Rop{invoke-virtual . <- Ljava/io/PrintStream; Ljava/lang/
   String; call throws <any>}(java.io.PrintStream.println:(Ljava/lang/String;)V 
-  catch) . <- v2:Ljava/io/PrintStream; v4:Ljava/lang/String;="foo"
+  catch) . <- v0:Ljava/io/PrintStream; v2:Ljava/lang/String;="foo"
   next 007f
 block 007e
-  pred 009e
-  Blort.java:33@0000: move-param-object(0) v5:"this"LBlort; <- .
+  pred 0094
+  Blort.java:33@0000: move-param-object(0) v3:"this"LBlort; <- .
   Blort.java:33@0000: goto . <- .
   next 0000
 block 007f
@@ -377,18 +367,18 @@
 block 0092
   pred 0035
   Blort.java:43@0036: Rop{move-result-pseudo Ljava/io/PrintStream; <- . flows} 
-  v2:Ljava/io/PrintStream; <- .
+  v0:Ljava/io/PrintStream; <- .
   Blort.java:43@0036: goto . <- .
   next 0039
 block 0093
   pred 0039
-  Blort.java:43@0039: Rop{move-result-pseudo Ljava/lang/String; <- . flows} v3:
+  Blort.java:43@0039: Rop{move-result-pseudo Ljava/lang/String; <- . flows} v2:
   Ljava/lang/String;="foo" <- .
   Blort.java:43@0039: goto . <- .
   next 003b
 block 0094
   @????: goto . <- .
-  next 009d
+  next 007e
 block 0095
   pred 001d
   pred 0020
@@ -396,21 +386,10 @@
   pred 0027
   pred 002a
   pred 002e
-  Blort.java:42@0035: Rop{move-exception Ljava/lang/Throwable; <- . flows} v2:L
+  Blort.java:42@0035: Rop{move-exception Ljava/lang/Throwable; <- . flows} v1:L
   java/lang/Throwable; <- .
-  @????: move-object v1:Ljava/lang/Throwable; <- v2:Ljava/lang/Throwable;
   @????: goto . <- .
   next 0035
-block 009d
-  pred 0094
-  @????: const-object("foo" catch) . <- .
-  next 009e
-block 009e
-  pred 009d
-  @????: Rop{move-result-pseudo Ljava/lang/String; <- . flows} v4:Ljava/lang/St
-  ring;="foo" <- .
-  @????: goto . <- .
-  next 007e
 
 method testLocalVars ()V
 first 0004
@@ -428,7 +407,7 @@
   @????: mark-local-int . <- v1:"b"I
   Blort.java:54@0011: const-int(10) v2:I=10 <- .
   @????: mark-local-int . <- v2:"c"I
-  Blort.java:56@0018: mul-const-int(10) v3:I <- v3:I
+  Blort.java:56@0018: const-int(100) v3:I=100 <- .
   @????: mark-local-int . <- v3:"i"I=100
   Blort.java:57@001a: goto . <- .
   next 0003
diff --git a/dx/tests/093-ssa-invoke-range/expected.txt b/dx/tests/093-ssa-invoke-range/expected.txt
index 5d9adb8..0506c4d 100644
--- a/dx/tests/093-ssa-invoke-range/expected.txt
+++ b/dx/tests/093-ssa-invoke-range/expected.txt
@@ -265,7 +265,7 @@
   next 0017
 block 0017
   pred 0004
-  Blort.java:63@0018: conv-i2l v4:J <- v3:I
+  Blort.java:63@0018: conv-i2l v6:J <- v3:I
   Blort.java:63@0019: goto . <- .
   next 003d
 block 001a
@@ -276,7 +276,6 @@
 block 001d
   pred 0043
   @????: mark-local-long . <- v6:"offset"J
-  @????: move-long v4:J <- v6:"offset"J
   Blort.java:66@001f: goto . <- .
   next 003d
 block 003c
@@ -288,7 +287,7 @@
 block 003d
   pred 0017
   pred 001d
-  Blort.java:66@001f: return-long . <- v4:J
+  Blort.java:66@001f: return-long . <- v6:J
   returns
 block 0043
   pred 001a
diff --git a/dx/tests/100-local-mismatch/expected.txt b/dx/tests/100-local-mismatch/expected.txt
index 235b206..0f77225 100644
--- a/dx/tests/100-local-mismatch/expected.txt
+++ b/dx/tests/100-local-mismatch/expected.txt
@@ -1,9 +1,9 @@
 TEST 1
-com.android.dx.cf.code.SimException: local variable type mismatch: attempt to set or access a value of type int using a local variable of type java.lang.Object. This is symptomatic of .class transformation tools that ignore local variable information.
+local variable type mismatch: attempt to set or access a value of type int using a local variable of type java.lang.Object. This is symptomatic of .class transformation tools that ignore local variable information.
 TEST 2
-com.android.dx.cf.code.SimException: local variable type mismatch: attempt to set or access a value of type java.lang.Object using a local variable of type int. This is symptomatic of .class transformation tools that ignore local variable information.
+local variable type mismatch: attempt to set or access a value of type java.lang.Object using a local variable of type int. This is symptomatic of .class transformation tools that ignore local variable information.
 TEST 3
-com.android.dx.cf.code.SimException: local variable type mismatch: attempt to set or access a value of type int using a local variable of type byte[]. This is symptomatic of .class transformation tools that ignore local variable information.
+local variable type mismatch: attempt to set or access a value of type int using a local variable of type byte[]. This is symptomatic of .class transformation tools that ignore local variable information.
 TEST 4
-com.android.dx.cf.code.SimException: local variable type mismatch: attempt to set or access a value of type java.lang.String using a local variable of type java.lang.Object[]. This is symptomatic of .class transformation tools that ignore local variable information.
+local variable type mismatch: attempt to set or access a value of type java.lang.String using a local variable of type java.lang.Object[]. This is symptomatic of .class transformation tools that ignore local variable information.
 DONE
diff --git a/dx/tests/115-merge/com/android/dx/merge/DexMergeTest.java b/dx/tests/115-merge/com/android/dx/merge/DexMergeTest.java
new file mode 100644
index 0000000..4126368
--- /dev/null
+++ b/dx/tests/115-merge/com/android/dx/merge/DexMergeTest.java
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.merge;
+
+import com.android.dx.io.DexBuffer;
+import dalvik.system.PathClassLoader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.util.Arrays;
+import java.util.jar.JarEntry;
+import java.util.jar.JarOutputStream;
+import junit.framework.TestCase;
+
+/**
+ * Test that DexMerge works by merging dex files, and then loading them into
+ * the current VM.
+ */
+public final class DexMergeTest extends TestCase {
+
+    public void testFillArrayData() throws Exception {
+        ClassLoader loader = mergeAndLoad(
+                "/testdata/Basic.dex",
+                "/testdata/FillArrayData.dex");
+
+        Class<?> basic = loader.loadClass("testdata.Basic");
+        assertEquals(1, basic.getDeclaredMethods().length);
+
+        Class<?> fillArrayData = loader.loadClass("testdata.FillArrayData");
+        assertTrue(Arrays.equals(
+                new byte[] { 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, -112, -23, 121 },
+                (byte[]) fillArrayData.getMethod("newByteArray").invoke(null)));
+        assertTrue(Arrays.equals(
+                new char[] { 0xFFFF, 0x4321, 0xABCD, 0, 'a', 'b', 'c' },
+                (char[]) fillArrayData.getMethod("newCharArray").invoke(null)));
+        assertTrue(Arrays.equals(
+                new long[] { 4660046610375530309L, 7540113804746346429L, -6246583658587674878L },
+                (long[]) fillArrayData.getMethod("newLongArray").invoke(null)));
+    }
+
+    public void testTryCatchFinally() throws Exception {
+        ClassLoader loader = mergeAndLoad(
+                "/testdata/Basic.dex",
+                "/testdata/TryCatchFinally.dex");
+
+        Class<?> basic = loader.loadClass("testdata.Basic");
+        assertEquals(1, basic.getDeclaredMethods().length);
+
+        Class<?> tryCatchFinally = loader.loadClass("testdata.TryCatchFinally");
+        tryCatchFinally.getDeclaredMethod("method").invoke(null);
+    }
+
+    public void testStaticValues() throws Exception {
+        ClassLoader loader = mergeAndLoad(
+                "/testdata/Basic.dex",
+                "/testdata/StaticValues.dex");
+
+        Class<?> basic = loader.loadClass("testdata.Basic");
+        assertEquals(1, basic.getDeclaredMethods().length);
+
+        Class<?> staticValues = loader.loadClass("testdata.StaticValues");
+        assertEquals((byte) 1, staticValues.getField("a").get(null));
+        assertEquals((short) 2, staticValues.getField("b").get(null));
+        assertEquals('C', staticValues.getField("c").get(null));
+        assertEquals(0xabcd1234, staticValues.getField("d").get(null));
+        assertEquals(4660046610375530309L, staticValues.getField("e").get(null));
+        assertEquals(0.5f, staticValues.getField("f").get(null));
+        assertEquals(-0.25, staticValues.getField("g").get(null));
+        assertEquals("this is a String", staticValues.getField("h").get(null));
+        assertEquals(String.class, staticValues.getField("i").get(null));
+        assertEquals("[0, 1]", Arrays.toString((int[]) staticValues.getField("j").get(null)));
+        assertEquals(null, staticValues.getField("k").get(null));
+        assertEquals(true, staticValues.getField("l").get(null));
+        assertEquals(false, staticValues.getField("m").get(null));
+    }
+
+    public void testAnnotations() throws Exception {
+        ClassLoader loader = mergeAndLoad(
+                "/testdata/Basic.dex",
+                "/testdata/Annotated.dex");
+
+        Class<?> basic = loader.loadClass("testdata.Basic");
+        assertEquals(1, basic.getDeclaredMethods().length);
+
+        Class<?> annotated = loader.loadClass("testdata.Annotated");
+        Method method = annotated.getMethod("method", String.class, String.class);
+        Field field = annotated.getField("field");
+
+        @SuppressWarnings("unchecked")
+        Class<? extends Annotation> marker
+                = (Class<? extends Annotation>) loader.loadClass("testdata.Annotated$Marker");
+
+        assertEquals("@testdata.Annotated$Marker(a=on class, b=[A, B, C], "
+                + "c=@testdata.Annotated$Nested(e=E1, f=1695938256, g=7264081114510713000), "
+                + "d=[@testdata.Annotated$Nested(e=E2, f=1695938256, g=7264081114510713000)])",
+                annotated.getAnnotation(marker).toString());
+        assertEquals("@testdata.Annotated$Marker(a=on method, b=[], "
+                + "c=@testdata.Annotated$Nested(e=, f=0, g=0), d=[])",
+                method.getAnnotation(marker).toString());
+        assertEquals("@testdata.Annotated$Marker(a=on field, b=[], "
+                + "c=@testdata.Annotated$Nested(e=, f=0, g=0), d=[])",
+                field.getAnnotation(marker).toString());
+        assertEquals("@testdata.Annotated$Marker(a=on parameter, b=[], "
+                + "c=@testdata.Annotated$Nested(e=, f=0, g=0), d=[])",
+                method.getParameterAnnotations()[1][0].toString());
+    }
+
+    /**
+     * Merging dex files uses pessimistic sizes that naturally leave gaps in the
+     * output files. If those gaps grow too large, the merger is supposed to
+     * compact the result. This exercises that by repeatedly merging a dex with
+     * itself.
+     */
+    public void testMergedOutputSizeIsBounded() throws Exception {
+        /*
+         * At the time this test was written, the output would grow ~25% with
+         * each merge. Setting a low 1KiB ceiling on the maximum size caused
+         * the file to be compacted every four merges.
+         */
+        int steps = 100;
+        int compactWasteThreshold = 1024;
+
+        DexBuffer dexA = new DexBuffer();
+        DexBuffer dexB = new DexBuffer();
+        dexA.loadFrom(resourceToFile("/testdata/Basic.dex"));
+        dexB.loadFrom(resourceToFile("/testdata/TryCatchFinally.dex"));
+        DexBuffer merged = new DexMerger(dexA, dexB).merge();
+
+        int maxLength = 0;
+        for (int i = 0; i < steps; i++) {
+            DexMerger dexMerger = new DexMerger(dexA, merged);
+            dexMerger.setCompactWasteThreshold(compactWasteThreshold);
+            merged = dexMerger.merge();
+            maxLength = Math.max(maxLength, merged.getLength());
+        }
+
+        int maxExpectedLength = dexA.getLength() + dexB.getLength() + compactWasteThreshold;
+        assertTrue(maxLength + " < " + maxExpectedLength, maxLength < maxExpectedLength);
+    }
+
+    public ClassLoader mergeAndLoad(String dexAResource, String dexBResource) throws IOException {
+        DexBuffer dexA = new DexBuffer();
+        DexBuffer dexB = new DexBuffer();
+        dexA.loadFrom(resourceToFile(dexAResource));
+        dexB.loadFrom(resourceToFile(dexBResource));
+        DexBuffer merged = new DexMerger(dexA, dexB).merge();
+        File mergedDex = File.createTempFile("DexMergeTest", ".classes.dex");
+        merged.writeTo(mergedDex);
+        File mergedJar = dexToJar(mergedDex);
+        return new PathClassLoader(mergedJar.getPath(), getClass().getClassLoader());
+    }
+
+    private File resourceToFile(String resource) throws IOException {
+        File result = File.createTempFile("DexMergeTest", ".resource");
+        result.deleteOnExit();
+        FileOutputStream out = new FileOutputStream(result);
+        InputStream in = getClass().getResourceAsStream(resource);
+        if (in == null) {
+            throw new IllegalArgumentException("No such resource: " + resource);
+        }
+        copy(in, out);
+        out.close();
+        return result;
+    }
+
+    private File dexToJar(File dex) throws IOException {
+        File result = File.createTempFile("DexMergeTest", ".jar");
+        result.deleteOnExit();
+        JarOutputStream jarOut = new JarOutputStream(new FileOutputStream(result));
+        jarOut.putNextEntry(new JarEntry("classes.dex"));
+        copy(new FileInputStream(dex), jarOut);
+        jarOut.closeEntry();
+        jarOut.close();
+        return result;
+    }
+
+    private void copy(InputStream in, OutputStream out) throws IOException {
+        byte[] buffer = new byte[1024];
+        int count;
+        while ((count = in.read(buffer)) != -1) {
+            out.write(buffer, 0, count);
+        }
+        in.close();
+    }
+}
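The merge flow the test exercises, reduced to its core as a sketch with hypothetical file paths; every call used here (loadFrom, setCompactWasteThreshold, merge, writeTo, getLength) appears in the test above.

    import java.io.File;

    import com.android.dx.io.DexBuffer;
    import com.android.dx.merge.DexMerger;

    public class MergeSketch {
        public static void main(String[] args) throws Exception {
            DexBuffer dexA = new DexBuffer();
            DexBuffer dexB = new DexBuffer();
            dexA.loadFrom(new File("a/classes.dex")); // hypothetical inputs
            dexB.loadFrom(new File("b/classes.dex"));
            DexMerger merger = new DexMerger(dexA, dexB);
            merger.setCompactWasteThreshold(1024);    // same knob the size-bound test tweaks
            DexBuffer merged = merger.merge();
            merged.writeTo(new File("merged/classes.dex"));
            System.out.println("merged length: " + merged.getLength() + " bytes");
        }
    }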
diff --git a/dx/tests/115-merge/expected.txt b/dx/tests/115-merge/expected.txt
new file mode 100644
index 0000000..5418338
--- /dev/null
+++ b/dx/tests/115-merge/expected.txt
@@ -0,0 +1 @@
+Yay!
diff --git a/dx/tests/115-merge/info.txt b/dx/tests/115-merge/info.txt
new file mode 100644
index 0000000..c1fa2e4
--- /dev/null
+++ b/dx/tests/115-merge/info.txt
@@ -0,0 +1,6 @@
+Merges two dex files into one and then loads the result.
+
+Because it loads the merged dex files, this JUnit test only works on a dalvikvm.
+The run script requires vogar, so you must have vogar on your $PATH to run this
+test. You'll also need a device or host VM for vogar to attach to.
+
diff --git a/dx/tests/115-merge/run b/dx/tests/115-merge/run
new file mode 100644
index 0000000..d1cf82d
--- /dev/null
+++ b/dx/tests/115-merge/run
@@ -0,0 +1,36 @@
+#!/bin/bash
+#
+# Copyright (C) 2007 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Find dx.jar from dx in the android dev tree
+prog=`which dx`
+progdir=`dirname "${prog}"`
+
+javac testdata/*.java
+dx --dex --output=testdata/Basic.dex testdata/Basic.class
+dx --dex --output=testdata/FillArrayData.dex testdata/FillArrayData.class
+dx --dex --output=testdata/StaticValues.dex testdata/StaticValues.class
+dx --dex --output=testdata/TryCatchFinally.dex testdata/TryCatchFinally.class
+jar cfM resources.jar testdata/*.dex
+
+vogar --classpath resources.jar \
+  --classpath $progdir/../framework/dx.jar \
+  com/android/dx/merge/DexMergeTest.java > unit-out.txt
+
+if [ "$?" = "0" ]; then
+    echo "Yay!"
+else
+    cat unit-out.txt
+fi
diff --git a/dx/tests/115-merge/testdata/Annotated.java b/dx/tests/115-merge/testdata/Annotated.java
new file mode 100644
index 0000000..2e893f2
--- /dev/null
+++ b/dx/tests/115-merge/testdata/Annotated.java
@@ -0,0 +1,31 @@
+package testdata;
+
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+
+@Annotated.Marker(a = "on class", b = {"A", "B", "C" },
+        c = @Annotated.Nested(e="E1", f=1695938256, g=7264081114510713000L),
+        d = { @Annotated.Nested(e="E2", f=1695938256, g=7264081114510713000L) })
+public class Annotated {
+
+    @Annotated.Marker(a="on field")
+    public String field;
+
+    @Annotated.Marker(a="on method")
+    public void method(String a, @Annotated.Marker(a="on parameter") String b) {}
+
+    @Retention(RetentionPolicy.RUNTIME)
+    public @interface Marker {
+        String a() default "";
+        String[] b() default {};
+        Nested c() default @Nested;
+        Nested[] d() default {};
+    }
+
+    @Retention(RetentionPolicy.RUNTIME)
+    public @interface Nested {
+        String e() default "";
+        int f() default 0;
+        long g() default 0L;
+    }
+}
diff --git a/dx/tests/115-merge/testdata/Basic.java b/dx/tests/115-merge/testdata/Basic.java
new file mode 100644
index 0000000..01a1635
--- /dev/null
+++ b/dx/tests/115-merge/testdata/Basic.java
@@ -0,0 +1,10 @@
+package testdata;
+
+public class Basic {
+
+    String field = "this is a field";
+
+    String method() {
+        return "this is a method result";
+    }
+}
diff --git a/dx/tests/115-merge/testdata/FillArrayData.java b/dx/tests/115-merge/testdata/FillArrayData.java
new file mode 100644
index 0000000..0ece934
--- /dev/null
+++ b/dx/tests/115-merge/testdata/FillArrayData.java
@@ -0,0 +1,16 @@
+package testdata;
+
+public class FillArrayData {
+
+    public static byte[] newByteArray() {
+        return new byte[] { 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, -112, -23, 121 };
+    }
+
+    public static char[] newCharArray() {
+        return new char[] { 0xFFFF, 0x4321, 0xABCD, 0, 'a', 'b', 'c' };
+    }
+
+    public static long[] newLongArray() {
+        return new long[] { 4660046610375530309L, 7540113804746346429L, -6246583658587674878L };
+    }
+}
diff --git a/dx/tests/115-merge/testdata/StaticValues.java b/dx/tests/115-merge/testdata/StaticValues.java
new file mode 100644
index 0000000..1a8648f
--- /dev/null
+++ b/dx/tests/115-merge/testdata/StaticValues.java
@@ -0,0 +1,17 @@
+package testdata;
+
+public class StaticValues {
+    public static final byte a = 1;
+    public static final short b = 2;
+    public static final char c = 'C';
+    public static final int d = 0xabcd1234;
+    public static final long e = 4660046610375530309L;
+    public static final float f = 0.5f;
+    public static final double g = -0.25;
+    public static final String h = "this is a String";
+    public static final Class<?> i = String.class;
+    public static final int[] j = { 0, 1 };
+    public static final String k = null;
+    public static final boolean l = true;
+    public static final boolean m = false;
+}
diff --git a/dx/tests/115-merge/testdata/TryCatchFinally.java b/dx/tests/115-merge/testdata/TryCatchFinally.java
new file mode 100644
index 0000000..4f3769e
--- /dev/null
+++ b/dx/tests/115-merge/testdata/TryCatchFinally.java
@@ -0,0 +1,26 @@
+package testdata;
+
+public class TryCatchFinally {
+
+    public static void method() {
+        int count = 0;
+        try {
+            if (true) {
+                throw new NullPointerException();
+            }
+            throw new AssertionError();
+        } catch (IllegalStateException e) {
+            throw new AssertionError();
+        } catch (NullPointerException expected) {
+            count++;
+        } catch (RuntimeException e) {
+            throw new AssertionError();
+        } finally {
+            count++;
+        }
+
+        if (count != 2) {
+            throw new AssertionError();
+        }
+    }
+}
diff --git a/dx/tests/116-leb128/com/android/dx/util/Leb128UtilsTest.java b/dx/tests/116-leb128/com/android/dx/util/Leb128UtilsTest.java
new file mode 100644
index 0000000..f47bc86
--- /dev/null
+++ b/dx/tests/116-leb128/com/android/dx/util/Leb128UtilsTest.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.util;
+
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.util.Arrays;
+import junit.framework.TestCase;
+
+public final class Leb128UtilsTest extends TestCase {
+
+    public void testDecodeUnsignedLeb() throws IOException {
+        assertEquals(0, Leb128Utils.readUnsignedLeb128(newDataInput((byte) 0)));
+        assertEquals(1, Leb128Utils.readUnsignedLeb128(newDataInput((byte) 1)));
+        assertEquals(127, Leb128Utils.readUnsignedLeb128(newDataInput((byte) 0x7F)));
+        assertEquals(16256, Leb128Utils.readUnsignedLeb128(newDataInput((byte) 0x80, (byte) 0x7F)));
+    }
+
+    public void testEncodeUnsignedLeb() throws IOException {
+        assertEquals(new byte[] { 0 }, encodeUnsignedLeb(0));
+        assertEquals(new byte[] { 1 }, encodeUnsignedLeb(1));
+        assertEquals(new byte[] { 0x7F }, encodeUnsignedLeb(127));
+        assertEquals(new byte[] { (byte) 0x80, 0x7F }, encodeUnsignedLeb(16256));
+        assertEquals(new byte[] { (byte) 0xb4, 0x07 }, encodeUnsignedLeb(0x3b4));
+        assertEquals(new byte[] { (byte) 0x8c, 0x08 }, encodeUnsignedLeb(0x40c));
+    }
+
+    public void testDecodeSignedLeb() throws IOException {
+        assertEquals(0, Leb128Utils.readSignedLeb128(newDataInput((byte) 0)));
+        assertEquals(1, Leb128Utils.readSignedLeb128(newDataInput((byte) 1)));
+        assertEquals(-1, Leb128Utils.readSignedLeb128(newDataInput((byte) 0x7F)));
+        assertEquals(0x3C, Leb128Utils.readSignedLeb128(newDataInput((byte) 0x3C)));
+        assertEquals(-128, Leb128Utils.readSignedLeb128(newDataInput((byte) 0x80, (byte) 0x7F)));
+    }
+
+    public void testEncodeSignedLeb() throws IOException {
+        assertEquals(new byte[] { 0 }, encodeSignedLeb(0));
+        assertEquals(new byte[] { 1 }, encodeSignedLeb(1));
+        assertEquals(new byte[] { 0x7F }, encodeSignedLeb(-1));
+        assertEquals(new byte[] { (byte) 0x80, 0x7F }, encodeSignedLeb(-128));
+    }
+
+    private byte[] encodeSignedLeb(int value) {
+        byte[] buffer = new byte[5];
+        int length = Leb128Utils.writeSignedLeb128(buffer, 0, value);
+        return Arrays.copyOfRange(buffer, 0, length);
+    }
+
+    private byte[] encodeUnsignedLeb(int value) {
+        byte[] buffer = new byte[5];
+        int length = Leb128Utils.writeUnsignedLeb128(buffer, 0, value);
+        return Arrays.copyOfRange(buffer, 0, length);
+    }
+
+    public DataInputStream newDataInput(byte... bytes) {
+        return new DataInputStream(new ByteArrayInputStream(bytes));
+    }
+
+    private void assertEquals(byte[] expected, byte[] actual) {
+        assertTrue(Arrays.equals(expected, actual));
+    }
+}
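
The byte values asserted above follow directly from the LEB128 rules: each output byte carries seven payload bits, least-significant group first, with bit 7 set on every byte except the last. A minimal C sketch of the scheme the test exercises (illustrative only; the helper names below are not the dx Leb128Utils API):

    #include <stdint.h>
    #include <stddef.h>

    /* Encode an unsigned 32-bit value; returns the number of bytes written (1..5). */
    static size_t uleb128_encode(uint32_t value, uint8_t out[5]) {
        size_t n = 0;
        do {
            uint8_t b = value & 0x7f;   /* low seven bits */
            value >>= 7;
            if (value != 0)
                b |= 0x80;              /* continuation bit: more groups follow */
            out[n++] = b;
        } while (value != 0);
        return n;
    }

    /* Decode an unsigned value, advancing *in past the encoded bytes. */
    static uint32_t uleb128_decode(const uint8_t** in) {
        uint32_t result = 0;
        int shift = 0;
        uint8_t b;
        do {
            b = *(*in)++;
            result |= (uint32_t)(b & 0x7f) << shift;
            shift += 7;
        } while (b & 0x80);
        return result;
    }

Tracing 16256 (0x3f80) through the encoder: the low seven bits are zero, so the first byte is 0x80 (empty payload plus the continuation bit); the remaining value is 127, emitted as the final byte 0x7f. That matches the { 0x80, 0x7F } expectation in testEncodeUnsignedLeb above.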
diff --git a/dx/tests/116-leb128/expected.txt b/dx/tests/116-leb128/expected.txt
new file mode 100644
index 0000000..5418338
--- /dev/null
+++ b/dx/tests/116-leb128/expected.txt
@@ -0,0 +1 @@
+Yay!
diff --git a/dx/tests/116-leb128/info.txt b/dx/tests/116-leb128/info.txt
new file mode 100644
index 0000000..1603ec3
--- /dev/null
+++ b/dx/tests/116-leb128/info.txt
@@ -0,0 +1,5 @@
+Exercises LEB128 (little-endian base 128) encoding and decoding in dx.
+
+The run script requires vogar, so you must have vogar on your $PATH to run this
+test. You'll also need a device or host VM for vogar to attach to.
+
diff --git a/dx/tests/116-leb128/run b/dx/tests/116-leb128/run
new file mode 100644
index 0000000..1e729ed
--- /dev/null
+++ b/dx/tests/116-leb128/run
@@ -0,0 +1,28 @@
+#!/bin/bash
+#
+# Copyright (C) 2007 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Find dx.jar from dx in the android dev tree
+prog=`which dx`
+progdir=`dirname "${prog}"`
+
+vogar --classpath $progdir/../framework/dx.jar \
+  com/android/dx/util/Leb128UtilsTest.java > unit-out.txt
+
+if [ "$?" = "0" ]; then
+    echo "Yay!"
+else
+    cat unit-out.txt
+fi
diff --git a/dx/tests/117-modified-utf8/com/android/dx/util/Mutf8Test.java b/dx/tests/117-modified-utf8/com/android/dx/util/Mutf8Test.java
new file mode 100644
index 0000000..736b1bf
--- /dev/null
+++ b/dx/tests/117-modified-utf8/com/android/dx/util/Mutf8Test.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.dx.util;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.util.Arrays;
+import junit.framework.TestCase;
+
+public final class Mutf8Test extends TestCase {
+
+    public void testDecode() throws IOException {
+        File file = createTempFile(new byte[] { 'A', 'B', 'C', (byte) 0xc0, (byte) 0x80, 0, 'E' });
+        RandomAccessFile f = new RandomAccessFile(file, "r");
+        assertEquals('A', f.readByte());
+        assertEquals("BC\u0000", Mutf8.decode(f, new char[3]));
+        assertEquals('E', f.readByte());
+        file.delete();
+    }
+
+    public void testEncode() throws IOException {
+        assertEquals(Arrays.toString(new byte[] { 'B', 'C', (byte) 0xc0, (byte) 0x80 }),
+                Arrays.toString(Mutf8.encode("BC\u0000")));
+    }
+
+    private File createTempFile(byte[] contents) throws IOException {
+        File result = File.createTempFile(getClass().getName(), "test");
+        FileOutputStream out = new FileOutputStream(result);
+        out.write(contents);
+        out.close();
+        return result;
+    }
+}
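
The test relies on the defining quirk of modified UTF-8: U+0000 is written as the two-byte sequence 0xC0 0x80 so that a real 0x00 byte can still serve as a string terminator, while other BMP code points use the ordinary one- to three-byte UTF-8 forms. A rough C sketch of encoding a single UTF-16 code unit under those rules (illustrative; not the dx Mutf8 implementation):

    #include <stdint.h>
    #include <stddef.h>

    /* Append one UTF-16 code unit in modified UTF-8; returns bytes written (1..3). */
    static size_t mutf8_encode_unit(uint16_t cu, uint8_t* out) {
        if (cu != 0 && cu <= 0x7f) {             /* plain ASCII: one byte */
            out[0] = (uint8_t) cu;
            return 1;
        }
        if (cu <= 0x7ff) {                       /* two bytes; covers the U+0000 case */
            out[0] = (uint8_t) (0xc0 | (cu >> 6));
            out[1] = (uint8_t) (0x80 | (cu & 0x3f));
            return 2;
        }
        out[0] = (uint8_t) (0xe0 | (cu >> 12));  /* three bytes; surrogates encoded as-is */
        out[1] = (uint8_t) (0x80 | ((cu >> 6) & 0x3f));
        out[2] = (uint8_t) (0x80 | (cu & 0x3f));
        return 3;
    }

Feeding "BC\u0000" through this one code unit at a time yields 'B', 'C', 0xC0, 0x80, which is exactly the byte sequence testEncode expects.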
diff --git a/dx/tests/117-modified-utf8/expected.txt b/dx/tests/117-modified-utf8/expected.txt
new file mode 100644
index 0000000..5418338
--- /dev/null
+++ b/dx/tests/117-modified-utf8/expected.txt
@@ -0,0 +1 @@
+Yay!
diff --git a/dx/tests/117-modified-utf8/info.txt b/dx/tests/117-modified-utf8/info.txt
new file mode 100644
index 0000000..df11d98
--- /dev/null
+++ b/dx/tests/117-modified-utf8/info.txt
@@ -0,0 +1,5 @@
+Exercises modified UTF-8 (MUTF-8) encoding and decoding in dx.
+
+The run script requires vogar, so you must have vogar on your $PATH to run this
+test. You'll also need a device or host VM for vogar to attach to.
+
diff --git a/dx/tests/117-modified-utf8/run b/dx/tests/117-modified-utf8/run
new file mode 100644
index 0000000..a689991
--- /dev/null
+++ b/dx/tests/117-modified-utf8/run
@@ -0,0 +1,28 @@
+#!/bin/bash
+#
+# Copyright (C) 2007 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Find dx.jar from dx in the android dev tree
+prog=`which dx`
+progdir=`dirname "${prog}"`
+
+vogar --classpath $progdir/../framework/dx.jar \
+  com/android/dx/util/Mutf8Test.java > unit-out.txt
+
+if [ "$?" = "0" ]; then
+    echo "Yay!"
+else
+    cat unit-out.txt
+fi
diff --git a/dx/tests/118-find-usages/Foo.java b/dx/tests/118-find-usages/Foo.java
new file mode 100644
index 0000000..d5dc0bd
--- /dev/null
+++ b/dx/tests/118-find-usages/Foo.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Reader;
+import java.io.StreamTokenizer;
+import java.util.AbstractList;
+import java.util.ArrayList;
+
+public final class Foo {
+
+    public void writeStreamTokenizerNval() {
+        new StreamTokenizer((Reader) null).nval = 5;
+    }
+
+    public double readStreamTokenizerNval() {
+        return new StreamTokenizer((Reader) null).nval;
+    }
+
+    public void callStringValueOf() {
+        String.valueOf(5);
+    }
+
+    public void callIntegerValueOf() {
+        Integer.valueOf("5");
+    }
+
+    public void callArrayListRemoveIndex() {
+        new ArrayList<String>().remove(5);
+    }
+
+    public void callArrayListRemoveValue() {
+        new ArrayList<String>().remove("5");
+    }
+
+    static class MyList<T> extends AbstractList<T> {
+        @Override public T get(int index) {
+            return null;
+        }
+        @Override public int size() {
+            return 0;
+        }
+        @Override public boolean remove(Object o) {
+            return false;
+        }
+    }
+}
diff --git a/dx/tests/118-find-usages/expected.txt b/dx/tests/118-find-usages/expected.txt
new file mode 100644
index 0000000..aca2bf1
--- /dev/null
+++ b/dx/tests/118-find-usages/expected.txt
@@ -0,0 +1,9 @@
+StreamTokenizer.nval
+LFoo;.readStreamTokenizerNval: field reference (iget-wide)
+LFoo;.writeStreamTokenizerNval: field reference (iput-wide)
+ArrayList.remove()
+LFoo;.callArrayListRemoveIndex: method reference (invoke-virtual)
+LFoo;.callArrayListRemoveValue: method reference (invoke-virtual)
+Collection.remove()
+String.valueOf()
+LFoo;.callStringValueOf: method reference (invoke-static)
diff --git a/dx/tests/118-find-usages/info.txt b/dx/tests/118-find-usages/info.txt
new file mode 100644
index 0000000..2a4e8a6
--- /dev/null
+++ b/dx/tests/118-find-usages/info.txt
@@ -0,0 +1,3 @@
+Creates a .dex file and runs dx --find-usages on it to report references and declarations.
+
+The expected output assumes this bug has not yet been fixed: http://b/3366285
\ No newline at end of file
diff --git a/dx/tests/118-find-usages/run b/dx/tests/118-find-usages/run
new file mode 100644
index 0000000..22f38cc
--- /dev/null
+++ b/dx/tests/118-find-usages/run
@@ -0,0 +1,30 @@
+#!/bin/bash
+#
+# Copyright (C) 2011 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+$JAVAC -d . *.java
+dx --output=foo.dex --dex *.class
+
+echo "StreamTokenizer.nval"
+dx --find-usages foo.dex "Ljava/io/StreamTokenizer;" nval
+
+echo "ArrayList.remove()"
+dx --find-usages foo.dex "Ljava/util/ArrayList;" remove
+
+echo "Collection.remove()"
+dx --find-usages foo.dex "Ljava/util/Collection;" remove
+
+echo "String.valueOf()"
+dx --find-usages foo.dex "Ljava/lang/String;" valueOf
diff --git a/libdex/Android.mk b/libdex/Android.mk
index 281801e..26f84d6 100644
--- a/libdex/Android.mk
+++ b/libdex/Android.mk
@@ -19,12 +19,14 @@
 	DexCatch.c \
 	DexClass.c \
 	DexDataMap.c \
+	DexDebugInfo.c \
 	DexFile.c \
 	DexInlines.c \
 	DexOptData.c \
 	DexOpcodes.c \
 	DexProto.c \
 	DexSwapVerify.c \
+	DexUtf.c \
 	InstrUtils.c \
 	Leb128.c \
 	OptInvocation.c \
diff --git a/libdex/DexDebugInfo.c b/libdex/DexDebugInfo.c
new file mode 100644
index 0000000..43765f7
--- /dev/null
+++ b/libdex/DexDebugInfo.c
@@ -0,0 +1,317 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Handling of method debug info in a .dex file.
+ */
+
+#include "DexDebugInfo.h"
+#include "DexProto.h"
+#include "Leb128.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/*
+ * Decode the arguments in a method signature, which looks something
+ * like "(ID[Ljava/lang/String;)V".
+ *
+ * Returns the type signature letter for the next argument, or ')' if
+ * there are no more args.  Advances "pSig" to point to the character
+ * after the one returned.
+ */
+static char decodeSignature(const char** pSig)
+{
+    const char* sig = *pSig;
+
+    if (*sig == '(')
+        sig++;
+
+    if (*sig == 'L') {
+        /* object ref */
+        while (*++sig != ';')
+            ;
+        *pSig = sig+1;
+        return 'L';
+    }
+    if (*sig == '[') {
+        /* array; advance past array type */
+        while (*++sig == '[')
+            ;
+        if (*sig == 'L') {
+            while (*++sig != ';')
+                ;
+        }
+        *pSig = sig+1;
+        return '[';
+    }
+    if (*sig == '\0')
+        return *sig;        /* don't advance further */
+
+    *pSig = sig+1;
+    return *sig;
+}
+
+/*
+ * returns the length of a type string, given the start of the
+ * type string. Used for the case where the debug info format
+ * references types that are inside a method type signature.
+ */
+static int typeLength(const char *type) {
+    // Assumes any leading '(' has already been gobbled
+    const char *end = type;
+    decodeSignature(&end);
+    return end - type;
+}
+
+/*
+ * Reads a string index as encoded for the debug info format,
+ * returning a string pointer or NULL as appropriate.
+ */
+static const char* readStringIdx(const DexFile* pDexFile,
+        const u1** pStream) {
+    u4 stringIdx = readUnsignedLeb128(pStream);
+
+    // Remember, encoded string indices have 1 added to them.
+    if (stringIdx == 0) {
+        return NULL;
+    } else {
+        return dexStringById(pDexFile, stringIdx - 1);
+    }
+}
+
+/*
+ * Reads a type index as encoded for the debug info format, returning
+ * a string pointer for its descriptor or NULL as appropriate.
+ */
+static const char* readTypeIdx(const DexFile* pDexFile,
+        const u1** pStream) {
+    u4 typeIdx = readUnsignedLeb128(pStream);
+
+    // Remember, encoded type indices have 1 added to them.
+    if (typeIdx == 0) {
+        return NULL;
+    } else {
+        return dexStringByTypeIdx(pDexFile, typeIdx - 1);
+    }
+}
+
+typedef struct LocalInfo {
+    const char *name;
+    const char *descriptor;
+    const char *signature;
+    u2 startAddress;
+    bool live;
+} LocalInfo;
+
+static void emitLocalCbIfLive(void *cnxt, int reg, u4 endAddress,
+        LocalInfo *localInReg, DexDebugNewLocalCb localCb)
+{
+    if (localCb != NULL && localInReg[reg].live) {
+        localCb(cnxt, reg, localInReg[reg].startAddress, endAddress,
+                localInReg[reg].name,
+                localInReg[reg].descriptor,
+                localInReg[reg].signature == NULL
+                ? "" : localInReg[reg].signature );
+    }
+}
+
+// TODO optimize localCb == NULL case
+void dexDecodeDebugInfo(
+            const DexFile* pDexFile,
+            const DexCode* pCode,
+            const char* classDescriptor,
+            u4 protoIdx,
+            u4 accessFlags,
+            DexDebugNewPositionCb posCb, DexDebugNewLocalCb localCb,
+            void* cnxt)
+{
+    const u1 *stream = dexGetDebugInfoStream(pDexFile, pCode);
+    u4 line;
+    u4 parametersSize;
+    u4 address = 0;
+    LocalInfo localInReg[pCode->registersSize];
+    u4 insnsSize = pCode->insnsSize;
+    DexProto proto = { pDexFile, protoIdx };
+
+    memset(localInReg, 0, sizeof(LocalInfo) * pCode->registersSize);
+
+    if (stream == NULL) {
+        goto end;
+    }
+
+    line = readUnsignedLeb128(&stream);
+    parametersSize = readUnsignedLeb128(&stream);
+
+    u2 argReg = pCode->registersSize - pCode->insSize;
+
+    if ((accessFlags & ACC_STATIC) == 0) {
+        /*
+         * The code is an instance method, which means that there is
+         * an initial this parameter. Also, the proto list should
+         * contain exactly one fewer argument word than the insSize
+         * indicates.
+         */
+        assert(pCode->insSize == (dexProtoComputeArgsSize(&proto) + 1));
+        localInReg[argReg].name = "this";
+        localInReg[argReg].descriptor = classDescriptor;
+        localInReg[argReg].startAddress = 0;
+        localInReg[argReg].live = true;
+        argReg++;
+    } else {
+        assert(pCode->insSize == dexProtoComputeArgsSize(&proto));
+    }
+
+    DexParameterIterator iterator;
+    dexParameterIteratorInit(&iterator, &proto);
+
+    while (parametersSize-- != 0) {
+        const char* descriptor = dexParameterIteratorNextDescriptor(&iterator);
+        const char *name;
+        int reg;
+
+        if ((argReg >= pCode->registersSize) || (descriptor == NULL)) {
+            goto invalid_stream;
+        }
+
+        name = readStringIdx(pDexFile, &stream);
+        reg = argReg;
+
+        switch (descriptor[0]) {
+            case 'D':
+            case 'J':
+                argReg += 2;
+                break;
+            default:
+                argReg += 1;
+                break;
+        }
+
+        if (name != NULL) {
+            localInReg[reg].name = name;
+            localInReg[reg].descriptor = descriptor;
+            localInReg[reg].signature = NULL;
+            localInReg[reg].startAddress = address;
+            localInReg[reg].live = true;
+        }
+    }
+
+    for (;;)  {
+        u1 opcode = *stream++;
+        u2 reg;
+
+        switch (opcode) {
+            case DBG_END_SEQUENCE:
+                goto end;
+
+            case DBG_ADVANCE_PC:
+                address += readUnsignedLeb128(&stream);
+                break;
+
+            case DBG_ADVANCE_LINE:
+                line += readSignedLeb128(&stream);
+                break;
+
+            case DBG_START_LOCAL:
+            case DBG_START_LOCAL_EXTENDED:
+                reg = readUnsignedLeb128(&stream);
+                if (reg > pCode->registersSize) goto invalid_stream;
+
+                // Emit what was previously there, if anything
+                emitLocalCbIfLive (cnxt, reg, address,
+                    localInReg, localCb);
+
+                localInReg[reg].name = readStringIdx(pDexFile, &stream);
+                localInReg[reg].descriptor = readTypeIdx(pDexFile, &stream);
+                if (opcode == DBG_START_LOCAL_EXTENDED) {
+                    localInReg[reg].signature
+                        = readStringIdx(pDexFile, &stream);
+                } else {
+                    localInReg[reg].signature = NULL;
+                }
+                localInReg[reg].startAddress = address;
+                localInReg[reg].live = true;
+                break;
+
+            case DBG_END_LOCAL:
+                reg = readUnsignedLeb128(&stream);
+                if (reg > pCode->registersSize) goto invalid_stream;
+
+                emitLocalCbIfLive (cnxt, reg, address, localInReg, localCb);
+                localInReg[reg].live = false;
+                break;
+
+            case DBG_RESTART_LOCAL:
+                reg = readUnsignedLeb128(&stream);
+                if (reg > pCode->registersSize) goto invalid_stream;
+
+                if (localInReg[reg].name == NULL
+                        || localInReg[reg].descriptor == NULL) {
+                    goto invalid_stream;
+                }
+
+                /*
+                 * If the register is live, the "restart" is superfluous,
+                 * and we don't want to mess with the existing start address.
+                 */
+                if (!localInReg[reg].live) {
+                    localInReg[reg].startAddress = address;
+                    localInReg[reg].live = true;
+                }
+                break;
+
+            case DBG_SET_PROLOGUE_END:
+            case DBG_SET_EPILOGUE_BEGIN:
+            case DBG_SET_FILE:
+                break;
+
+            default: {
+                int adjopcode = opcode - DBG_FIRST_SPECIAL;
+
+                address += adjopcode / DBG_LINE_RANGE;
+                line += DBG_LINE_BASE + (adjopcode % DBG_LINE_RANGE);
+
+                if (posCb != NULL) {
+                    int done;
+                    done = posCb(cnxt, address, line);
+
+                    if (done) {
+                        // early exit
+                        goto end;
+                    }
+                }
+                break;
+            }
+        }
+    }
+
+end:
+    {
+        int reg;
+        for (reg = 0; reg < pCode->registersSize; reg++) {
+            emitLocalCbIfLive (cnxt, reg, insnsSize, localInReg, localCb);
+        }
+    }
+    return;
+
+invalid_stream:
+    IF_LOGE() {
+        char* methodDescriptor = dexProtoCopyMethodDescriptor(&proto);
+        LOGE("Invalid debug info stream. class %s; proto %s",
+                classDescriptor, methodDescriptor);
+        free(methodDescriptor);
+    }
+}
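
The default case of the opcode switch above is the compact "special opcode" encoding: a single byte both advances the address and adjusts the line before a position entry is reported. A small worked example, assuming the usual .dex debug_info constants DBG_FIRST_SPECIAL = 0x0a, DBG_LINE_BASE = -4 and DBG_LINE_RANGE = 15:

    #include <stdio.h>

    /* Constants of the dex debug_info format (values assumed from the format spec;
     * suffixed with '_' here to avoid clashing with the libdex definitions). */
    enum { DBG_FIRST_SPECIAL_ = 0x0a, DBG_LINE_BASE_ = -4, DBG_LINE_RANGE_ = 15 };

    int main(void) {
        unsigned char opcode = 0x20;                  /* one special-opcode byte */
        int adj = opcode - DBG_FIRST_SPECIAL_;        /* 22 */
        int addrDelta = adj / DBG_LINE_RANGE_;        /* 22 / 15 = 1 code unit */
        int lineDelta = DBG_LINE_BASE_ + adj % DBG_LINE_RANGE_;  /* -4 + 7 = 3 */
        printf("address += %d, line += %d, then report the new position\n",
               addrDelta, lineDelta);
        return 0;
    }

So the single byte 0x20 moves the decoder one code unit forward and three source lines down, which is why most of a typical debug stream consists of these one-byte entries.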
diff --git a/libdex/DexDebugInfo.h b/libdex/DexDebugInfo.h
new file mode 100644
index 0000000..f23e365
--- /dev/null
+++ b/libdex/DexDebugInfo.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _LIBDEX_DEXDEBUGINFO
+#define _LIBDEX_DEXDEBUGINFO
+
+/*
+ * Handling of method debug info in a .dex file.
+ */
+
+#include "DexFile.h"
+
+/*
+ * Callback for "new position table entry".
+ * Returning non-0 causes the decoder to stop early.
+ */
+typedef int (*DexDebugNewPositionCb)(void *cnxt, u4 address, u4 lineNum);
+
+/*
+ * Callback for "new locals table entry". "signature" is an empty string
+ * if no signature is available for an entry.
+ */
+typedef void (*DexDebugNewLocalCb)(void *cnxt, u2 reg, u4 startAddress,
+        u4 endAddress, const char *name, const char *descriptor,
+        const char *signature);
+
+/*
+ * Decode debug info for method.
+ *
+ * posCb is called in ascending address order.
+ * localCb is called in order of ascending end address.
+ */
+void dexDecodeDebugInfo(
+            const DexFile* pDexFile,
+            const DexCode* pDexCode,
+            const char* classDescriptor,
+            u4 protoIdx,
+            u4 accessFlags,
+            DexDebugNewPositionCb posCb, DexDebugNewLocalCb localCb,
+            void* cnxt);
+
+#endif /* def _LIBDEX_DEXDEBUGINFO */
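
As a usage sketch (a hypothetical caller; only the callback and function signatures come from this header), dumping a method's position table needs nothing more than a posCb that prints and returns 0:

    #include <stdio.h>
    #include "DexDebugInfo.h"

    /* Print every (address, line) pair; returning non-0 would stop decoding early. */
    static int printPosition(void* cnxt, u4 address, u4 lineNum) {
        (void) cnxt;
        printf("  0x%04x  line %u\n", address, lineNum);
        return 0;
    }

    /* Assumes the caller has already resolved the method's DexCode, class
     * descriptor, proto index and access flags elsewhere. */
    static void dumpPositions(const DexFile* pDexFile, const DexCode* pCode,
            const char* classDescriptor, u4 protoIdx, u4 accessFlags) {
        dexDecodeDebugInfo(pDexFile, pCode, classDescriptor, protoIdx, accessFlags,
                printPosition, NULL /* no locals callback */, NULL /* cnxt */);
    }

Passing NULL for localCb is fine; the decoder only invokes it when it is non-NULL.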
diff --git a/libdex/DexFile.c b/libdex/DexFile.c
index f997b94..6567dea 100644
--- a/libdex/DexFile.c
+++ b/libdex/DexFile.c
@@ -43,243 +43,6 @@
 static const bool kVerifyChecksum = false;
 static const bool kVerifySignature = false;
 
-
-/* Compare two '\0'-terminated modified UTF-8 strings, using Unicode
- * code point values for comparison. This treats different encodings
- * for the same code point as equivalent, except that only a real '\0'
- * byte is considered the string terminator. The return value is as
- * for strcmp(). */
-int dexUtf8Cmp(const char* s1, const char* s2) {
-    for (;;) {
-        if (*s1 == '\0') {
-            if (*s2 == '\0') {
-                return 0;
-            }
-            return -1;
-        } else if (*s2 == '\0') {
-            return 1;
-        }
-
-        int utf1 = dexGetUtf16FromUtf8(&s1);
-        int utf2 = dexGetUtf16FromUtf8(&s2);
-        int diff = utf1 - utf2;
-
-        if (diff != 0) {
-            return diff;
-        }
-    }
-}
-
-/* for dexIsValidMemberNameUtf8(), a bit vector indicating valid low ascii */
-u4 DEX_MEMBER_VALID_LOW_ASCII[4] = {
-    0x00000000, // 00..1f low control characters; nothing valid
-    0x03ff2010, // 20..3f digits and symbols; valid: '0'..'9', '$', '-'
-    0x87fffffe, // 40..5f uppercase etc.; valid: 'A'..'Z', '_'
-    0x07fffffe  // 60..7f lowercase etc.; valid: 'a'..'z'
-};
-
-/* Helper for dexIsValidMemberNameUtf8(); do not call directly. */
-bool dexIsValidMemberNameUtf8_0(const char** pUtf8Ptr) {
-    /*
-     * It's a multibyte encoded character. Decode it and analyze. We
-     * accept anything that isn't (a) an improperly encoded low value,
-     * (b) an improper surrogate pair, (c) an encoded '\0', (d) a high
-     * control character, or (e) a high space, layout, or special
-     * character (U+00a0, U+2000..U+200f, U+2028..U+202f,
-     * U+fff0..U+ffff).
-     */
-
-    u2 utf16 = dexGetUtf16FromUtf8(pUtf8Ptr);
-
-    // Perform follow-up tests based on the high 8 bits.
-    switch (utf16 >> 8) {
-        case 0x00: {
-            // It's only valid if it's above the ISO-8859-1 high space (0xa0).
-            return (utf16 > 0x00a0);
-        }
-        case 0xd8:
-        case 0xd9:
-        case 0xda:
-        case 0xdb: {
-            /*
-             * It's a leading surrogate. Check to see that a trailing
-             * surrogate follows.
-             */
-            utf16 = dexGetUtf16FromUtf8(pUtf8Ptr);
-            return (utf16 >= 0xdc00) && (utf16 <= 0xdfff);
-        }
-        case 0xdc:
-        case 0xdd:
-        case 0xde:
-        case 0xdf: {
-            // It's a trailing surrogate, which is not valid at this point.
-            return false;
-        }
-        case 0x20:
-        case 0xff: {
-            // It's in the range that has spaces, controls, and specials.
-            switch (utf16 & 0xfff8) {
-                case 0x2000:
-                case 0x2008:
-                case 0x2028:
-                case 0xfff0:
-                case 0xfff8: {
-                    return false;
-                }
-            }
-            break;
-        }
-    }
-
-    return true;
-}
-
-/* Return whether the given string is a valid field or method name. */
-bool dexIsValidMemberName(const char* s) {
-    bool angleName = false;
-
-    switch (*s) {
-        case '\0': {
-            // The empty string is not a valid name.
-            return false;
-        }
-        case '<': {
-            /*
-             * '<' is allowed only at the start of a name, and if present,
-             * means that the name must end with '>'.
-             */
-            angleName = true;
-            s++;
-            break;
-        }
-    }
-
-    for (;;) {
-        switch (*s) {
-            case '\0': {
-                return !angleName;
-            }
-            case '>': {
-                return angleName && s[1] == '\0';
-            }
-        }
-        if (!dexIsValidMemberNameUtf8(&s)) {
-            return false;
-        }
-    }
-}
-
-/* Return whether the given string is a valid type descriptor. */
-bool dexIsValidTypeDescriptor(const char* s) {
-    int arrayCount = 0;
-
-    while (*s == '[') {
-        arrayCount++;
-        s++;
-    }
-
-    if (arrayCount > 255) {
-        // Arrays may have no more than 255 dimensions.
-        return false;
-    }
-
-    switch (*(s++)) {
-        case 'B':
-        case 'C':
-        case 'D':
-        case 'F':
-        case 'I':
-        case 'J':
-        case 'S':
-        case 'Z': {
-            // These are all single-character descriptors for primitive types.
-            return (*s == '\0');
-        }
-        case 'V': {
-            // You can't have an array of void.
-            return (arrayCount == 0) && (*s == '\0');
-        }
-        case 'L': {
-            // Break out and continue below.
-            break;
-        }
-        default: {
-            // Oddball descriptor character.
-            return false;
-        }
-    }
-
-    // We just consumed the 'L' that introduces a class name.
-
-    bool slashOrFirst = true; // first character or just encountered a slash
-    for (;;) {
-        u1 c = (u1) *s;
-        switch (c) {
-            case '\0': {
-                // Premature end.
-                return false;
-            }
-            case ';': {
-                /*
-                 * Make sure that this is the end of the string and that
-                 * it doesn't end with an empty component (including the
-                 * degenerate case of "L;").
-                 */
-                return (s[1] == '\0') && !slashOrFirst;
-            }
-            case '/': {
-                if (slashOrFirst) {
-                    // Slash at start or two slashes in a row.
-                    return false;
-                }
-                slashOrFirst = true;
-                s++;
-                break;
-            }
-            default: {
-                if (!dexIsValidMemberNameUtf8(&s)) {
-                    return false;
-                }
-                slashOrFirst = false;
-                break;
-            }
-        }
-    }
-}
-
-/* Return whether the given string is a valid reference descriptor. This
- * is true if dexIsValidTypeDescriptor() returns true and the descriptor
- * is for a class or array and not a primitive type. */
-bool dexIsReferenceDescriptor(const char* s) {
-    if (!dexIsValidTypeDescriptor(s)) {
-        return false;
-    }
-
-    return (s[0] == 'L') || (s[0] == '[');
-}
-
-/* Return whether the given string is a valid class descriptor. This
- * is true if dexIsValidTypeDescriptor() returns true and the descriptor
- * is for a class and not an array or primitive type. */
-bool dexIsClassDescriptor(const char* s) {
-    if (!dexIsValidTypeDescriptor(s)) {
-        return false;
-    }
-
-    return s[0] == 'L';
-}
-
-/* Return whether the given string is a valid field type descriptor. This
- * is true if dexIsValidTypeDescriptor() returns true and the descriptor
- * is for anything but "void". */
-bool dexIsFieldDescriptor(const char* s) {
-    if (!dexIsValidTypeDescriptor(s)) {
-        return false;
-    }
-
-    return s[0] != 'V';
-}
-
 /* Return the UTF-8 encoded string with the specified string_id index,
  * also filling in the UTF-16 size (number of 16-bit code points).*/
 const char* dexStringAndSizeById(const DexFile* pDexFile, u4 idx,
@@ -384,24 +147,6 @@
 }
 
 /*
- * Round up to the next highest power of 2.
- *
- * Found on http://graphics.stanford.edu/~seander/bithacks.html.
- */
-u4 dexRoundUpPower2(u4 val)
-{
-    val--;
-    val |= val >> 1;
-    val |= val >> 2;
-    val |= val >> 4;
-    val |= val >> 8;
-    val |= val >> 16;
-    val++;
-
-    return val;
-}
-
-/*
  * Create the class lookup hash table.
  *
  * Returns newly-allocated storage.
@@ -730,303 +475,20 @@
     return (handlerData - (u1*) pCode) + offset;
 }
 
-
 /*
- * ===========================================================================
- *      Debug info
- * ===========================================================================
- */
-
-/*
- * Decode the arguments in a method signature, which looks something
- * like "(ID[Ljava/lang/String;)V".
+ * Round up to the next highest power of 2.
  *
- * Returns the type signature letter for the next argument, or ')' if
- * there are no more args.  Advances "pSig" to point to the character
- * after the one returned.
+ * Found on http://graphics.stanford.edu/~seander/bithacks.html.
  */
-static char decodeSignature(const char** pSig)
+u4 dexRoundUpPower2(u4 val)
 {
-    const char* sig = *pSig;
+    val--;
+    val |= val >> 1;
+    val |= val >> 2;
+    val |= val >> 4;
+    val |= val >> 8;
+    val |= val >> 16;
+    val++;
 
-    if (*sig == '(')
-        sig++;
-
-    if (*sig == 'L') {
-        /* object ref */
-        while (*++sig != ';')
-            ;
-        *pSig = sig+1;
-        return 'L';
-    }
-    if (*sig == '[') {
-        /* array; advance past array type */
-        while (*++sig == '[')
-            ;
-        if (*sig == 'L') {
-            while (*++sig != ';')
-                ;
-        }
-        *pSig = sig+1;
-        return '[';
-    }
-    if (*sig == '\0')
-        return *sig;        /* don't advance further */
-
-    *pSig = sig+1;
-    return *sig;
-}
-
-/*
- * returns the length of a type string, given the start of the
- * type string. Used for the case where the debug info format
- * references types that are inside a method type signature.
- */
-static int typeLength (const char *type) {
-    // Assumes any leading '(' has already been gobbled
-    const char *end = type;
-    decodeSignature(&end);
-    return end - type;
-}
-
-/*
- * Reads a string index as encoded for the debug info format,
- * returning a string pointer or NULL as appropriate.
- */
-static const char* readStringIdx(const DexFile* pDexFile,
-        const u1** pStream) {
-    u4 stringIdx = readUnsignedLeb128(pStream);
-
-    // Remember, encoded string indicies have 1 added to them.
-    if (stringIdx == 0) {
-        return NULL;
-    } else {
-        return dexStringById(pDexFile, stringIdx - 1);
-    }
-}
-
-/*
- * Reads a type index as encoded for the debug info format, returning
- * a string pointer for its descriptor or NULL as appropriate.
- */
-static const char* readTypeIdx(const DexFile* pDexFile,
-        const u1** pStream) {
-    u4 typeIdx = readUnsignedLeb128(pStream);
-
-    // Remember, encoded type indicies have 1 added to them.
-    if (typeIdx == 0) {
-        return NULL;
-    } else {
-        return dexStringByTypeIdx(pDexFile, typeIdx - 1);
-    }
-}
-
-/* access_flag value indicating that a method is static */
-#define ACC_STATIC              0x0008
-
-typedef struct LocalInfo {
-    const char *name;
-    const char *descriptor;
-    const char *signature;
-    u2 startAddress;
-    bool live;
-} LocalInfo;
-
-static void emitLocalCbIfLive (void *cnxt, int reg, u4 endAddress,
-        LocalInfo *localInReg, DexDebugNewLocalCb localCb)
-{
-    if (localCb != NULL && localInReg[reg].live) {
-        localCb(cnxt, reg, localInReg[reg].startAddress, endAddress,
-                localInReg[reg].name,
-                localInReg[reg].descriptor,
-                localInReg[reg].signature == NULL
-                ? "" : localInReg[reg].signature );
-    }
-}
-
-// TODO optimize localCb == NULL case
-void dexDecodeDebugInfo(
-            const DexFile* pDexFile,
-            const DexCode* pCode,
-            const char* classDescriptor,
-            u4 protoIdx,
-            u4 accessFlags,
-            DexDebugNewPositionCb posCb, DexDebugNewLocalCb localCb,
-            void* cnxt)
-{
-    const u1 *stream = dexGetDebugInfoStream(pDexFile, pCode);
-    u4 line;
-    u4 parametersSize;
-    u4 address = 0;
-    LocalInfo localInReg[pCode->registersSize];
-    u4 insnsSize = pCode->insnsSize;
-    DexProto proto = { pDexFile, protoIdx };
-
-    memset(localInReg, 0, sizeof(LocalInfo) * pCode->registersSize);
-
-    if (stream == NULL) {
-        goto end;
-    }
-
-    line = readUnsignedLeb128(&stream);
-    parametersSize = readUnsignedLeb128(&stream);
-
-    u2 argReg = pCode->registersSize - pCode->insSize;
-
-    if ((accessFlags & ACC_STATIC) == 0) {
-        /*
-         * The code is an instance method, which means that there is
-         * an initial this parameter. Also, the proto list should
-         * contain exactly one fewer argument word than the insSize
-         * indicates.
-         */
-        assert(pCode->insSize == (dexProtoComputeArgsSize(&proto) + 1));
-        localInReg[argReg].name = "this";
-        localInReg[argReg].descriptor = classDescriptor;
-        localInReg[argReg].startAddress = 0;
-        localInReg[argReg].live = true;
-        argReg++;
-    } else {
-        assert(pCode->insSize == dexProtoComputeArgsSize(&proto));
-    }
-
-    DexParameterIterator iterator;
-    dexParameterIteratorInit(&iterator, &proto);
-
-    while (parametersSize-- != 0) {
-        const char* descriptor = dexParameterIteratorNextDescriptor(&iterator);
-        const char *name;
-        int reg;
-
-        if ((argReg >= pCode->registersSize) || (descriptor == NULL)) {
-            goto invalid_stream;
-        }
-
-        name = readStringIdx(pDexFile, &stream);
-        reg = argReg;
-
-        switch (descriptor[0]) {
-            case 'D':
-            case 'J':
-                argReg += 2;
-                break;
-            default:
-                argReg += 1;
-                break;
-        }
-
-        if (name != NULL) {
-            localInReg[reg].name = name;
-            localInReg[reg].descriptor = descriptor;
-            localInReg[reg].signature = NULL;
-            localInReg[reg].startAddress = address;
-            localInReg[reg].live = true;
-        }
-    }
-
-    for (;;)  {
-        u1 opcode = *stream++;
-        u2 reg;
-
-        switch (opcode) {
-            case DBG_END_SEQUENCE:
-                goto end;
-
-            case DBG_ADVANCE_PC:
-                address += readUnsignedLeb128(&stream);
-                break;
-
-            case DBG_ADVANCE_LINE:
-                line += readSignedLeb128(&stream);
-                break;
-
-            case DBG_START_LOCAL:
-            case DBG_START_LOCAL_EXTENDED:
-                reg = readUnsignedLeb128(&stream);
-                if (reg > pCode->registersSize) goto invalid_stream;
-
-                // Emit what was previously there, if anything
-                emitLocalCbIfLive (cnxt, reg, address,
-                    localInReg, localCb);
-
-                localInReg[reg].name = readStringIdx(pDexFile, &stream);
-                localInReg[reg].descriptor = readTypeIdx(pDexFile, &stream);
-                if (opcode == DBG_START_LOCAL_EXTENDED) {
-                    localInReg[reg].signature
-                        = readStringIdx(pDexFile, &stream);
-                } else {
-                    localInReg[reg].signature = NULL;
-                }
-                localInReg[reg].startAddress = address;
-                localInReg[reg].live = true;
-                break;
-
-            case DBG_END_LOCAL:
-                reg = readUnsignedLeb128(&stream);
-                if (reg > pCode->registersSize) goto invalid_stream;
-
-                emitLocalCbIfLive (cnxt, reg, address, localInReg, localCb);
-                localInReg[reg].live = false;
-                break;
-
-            case DBG_RESTART_LOCAL:
-                reg = readUnsignedLeb128(&stream);
-                if (reg > pCode->registersSize) goto invalid_stream;
-
-                if (localInReg[reg].name == NULL
-                        || localInReg[reg].descriptor == NULL) {
-                    goto invalid_stream;
-                }
-
-                /*
-                 * If the register is live, the "restart" is superfluous,
-                 * and we don't want to mess with the existing start address.
-                 */
-                if (!localInReg[reg].live) {
-                    localInReg[reg].startAddress = address;
-                    localInReg[reg].live = true;
-                }
-                break;
-
-            case DBG_SET_PROLOGUE_END:
-            case DBG_SET_EPILOGUE_BEGIN:
-            case DBG_SET_FILE:
-                break;
-
-            default: {
-                int adjopcode = opcode - DBG_FIRST_SPECIAL;
-
-                address += adjopcode / DBG_LINE_RANGE;
-                line += DBG_LINE_BASE + (adjopcode % DBG_LINE_RANGE);
-
-                if (posCb != NULL) {
-                    int done;
-                    done = posCb(cnxt, address, line);
-
-                    if (done) {
-                        // early exit
-                        goto end;
-                    }
-                }
-                break;
-            }
-        }
-    }
-
-end:
-    {
-        int reg;
-        for (reg = 0; reg < pCode->registersSize; reg++) {
-            emitLocalCbIfLive (cnxt, reg, insnsSize, localInReg, localCb);
-        }
-    }
-    return;
-
-invalid_stream:
-    IF_LOGE() {
-        char* methodDescriptor = dexProtoCopyMethodDescriptor(&proto);
-        LOGE("Invalid debug info stream. class %s; proto %s",
-                classDescriptor, methodDescriptor);
-        free(methodDescriptor);
-    }
+    return val;
 }
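
A worked example of the relocated dexRoundUpPower2(): for val = 37, the leading decrement gives 36 (0b100100); the cascade of shift-and-OR steps copies the highest set bit into every lower position, producing 0b111111 = 63; the final increment returns 64. The initial decrement is also what makes an exact power of two map to itself (64 becomes 63, smears back to 63, and increments to 64).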
diff --git a/libdex/DexFile.h b/libdex/DexFile.h
index 06dc864..13eac87 100644
--- a/libdex/DexFile.h
+++ b/libdex/DexFile.h
@@ -464,10 +464,7 @@
     /* pad for 64-bit alignment if necessary */
 } DexOptHeader;
 
-#define DEX_FLAG_VERIFIED           (1)     /* tried to verify all classes */
 #define DEX_OPT_FLAG_BIG            (1<<1)  /* swapped to big-endian */
-#define DEX_OPT_FLAG_FIELDS         (1<<2)  /* field access optimized */
-#define DEX_OPT_FLAG_INVOCATIONS    (1<<3)  /* method calls optimized */
 
 #define DEX_INTERFACE_CACHE_SIZE    128     /* must be power of 2 */
 
@@ -753,35 +750,6 @@
     }
 }
 
-/*
- * Callback for "new position table entry".
- * Returning non-0 causes the decoder to stop early.
- */
-typedef int (*DexDebugNewPositionCb)(void *cnxt, u4 address, u4 lineNum);
-
-/*
- * Callback for "new locals table entry". "signature" is an empty string
- * if no signature is available for an entry.
- */
-typedef void (*DexDebugNewLocalCb)(void *cnxt, u2 reg, u4 startAddress,
-        u4 endAddress, const char *name, const char *descriptor,
-        const char *signature);
-
-/*
- * Decode debug info for method.
- *
- * posCb is called in ascending address order.
- * localCb is called in order of ascending end address.
- */
-void dexDecodeDebugInfo(
-            const DexFile* pDexFile,
-            const DexCode* pDexCode,
-            const char* classDescriptor,
-            u4 protoIdx,
-            u4 accessFlags,
-            DexDebugNewPositionCb posCb, DexDebugNewLocalCb localCb,
-            void* cnxt);
-
 /* DexClassDef convenience - get class descriptor */
 DEX_INLINE const char* dexGetClassDescriptor(const DexFile* pDexFile,
     const DexClassDef* pClassDef)
@@ -949,108 +917,4 @@
         (pDexFile->baseAddr + dexGetAnnotationOff(pAnnoSet, idx));
 }
 
-
-/*
- * ===========================================================================
- *      Utility Functions
- * ===========================================================================
- */
-
-/*
- * Retrieve the next UTF-16 character from a UTF-8 string.
- *
- * Advances "*pUtf8Ptr" to the start of the next character.
- *
- * WARNING: If a string is corrupted by dropping a '\0' in the middle
- * of a 3-byte sequence, you can end up overrunning the buffer with
- * reads (and possibly with the writes if the length was computed and
- * cached before the damage). For performance reasons, this function
- * assumes that the string being parsed is known to be valid (e.g., by
- * already being verified). Most strings we process here are coming
- * out of dex files or other internal translations, so the only real
- * risk comes from the JNI NewStringUTF call.
- */
-DEX_INLINE u2 dexGetUtf16FromUtf8(const char** pUtf8Ptr)
-{
-    unsigned int one, two, three;
-
-    one = *(*pUtf8Ptr)++;
-    if ((one & 0x80) != 0) {
-        /* two- or three-byte encoding */
-        two = *(*pUtf8Ptr)++;
-        if ((one & 0x20) != 0) {
-            /* three-byte encoding */
-            three = *(*pUtf8Ptr)++;
-            return ((one & 0x0f) << 12) |
-                   ((two & 0x3f) << 6) |
-                   (three & 0x3f);
-        } else {
-            /* two-byte encoding */
-            return ((one & 0x1f) << 6) |
-                   (two & 0x3f);
-        }
-    } else {
-        /* one-byte encoding */
-        return one;
-    }
-}
-
-/* Compare two '\0'-terminated modified UTF-8 strings, using Unicode
- * code point values for comparison. This treats different encodings
- * for the same code point as equivalent, except that only a real '\0'
- * byte is considered the string terminator. The return value is as
- * for strcmp(). */
-int dexUtf8Cmp(const char* s1, const char* s2);
-
-
-/* for dexIsValidMemberNameUtf8(), a bit vector indicating valid low ascii */
-extern u4 DEX_MEMBER_VALID_LOW_ASCII[4];
-
-/* Helper for dexIsValidMemberUtf8(); do not call directly. */
-bool dexIsValidMemberNameUtf8_0(const char** pUtf8Ptr);
-
-/* Return whether the pointed-at modified-UTF-8 encoded character is
- * valid as part of a member name, updating the pointer to point past
- * the consumed character. This will consume two encoded UTF-16 code
- * points if the character is encoded as a surrogate pair. Also, if
- * this function returns false, then the given pointer may only have
- * been partially advanced. */
-DEX_INLINE bool dexIsValidMemberNameUtf8(const char** pUtf8Ptr) {
-    u1 c = (u1) **pUtf8Ptr;
-    if (c <= 0x7f) {
-        // It's low-ascii, so check the table.
-        u4 wordIdx = c >> 5;
-        u4 bitIdx = c & 0x1f;
-        (*pUtf8Ptr)++;
-        return (DEX_MEMBER_VALID_LOW_ASCII[wordIdx] & (1 << bitIdx)) != 0;
-    }
-
-    /*
-     * It's a multibyte encoded character. Call a non-inline function
-     * for the heavy lifting.
-     */
-    return dexIsValidMemberNameUtf8_0(pUtf8Ptr);
-}
-
-/* Return whether the given string is a valid field or method name. */
-bool dexIsValidMemberName(const char* s);
-
-/* Return whether the given string is a valid type descriptor. */
-bool dexIsValidTypeDescriptor(const char* s);
-
-/* Return whether the given string is a valid reference descriptor. This
- * is true if dexIsValidTypeDescriptor() returns true and the descriptor
- * is for a class or array and not a primitive type. */
-bool dexIsReferenceDescriptor(const char* s);
-
-/* Return whether the given string is a valid class descriptor. This
- * is true if dexIsValidTypeDescriptor() returns true and the descriptor
- * is for a class and not an array or primitive type. */
-bool dexIsClassDescriptor(const char* s);
-
-/* Return whether the given string is a valid field type descriptor. This
- * is true if dexIsValidTypeDescriptor() returns true and the descriptor
- * is for anything but "void". */
-bool dexIsFieldDescriptor(const char* s);
-
 #endif /*_LIBDEX_DEXFILE*/
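
These UTF-8 helpers presumably move into the new DexUtf module rather than disappear: DexUtf.c joins the makefile earlier in this patch and DexInlines.c gains a DexUtf.h include below. For reference, the dexGetUtf16FromUtf8() decoder shown in the removed block is the inverse of the modified UTF-8 encoding exercised by the new dx Mutf8 test: for the bytes 0xC0 0x80, the first byte has bit 7 set and bit 5 clear, so the two-byte branch computes ((0xC0 & 0x1F) << 6) | (0x80 & 0x3F) = 0, recovering U+0000 without a bare 0x00 byte ever appearing in the stream.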
diff --git a/libdex/DexInlines.c b/libdex/DexInlines.c
index f33835f..cbedb62 100644
--- a/libdex/DexInlines.c
+++ b/libdex/DexInlines.c
@@ -25,6 +25,7 @@
 #include "DexCatch.h"
 #include "DexClass.h"
 #include "DexDataMap.h"
+#include "DexUtf.h"
 #include "DexOpcodes.h"
 #include "DexProto.h"
 #include "InstrUtils.h"
diff --git a/libdex/DexOpcodes.c b/libdex/DexOpcodes.c
index 0cbc518..50254a7 100644
--- a/libdex/DexOpcodes.c
+++ b/libdex/DexOpcodes.c
@@ -270,7 +270,7 @@
     "^throw-verification-error",
     "+execute-inline",
     "+execute-inline/range",
-    "+invoke-direct-empty",
+    "+invoke-object-init/range",
     "+return-void-barrier",
     "+iget-quick",
     "+iget-wide-quick",
@@ -286,6 +286,262 @@
     "+sget-object-volatile",
     "+sput-object-volatile",
     "dispatch-ff",
+    "const-class/jumbo",
+    "check-cast/jumbo",
+    "instance-of/jumbo",
+    "new-instance/jumbo",
+    "new-array/jumbo",
+    "filled-new-array/jumbo",
+    "iget/jumbo",
+    "iget-wide/jumbo",
+    "iget-object/jumbo",
+    "iget-boolean/jumbo",
+    "iget-byte/jumbo",
+    "iget-char/jumbo",
+    "iget-short/jumbo",
+    "iput/jumbo",
+    "iput-wide/jumbo",
+    "iput-object/jumbo",
+    "iput-boolean/jumbo",
+    "iput-byte/jumbo",
+    "iput-char/jumbo",
+    "iput-short/jumbo",
+    "sget/jumbo",
+    "sget-wide/jumbo",
+    "sget-object/jumbo",
+    "sget-boolean/jumbo",
+    "sget-byte/jumbo",
+    "sget-char/jumbo",
+    "sget-short/jumbo",
+    "sput/jumbo",
+    "sput-wide/jumbo",
+    "sput-object/jumbo",
+    "sput-boolean/jumbo",
+    "sput-byte/jumbo",
+    "sput-char/jumbo",
+    "sput-short/jumbo",
+    "invoke-virtual/jumbo",
+    "invoke-super/jumbo",
+    "invoke-direct/jumbo",
+    "invoke-static/jumbo",
+    "invoke-interface/jumbo",
+    "unused-27ff",
+    "unused-28ff",
+    "unused-29ff",
+    "unused-2aff",
+    "unused-2bff",
+    "unused-2cff",
+    "unused-2dff",
+    "unused-2eff",
+    "unused-2fff",
+    "unused-30ff",
+    "unused-31ff",
+    "unused-32ff",
+    "unused-33ff",
+    "unused-34ff",
+    "unused-35ff",
+    "unused-36ff",
+    "unused-37ff",
+    "unused-38ff",
+    "unused-39ff",
+    "unused-3aff",
+    "unused-3bff",
+    "unused-3cff",
+    "unused-3dff",
+    "unused-3eff",
+    "unused-3fff",
+    "unused-40ff",
+    "unused-41ff",
+    "unused-42ff",
+    "unused-43ff",
+    "unused-44ff",
+    "unused-45ff",
+    "unused-46ff",
+    "unused-47ff",
+    "unused-48ff",
+    "unused-49ff",
+    "unused-4aff",
+    "unused-4bff",
+    "unused-4cff",
+    "unused-4dff",
+    "unused-4eff",
+    "unused-4fff",
+    "unused-50ff",
+    "unused-51ff",
+    "unused-52ff",
+    "unused-53ff",
+    "unused-54ff",
+    "unused-55ff",
+    "unused-56ff",
+    "unused-57ff",
+    "unused-58ff",
+    "unused-59ff",
+    "unused-5aff",
+    "unused-5bff",
+    "unused-5cff",
+    "unused-5dff",
+    "unused-5eff",
+    "unused-5fff",
+    "unused-60ff",
+    "unused-61ff",
+    "unused-62ff",
+    "unused-63ff",
+    "unused-64ff",
+    "unused-65ff",
+    "unused-66ff",
+    "unused-67ff",
+    "unused-68ff",
+    "unused-69ff",
+    "unused-6aff",
+    "unused-6bff",
+    "unused-6cff",
+    "unused-6dff",
+    "unused-6eff",
+    "unused-6fff",
+    "unused-70ff",
+    "unused-71ff",
+    "unused-72ff",
+    "unused-73ff",
+    "unused-74ff",
+    "unused-75ff",
+    "unused-76ff",
+    "unused-77ff",
+    "unused-78ff",
+    "unused-79ff",
+    "unused-7aff",
+    "unused-7bff",
+    "unused-7cff",
+    "unused-7dff",
+    "unused-7eff",
+    "unused-7fff",
+    "unused-80ff",
+    "unused-81ff",
+    "unused-82ff",
+    "unused-83ff",
+    "unused-84ff",
+    "unused-85ff",
+    "unused-86ff",
+    "unused-87ff",
+    "unused-88ff",
+    "unused-89ff",
+    "unused-8aff",
+    "unused-8bff",
+    "unused-8cff",
+    "unused-8dff",
+    "unused-8eff",
+    "unused-8fff",
+    "unused-90ff",
+    "unused-91ff",
+    "unused-92ff",
+    "unused-93ff",
+    "unused-94ff",
+    "unused-95ff",
+    "unused-96ff",
+    "unused-97ff",
+    "unused-98ff",
+    "unused-99ff",
+    "unused-9aff",
+    "unused-9bff",
+    "unused-9cff",
+    "unused-9dff",
+    "unused-9eff",
+    "unused-9fff",
+    "unused-a0ff",
+    "unused-a1ff",
+    "unused-a2ff",
+    "unused-a3ff",
+    "unused-a4ff",
+    "unused-a5ff",
+    "unused-a6ff",
+    "unused-a7ff",
+    "unused-a8ff",
+    "unused-a9ff",
+    "unused-aaff",
+    "unused-abff",
+    "unused-acff",
+    "unused-adff",
+    "unused-aeff",
+    "unused-afff",
+    "unused-b0ff",
+    "unused-b1ff",
+    "unused-b2ff",
+    "unused-b3ff",
+    "unused-b4ff",
+    "unused-b5ff",
+    "unused-b6ff",
+    "unused-b7ff",
+    "unused-b8ff",
+    "unused-b9ff",
+    "unused-baff",
+    "unused-bbff",
+    "unused-bcff",
+    "unused-bdff",
+    "unused-beff",
+    "unused-bfff",
+    "unused-c0ff",
+    "unused-c1ff",
+    "unused-c2ff",
+    "unused-c3ff",
+    "unused-c4ff",
+    "unused-c5ff",
+    "unused-c6ff",
+    "unused-c7ff",
+    "unused-c8ff",
+    "unused-c9ff",
+    "unused-caff",
+    "unused-cbff",
+    "unused-ccff",
+    "unused-cdff",
+    "unused-ceff",
+    "unused-cfff",
+    "unused-d0ff",
+    "unused-d1ff",
+    "unused-d2ff",
+    "unused-d3ff",
+    "unused-d4ff",
+    "unused-d5ff",
+    "unused-d6ff",
+    "unused-d7ff",
+    "unused-d8ff",
+    "unused-d9ff",
+    "unused-daff",
+    "unused-dbff",
+    "unused-dcff",
+    "unused-ddff",
+    "unused-deff",
+    "unused-dfff",
+    "unused-e0ff",
+    "unused-e1ff",
+    "unused-e2ff",
+    "unused-e3ff",
+    "unused-e4ff",
+    "unused-e5ff",
+    "unused-e6ff",
+    "unused-e7ff",
+    "unused-e8ff",
+    "unused-e9ff",
+    "unused-eaff",
+    "unused-ebff",
+    "unused-ecff",
+    "unused-edff",
+    "unused-eeff",
+    "unused-efff",
+    "unused-f0ff",
+    "unused-f1ff",
+    "+invoke-object-init/jumbo",
+    "+iget-volatile/jumbo",
+    "+iget-wide-volatile/jumbo",
+    "+iget-object-volatile/jumbo",
+    "+iput-volatile/jumbo",
+    "+iput-wide-volatile/jumbo",
+    "+iput-object-volatile/jumbo",
+    "+sget-volatile/jumbo",
+    "+sget-wide-volatile/jumbo",
+    "+sget-object-volatile/jumbo",
+    "+sput-volatile/jumbo",
+    "+sput-wide-volatile/jumbo",
+    "+sput-object-volatile/jumbo",
+    "^throw-verification-error/jumbo",
     // END(libdex-opcode-names)
 };
 
diff --git a/libdex/DexOpcodes.h b/libdex/DexOpcodes.h
index fe9e9d1..07188b2 100644
--- a/libdex/DexOpcodes.h
+++ b/libdex/DexOpcodes.h
@@ -40,8 +40,8 @@
  * extended opcodes.
  */
 // BEGIN(libdex-maximum-values); GENERATED AUTOMATICALLY BY opcode-gen
-#define kMaxOpcodeValue 0xff
-#define kNumPackedOpcodes 0x100
+#define kMaxOpcodeValue 0xffff
+#define kNumPackedOpcodes 0x200
 // END(libdex-maximum-values); GENERATED AUTOMATICALLY BY opcode-gen
 
 /*
@@ -308,7 +308,7 @@
     OP_THROW_VERIFICATION_ERROR     = 0xed,
     OP_EXECUTE_INLINE               = 0xee,
     OP_EXECUTE_INLINE_RANGE         = 0xef,
-    OP_INVOKE_DIRECT_EMPTY          = 0xf0,
+    OP_INVOKE_OBJECT_INIT_RANGE     = 0xf0,
     OP_RETURN_VOID_BARRIER          = 0xf1,
     OP_IGET_QUICK                   = 0xf2,
     OP_IGET_WIDE_QUICK              = 0xf3,
@@ -324,6 +324,262 @@
     OP_SGET_OBJECT_VOLATILE         = 0xfd,
     OP_SPUT_OBJECT_VOLATILE         = 0xfe,
     OP_DISPATCH_FF                  = 0xff,
+    OP_CONST_CLASS_JUMBO            = 0x100,
+    OP_CHECK_CAST_JUMBO             = 0x101,
+    OP_INSTANCE_OF_JUMBO            = 0x102,
+    OP_NEW_INSTANCE_JUMBO           = 0x103,
+    OP_NEW_ARRAY_JUMBO              = 0x104,
+    OP_FILLED_NEW_ARRAY_JUMBO       = 0x105,
+    OP_IGET_JUMBO                   = 0x106,
+    OP_IGET_WIDE_JUMBO              = 0x107,
+    OP_IGET_OBJECT_JUMBO            = 0x108,
+    OP_IGET_BOOLEAN_JUMBO           = 0x109,
+    OP_IGET_BYTE_JUMBO              = 0x10a,
+    OP_IGET_CHAR_JUMBO              = 0x10b,
+    OP_IGET_SHORT_JUMBO             = 0x10c,
+    OP_IPUT_JUMBO                   = 0x10d,
+    OP_IPUT_WIDE_JUMBO              = 0x10e,
+    OP_IPUT_OBJECT_JUMBO            = 0x10f,
+    OP_IPUT_BOOLEAN_JUMBO           = 0x110,
+    OP_IPUT_BYTE_JUMBO              = 0x111,
+    OP_IPUT_CHAR_JUMBO              = 0x112,
+    OP_IPUT_SHORT_JUMBO             = 0x113,
+    OP_SGET_JUMBO                   = 0x114,
+    OP_SGET_WIDE_JUMBO              = 0x115,
+    OP_SGET_OBJECT_JUMBO            = 0x116,
+    OP_SGET_BOOLEAN_JUMBO           = 0x117,
+    OP_SGET_BYTE_JUMBO              = 0x118,
+    OP_SGET_CHAR_JUMBO              = 0x119,
+    OP_SGET_SHORT_JUMBO             = 0x11a,
+    OP_SPUT_JUMBO                   = 0x11b,
+    OP_SPUT_WIDE_JUMBO              = 0x11c,
+    OP_SPUT_OBJECT_JUMBO            = 0x11d,
+    OP_SPUT_BOOLEAN_JUMBO           = 0x11e,
+    OP_SPUT_BYTE_JUMBO              = 0x11f,
+    OP_SPUT_CHAR_JUMBO              = 0x120,
+    OP_SPUT_SHORT_JUMBO             = 0x121,
+    OP_INVOKE_VIRTUAL_JUMBO         = 0x122,
+    OP_INVOKE_SUPER_JUMBO           = 0x123,
+    OP_INVOKE_DIRECT_JUMBO          = 0x124,
+    OP_INVOKE_STATIC_JUMBO          = 0x125,
+    OP_INVOKE_INTERFACE_JUMBO       = 0x126,
+    OP_UNUSED_27FF                  = 0x127,
+    OP_UNUSED_28FF                  = 0x128,
+    OP_UNUSED_29FF                  = 0x129,
+    OP_UNUSED_2AFF                  = 0x12a,
+    OP_UNUSED_2BFF                  = 0x12b,
+    OP_UNUSED_2CFF                  = 0x12c,
+    OP_UNUSED_2DFF                  = 0x12d,
+    OP_UNUSED_2EFF                  = 0x12e,
+    OP_UNUSED_2FFF                  = 0x12f,
+    OP_UNUSED_30FF                  = 0x130,
+    OP_UNUSED_31FF                  = 0x131,
+    OP_UNUSED_32FF                  = 0x132,
+    OP_UNUSED_33FF                  = 0x133,
+    OP_UNUSED_34FF                  = 0x134,
+    OP_UNUSED_35FF                  = 0x135,
+    OP_UNUSED_36FF                  = 0x136,
+    OP_UNUSED_37FF                  = 0x137,
+    OP_UNUSED_38FF                  = 0x138,
+    OP_UNUSED_39FF                  = 0x139,
+    OP_UNUSED_3AFF                  = 0x13a,
+    OP_UNUSED_3BFF                  = 0x13b,
+    OP_UNUSED_3CFF                  = 0x13c,
+    OP_UNUSED_3DFF                  = 0x13d,
+    OP_UNUSED_3EFF                  = 0x13e,
+    OP_UNUSED_3FFF                  = 0x13f,
+    OP_UNUSED_40FF                  = 0x140,
+    OP_UNUSED_41FF                  = 0x141,
+    OP_UNUSED_42FF                  = 0x142,
+    OP_UNUSED_43FF                  = 0x143,
+    OP_UNUSED_44FF                  = 0x144,
+    OP_UNUSED_45FF                  = 0x145,
+    OP_UNUSED_46FF                  = 0x146,
+    OP_UNUSED_47FF                  = 0x147,
+    OP_UNUSED_48FF                  = 0x148,
+    OP_UNUSED_49FF                  = 0x149,
+    OP_UNUSED_4AFF                  = 0x14a,
+    OP_UNUSED_4BFF                  = 0x14b,
+    OP_UNUSED_4CFF                  = 0x14c,
+    OP_UNUSED_4DFF                  = 0x14d,
+    OP_UNUSED_4EFF                  = 0x14e,
+    OP_UNUSED_4FFF                  = 0x14f,
+    OP_UNUSED_50FF                  = 0x150,
+    OP_UNUSED_51FF                  = 0x151,
+    OP_UNUSED_52FF                  = 0x152,
+    OP_UNUSED_53FF                  = 0x153,
+    OP_UNUSED_54FF                  = 0x154,
+    OP_UNUSED_55FF                  = 0x155,
+    OP_UNUSED_56FF                  = 0x156,
+    OP_UNUSED_57FF                  = 0x157,
+    OP_UNUSED_58FF                  = 0x158,
+    OP_UNUSED_59FF                  = 0x159,
+    OP_UNUSED_5AFF                  = 0x15a,
+    OP_UNUSED_5BFF                  = 0x15b,
+    OP_UNUSED_5CFF                  = 0x15c,
+    OP_UNUSED_5DFF                  = 0x15d,
+    OP_UNUSED_5EFF                  = 0x15e,
+    OP_UNUSED_5FFF                  = 0x15f,
+    OP_UNUSED_60FF                  = 0x160,
+    OP_UNUSED_61FF                  = 0x161,
+    OP_UNUSED_62FF                  = 0x162,
+    OP_UNUSED_63FF                  = 0x163,
+    OP_UNUSED_64FF                  = 0x164,
+    OP_UNUSED_65FF                  = 0x165,
+    OP_UNUSED_66FF                  = 0x166,
+    OP_UNUSED_67FF                  = 0x167,
+    OP_UNUSED_68FF                  = 0x168,
+    OP_UNUSED_69FF                  = 0x169,
+    OP_UNUSED_6AFF                  = 0x16a,
+    OP_UNUSED_6BFF                  = 0x16b,
+    OP_UNUSED_6CFF                  = 0x16c,
+    OP_UNUSED_6DFF                  = 0x16d,
+    OP_UNUSED_6EFF                  = 0x16e,
+    OP_UNUSED_6FFF                  = 0x16f,
+    OP_UNUSED_70FF                  = 0x170,
+    OP_UNUSED_71FF                  = 0x171,
+    OP_UNUSED_72FF                  = 0x172,
+    OP_UNUSED_73FF                  = 0x173,
+    OP_UNUSED_74FF                  = 0x174,
+    OP_UNUSED_75FF                  = 0x175,
+    OP_UNUSED_76FF                  = 0x176,
+    OP_UNUSED_77FF                  = 0x177,
+    OP_UNUSED_78FF                  = 0x178,
+    OP_UNUSED_79FF                  = 0x179,
+    OP_UNUSED_7AFF                  = 0x17a,
+    OP_UNUSED_7BFF                  = 0x17b,
+    OP_UNUSED_7CFF                  = 0x17c,
+    OP_UNUSED_7DFF                  = 0x17d,
+    OP_UNUSED_7EFF                  = 0x17e,
+    OP_UNUSED_7FFF                  = 0x17f,
+    OP_UNUSED_80FF                  = 0x180,
+    OP_UNUSED_81FF                  = 0x181,
+    OP_UNUSED_82FF                  = 0x182,
+    OP_UNUSED_83FF                  = 0x183,
+    OP_UNUSED_84FF                  = 0x184,
+    OP_UNUSED_85FF                  = 0x185,
+    OP_UNUSED_86FF                  = 0x186,
+    OP_UNUSED_87FF                  = 0x187,
+    OP_UNUSED_88FF                  = 0x188,
+    OP_UNUSED_89FF                  = 0x189,
+    OP_UNUSED_8AFF                  = 0x18a,
+    OP_UNUSED_8BFF                  = 0x18b,
+    OP_UNUSED_8CFF                  = 0x18c,
+    OP_UNUSED_8DFF                  = 0x18d,
+    OP_UNUSED_8EFF                  = 0x18e,
+    OP_UNUSED_8FFF                  = 0x18f,
+    OP_UNUSED_90FF                  = 0x190,
+    OP_UNUSED_91FF                  = 0x191,
+    OP_UNUSED_92FF                  = 0x192,
+    OP_UNUSED_93FF                  = 0x193,
+    OP_UNUSED_94FF                  = 0x194,
+    OP_UNUSED_95FF                  = 0x195,
+    OP_UNUSED_96FF                  = 0x196,
+    OP_UNUSED_97FF                  = 0x197,
+    OP_UNUSED_98FF                  = 0x198,
+    OP_UNUSED_99FF                  = 0x199,
+    OP_UNUSED_9AFF                  = 0x19a,
+    OP_UNUSED_9BFF                  = 0x19b,
+    OP_UNUSED_9CFF                  = 0x19c,
+    OP_UNUSED_9DFF                  = 0x19d,
+    OP_UNUSED_9EFF                  = 0x19e,
+    OP_UNUSED_9FFF                  = 0x19f,
+    OP_UNUSED_A0FF                  = 0x1a0,
+    OP_UNUSED_A1FF                  = 0x1a1,
+    OP_UNUSED_A2FF                  = 0x1a2,
+    OP_UNUSED_A3FF                  = 0x1a3,
+    OP_UNUSED_A4FF                  = 0x1a4,
+    OP_UNUSED_A5FF                  = 0x1a5,
+    OP_UNUSED_A6FF                  = 0x1a6,
+    OP_UNUSED_A7FF                  = 0x1a7,
+    OP_UNUSED_A8FF                  = 0x1a8,
+    OP_UNUSED_A9FF                  = 0x1a9,
+    OP_UNUSED_AAFF                  = 0x1aa,
+    OP_UNUSED_ABFF                  = 0x1ab,
+    OP_UNUSED_ACFF                  = 0x1ac,
+    OP_UNUSED_ADFF                  = 0x1ad,
+    OP_UNUSED_AEFF                  = 0x1ae,
+    OP_UNUSED_AFFF                  = 0x1af,
+    OP_UNUSED_B0FF                  = 0x1b0,
+    OP_UNUSED_B1FF                  = 0x1b1,
+    OP_UNUSED_B2FF                  = 0x1b2,
+    OP_UNUSED_B3FF                  = 0x1b3,
+    OP_UNUSED_B4FF                  = 0x1b4,
+    OP_UNUSED_B5FF                  = 0x1b5,
+    OP_UNUSED_B6FF                  = 0x1b6,
+    OP_UNUSED_B7FF                  = 0x1b7,
+    OP_UNUSED_B8FF                  = 0x1b8,
+    OP_UNUSED_B9FF                  = 0x1b9,
+    OP_UNUSED_BAFF                  = 0x1ba,
+    OP_UNUSED_BBFF                  = 0x1bb,
+    OP_UNUSED_BCFF                  = 0x1bc,
+    OP_UNUSED_BDFF                  = 0x1bd,
+    OP_UNUSED_BEFF                  = 0x1be,
+    OP_UNUSED_BFFF                  = 0x1bf,
+    OP_UNUSED_C0FF                  = 0x1c0,
+    OP_UNUSED_C1FF                  = 0x1c1,
+    OP_UNUSED_C2FF                  = 0x1c2,
+    OP_UNUSED_C3FF                  = 0x1c3,
+    OP_UNUSED_C4FF                  = 0x1c4,
+    OP_UNUSED_C5FF                  = 0x1c5,
+    OP_UNUSED_C6FF                  = 0x1c6,
+    OP_UNUSED_C7FF                  = 0x1c7,
+    OP_UNUSED_C8FF                  = 0x1c8,
+    OP_UNUSED_C9FF                  = 0x1c9,
+    OP_UNUSED_CAFF                  = 0x1ca,
+    OP_UNUSED_CBFF                  = 0x1cb,
+    OP_UNUSED_CCFF                  = 0x1cc,
+    OP_UNUSED_CDFF                  = 0x1cd,
+    OP_UNUSED_CEFF                  = 0x1ce,
+    OP_UNUSED_CFFF                  = 0x1cf,
+    OP_UNUSED_D0FF                  = 0x1d0,
+    OP_UNUSED_D1FF                  = 0x1d1,
+    OP_UNUSED_D2FF                  = 0x1d2,
+    OP_UNUSED_D3FF                  = 0x1d3,
+    OP_UNUSED_D4FF                  = 0x1d4,
+    OP_UNUSED_D5FF                  = 0x1d5,
+    OP_UNUSED_D6FF                  = 0x1d6,
+    OP_UNUSED_D7FF                  = 0x1d7,
+    OP_UNUSED_D8FF                  = 0x1d8,
+    OP_UNUSED_D9FF                  = 0x1d9,
+    OP_UNUSED_DAFF                  = 0x1da,
+    OP_UNUSED_DBFF                  = 0x1db,
+    OP_UNUSED_DCFF                  = 0x1dc,
+    OP_UNUSED_DDFF                  = 0x1dd,
+    OP_UNUSED_DEFF                  = 0x1de,
+    OP_UNUSED_DFFF                  = 0x1df,
+    OP_UNUSED_E0FF                  = 0x1e0,
+    OP_UNUSED_E1FF                  = 0x1e1,
+    OP_UNUSED_E2FF                  = 0x1e2,
+    OP_UNUSED_E3FF                  = 0x1e3,
+    OP_UNUSED_E4FF                  = 0x1e4,
+    OP_UNUSED_E5FF                  = 0x1e5,
+    OP_UNUSED_E6FF                  = 0x1e6,
+    OP_UNUSED_E7FF                  = 0x1e7,
+    OP_UNUSED_E8FF                  = 0x1e8,
+    OP_UNUSED_E9FF                  = 0x1e9,
+    OP_UNUSED_EAFF                  = 0x1ea,
+    OP_UNUSED_EBFF                  = 0x1eb,
+    OP_UNUSED_ECFF                  = 0x1ec,
+    OP_UNUSED_EDFF                  = 0x1ed,
+    OP_UNUSED_EEFF                  = 0x1ee,
+    OP_UNUSED_EFFF                  = 0x1ef,
+    OP_UNUSED_F0FF                  = 0x1f0,
+    OP_UNUSED_F1FF                  = 0x1f1,
+    OP_INVOKE_OBJECT_INIT_JUMBO     = 0x1f2,
+    OP_IGET_VOLATILE_JUMBO          = 0x1f3,
+    OP_IGET_WIDE_VOLATILE_JUMBO     = 0x1f4,
+    OP_IGET_OBJECT_VOLATILE_JUMBO   = 0x1f5,
+    OP_IPUT_VOLATILE_JUMBO          = 0x1f6,
+    OP_IPUT_WIDE_VOLATILE_JUMBO     = 0x1f7,
+    OP_IPUT_OBJECT_VOLATILE_JUMBO   = 0x1f8,
+    OP_SGET_VOLATILE_JUMBO          = 0x1f9,
+    OP_SGET_WIDE_VOLATILE_JUMBO     = 0x1fa,
+    OP_SGET_OBJECT_VOLATILE_JUMBO   = 0x1fb,
+    OP_SPUT_VOLATILE_JUMBO          = 0x1fc,
+    OP_SPUT_WIDE_VOLATILE_JUMBO     = 0x1fd,
+    OP_SPUT_OBJECT_VOLATILE_JUMBO   = 0x1fe,
+    OP_THROW_VERIFICATION_ERROR_JUMBO = 0x1ff,
     // END(libdex-opcode-enum)
 } Opcode;
 
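The jumbo opcode values above follow a simple packing convention: the 16-bit opcode unit keeps 0xff in its low byte (the dispatch-ff selector) and uses its high byte to pick the extended operation, so the enum value is 0x100 plus that high byte (for example, OP_UNUSED_27FF = 0x127 corresponds to the code unit 0x27ff). A minimal sketch of the mapping, using hypothetical helper names that are not part of this change:

    #include <assert.h>

    typedef unsigned short u2;   /* 16-bit code unit, as in libdex */

    /* Hypothetical helpers illustrating the numbering convention:
     * enum value = 0x100 | (high byte of the opcode unit); the unit's
     * low byte is always 0xff. */
    static int unpackJumboOpcode(u2 opcodeUnit) {
        assert((opcodeUnit & 0xff) == 0xff);
        return 0x100 | (opcodeUnit >> 8);
    }

    static u2 packJumboOpcode(int opcode) {
        assert(opcode >= 0x100 && opcode <= 0x1ff);
        return (u2) (((opcode & 0xff) << 8) | 0xff);
    }

    /* unpackJumboOpcode(0x27ff) == 0x127  (OP_UNUSED_27FF)
     * packJumboOpcode(0x1ff)    == 0xffff (OP_THROW_VERIFICATION_ERROR_JUMBO) */
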
@@ -574,7 +830,7 @@
         H(OP_THROW_VERIFICATION_ERROR),                                       \
         H(OP_EXECUTE_INLINE),                                                 \
         H(OP_EXECUTE_INLINE_RANGE),                                           \
-        H(OP_INVOKE_DIRECT_EMPTY),                                            \
+        H(OP_INVOKE_OBJECT_INIT_RANGE),                                       \
         H(OP_RETURN_VOID_BARRIER),                                            \
         H(OP_IGET_QUICK),                                                     \
         H(OP_IGET_WIDE_QUICK),                                                \
@@ -590,6 +846,262 @@
         H(OP_SGET_OBJECT_VOLATILE),                                           \
         H(OP_SPUT_OBJECT_VOLATILE),                                           \
         H(OP_DISPATCH_FF),                                                    \
+        H(OP_CONST_CLASS_JUMBO),                                              \
+        H(OP_CHECK_CAST_JUMBO),                                               \
+        H(OP_INSTANCE_OF_JUMBO),                                              \
+        H(OP_NEW_INSTANCE_JUMBO),                                             \
+        H(OP_NEW_ARRAY_JUMBO),                                                \
+        H(OP_FILLED_NEW_ARRAY_JUMBO),                                         \
+        H(OP_IGET_JUMBO),                                                     \
+        H(OP_IGET_WIDE_JUMBO),                                                \
+        H(OP_IGET_OBJECT_JUMBO),                                              \
+        H(OP_IGET_BOOLEAN_JUMBO),                                             \
+        H(OP_IGET_BYTE_JUMBO),                                                \
+        H(OP_IGET_CHAR_JUMBO),                                                \
+        H(OP_IGET_SHORT_JUMBO),                                               \
+        H(OP_IPUT_JUMBO),                                                     \
+        H(OP_IPUT_WIDE_JUMBO),                                                \
+        H(OP_IPUT_OBJECT_JUMBO),                                              \
+        H(OP_IPUT_BOOLEAN_JUMBO),                                             \
+        H(OP_IPUT_BYTE_JUMBO),                                                \
+        H(OP_IPUT_CHAR_JUMBO),                                                \
+        H(OP_IPUT_SHORT_JUMBO),                                               \
+        H(OP_SGET_JUMBO),                                                     \
+        H(OP_SGET_WIDE_JUMBO),                                                \
+        H(OP_SGET_OBJECT_JUMBO),                                              \
+        H(OP_SGET_BOOLEAN_JUMBO),                                             \
+        H(OP_SGET_BYTE_JUMBO),                                                \
+        H(OP_SGET_CHAR_JUMBO),                                                \
+        H(OP_SGET_SHORT_JUMBO),                                               \
+        H(OP_SPUT_JUMBO),                                                     \
+        H(OP_SPUT_WIDE_JUMBO),                                                \
+        H(OP_SPUT_OBJECT_JUMBO),                                              \
+        H(OP_SPUT_BOOLEAN_JUMBO),                                             \
+        H(OP_SPUT_BYTE_JUMBO),                                                \
+        H(OP_SPUT_CHAR_JUMBO),                                                \
+        H(OP_SPUT_SHORT_JUMBO),                                               \
+        H(OP_INVOKE_VIRTUAL_JUMBO),                                           \
+        H(OP_INVOKE_SUPER_JUMBO),                                             \
+        H(OP_INVOKE_DIRECT_JUMBO),                                            \
+        H(OP_INVOKE_STATIC_JUMBO),                                            \
+        H(OP_INVOKE_INTERFACE_JUMBO),                                         \
+        H(OP_UNUSED_27FF),                                                    \
+        H(OP_UNUSED_28FF),                                                    \
+        H(OP_UNUSED_29FF),                                                    \
+        H(OP_UNUSED_2AFF),                                                    \
+        H(OP_UNUSED_2BFF),                                                    \
+        H(OP_UNUSED_2CFF),                                                    \
+        H(OP_UNUSED_2DFF),                                                    \
+        H(OP_UNUSED_2EFF),                                                    \
+        H(OP_UNUSED_2FFF),                                                    \
+        H(OP_UNUSED_30FF),                                                    \
+        H(OP_UNUSED_31FF),                                                    \
+        H(OP_UNUSED_32FF),                                                    \
+        H(OP_UNUSED_33FF),                                                    \
+        H(OP_UNUSED_34FF),                                                    \
+        H(OP_UNUSED_35FF),                                                    \
+        H(OP_UNUSED_36FF),                                                    \
+        H(OP_UNUSED_37FF),                                                    \
+        H(OP_UNUSED_38FF),                                                    \
+        H(OP_UNUSED_39FF),                                                    \
+        H(OP_UNUSED_3AFF),                                                    \
+        H(OP_UNUSED_3BFF),                                                    \
+        H(OP_UNUSED_3CFF),                                                    \
+        H(OP_UNUSED_3DFF),                                                    \
+        H(OP_UNUSED_3EFF),                                                    \
+        H(OP_UNUSED_3FFF),                                                    \
+        H(OP_UNUSED_40FF),                                                    \
+        H(OP_UNUSED_41FF),                                                    \
+        H(OP_UNUSED_42FF),                                                    \
+        H(OP_UNUSED_43FF),                                                    \
+        H(OP_UNUSED_44FF),                                                    \
+        H(OP_UNUSED_45FF),                                                    \
+        H(OP_UNUSED_46FF),                                                    \
+        H(OP_UNUSED_47FF),                                                    \
+        H(OP_UNUSED_48FF),                                                    \
+        H(OP_UNUSED_49FF),                                                    \
+        H(OP_UNUSED_4AFF),                                                    \
+        H(OP_UNUSED_4BFF),                                                    \
+        H(OP_UNUSED_4CFF),                                                    \
+        H(OP_UNUSED_4DFF),                                                    \
+        H(OP_UNUSED_4EFF),                                                    \
+        H(OP_UNUSED_4FFF),                                                    \
+        H(OP_UNUSED_50FF),                                                    \
+        H(OP_UNUSED_51FF),                                                    \
+        H(OP_UNUSED_52FF),                                                    \
+        H(OP_UNUSED_53FF),                                                    \
+        H(OP_UNUSED_54FF),                                                    \
+        H(OP_UNUSED_55FF),                                                    \
+        H(OP_UNUSED_56FF),                                                    \
+        H(OP_UNUSED_57FF),                                                    \
+        H(OP_UNUSED_58FF),                                                    \
+        H(OP_UNUSED_59FF),                                                    \
+        H(OP_UNUSED_5AFF),                                                    \
+        H(OP_UNUSED_5BFF),                                                    \
+        H(OP_UNUSED_5CFF),                                                    \
+        H(OP_UNUSED_5DFF),                                                    \
+        H(OP_UNUSED_5EFF),                                                    \
+        H(OP_UNUSED_5FFF),                                                    \
+        H(OP_UNUSED_60FF),                                                    \
+        H(OP_UNUSED_61FF),                                                    \
+        H(OP_UNUSED_62FF),                                                    \
+        H(OP_UNUSED_63FF),                                                    \
+        H(OP_UNUSED_64FF),                                                    \
+        H(OP_UNUSED_65FF),                                                    \
+        H(OP_UNUSED_66FF),                                                    \
+        H(OP_UNUSED_67FF),                                                    \
+        H(OP_UNUSED_68FF),                                                    \
+        H(OP_UNUSED_69FF),                                                    \
+        H(OP_UNUSED_6AFF),                                                    \
+        H(OP_UNUSED_6BFF),                                                    \
+        H(OP_UNUSED_6CFF),                                                    \
+        H(OP_UNUSED_6DFF),                                                    \
+        H(OP_UNUSED_6EFF),                                                    \
+        H(OP_UNUSED_6FFF),                                                    \
+        H(OP_UNUSED_70FF),                                                    \
+        H(OP_UNUSED_71FF),                                                    \
+        H(OP_UNUSED_72FF),                                                    \
+        H(OP_UNUSED_73FF),                                                    \
+        H(OP_UNUSED_74FF),                                                    \
+        H(OP_UNUSED_75FF),                                                    \
+        H(OP_UNUSED_76FF),                                                    \
+        H(OP_UNUSED_77FF),                                                    \
+        H(OP_UNUSED_78FF),                                                    \
+        H(OP_UNUSED_79FF),                                                    \
+        H(OP_UNUSED_7AFF),                                                    \
+        H(OP_UNUSED_7BFF),                                                    \
+        H(OP_UNUSED_7CFF),                                                    \
+        H(OP_UNUSED_7DFF),                                                    \
+        H(OP_UNUSED_7EFF),                                                    \
+        H(OP_UNUSED_7FFF),                                                    \
+        H(OP_UNUSED_80FF),                                                    \
+        H(OP_UNUSED_81FF),                                                    \
+        H(OP_UNUSED_82FF),                                                    \
+        H(OP_UNUSED_83FF),                                                    \
+        H(OP_UNUSED_84FF),                                                    \
+        H(OP_UNUSED_85FF),                                                    \
+        H(OP_UNUSED_86FF),                                                    \
+        H(OP_UNUSED_87FF),                                                    \
+        H(OP_UNUSED_88FF),                                                    \
+        H(OP_UNUSED_89FF),                                                    \
+        H(OP_UNUSED_8AFF),                                                    \
+        H(OP_UNUSED_8BFF),                                                    \
+        H(OP_UNUSED_8CFF),                                                    \
+        H(OP_UNUSED_8DFF),                                                    \
+        H(OP_UNUSED_8EFF),                                                    \
+        H(OP_UNUSED_8FFF),                                                    \
+        H(OP_UNUSED_90FF),                                                    \
+        H(OP_UNUSED_91FF),                                                    \
+        H(OP_UNUSED_92FF),                                                    \
+        H(OP_UNUSED_93FF),                                                    \
+        H(OP_UNUSED_94FF),                                                    \
+        H(OP_UNUSED_95FF),                                                    \
+        H(OP_UNUSED_96FF),                                                    \
+        H(OP_UNUSED_97FF),                                                    \
+        H(OP_UNUSED_98FF),                                                    \
+        H(OP_UNUSED_99FF),                                                    \
+        H(OP_UNUSED_9AFF),                                                    \
+        H(OP_UNUSED_9BFF),                                                    \
+        H(OP_UNUSED_9CFF),                                                    \
+        H(OP_UNUSED_9DFF),                                                    \
+        H(OP_UNUSED_9EFF),                                                    \
+        H(OP_UNUSED_9FFF),                                                    \
+        H(OP_UNUSED_A0FF),                                                    \
+        H(OP_UNUSED_A1FF),                                                    \
+        H(OP_UNUSED_A2FF),                                                    \
+        H(OP_UNUSED_A3FF),                                                    \
+        H(OP_UNUSED_A4FF),                                                    \
+        H(OP_UNUSED_A5FF),                                                    \
+        H(OP_UNUSED_A6FF),                                                    \
+        H(OP_UNUSED_A7FF),                                                    \
+        H(OP_UNUSED_A8FF),                                                    \
+        H(OP_UNUSED_A9FF),                                                    \
+        H(OP_UNUSED_AAFF),                                                    \
+        H(OP_UNUSED_ABFF),                                                    \
+        H(OP_UNUSED_ACFF),                                                    \
+        H(OP_UNUSED_ADFF),                                                    \
+        H(OP_UNUSED_AEFF),                                                    \
+        H(OP_UNUSED_AFFF),                                                    \
+        H(OP_UNUSED_B0FF),                                                    \
+        H(OP_UNUSED_B1FF),                                                    \
+        H(OP_UNUSED_B2FF),                                                    \
+        H(OP_UNUSED_B3FF),                                                    \
+        H(OP_UNUSED_B4FF),                                                    \
+        H(OP_UNUSED_B5FF),                                                    \
+        H(OP_UNUSED_B6FF),                                                    \
+        H(OP_UNUSED_B7FF),                                                    \
+        H(OP_UNUSED_B8FF),                                                    \
+        H(OP_UNUSED_B9FF),                                                    \
+        H(OP_UNUSED_BAFF),                                                    \
+        H(OP_UNUSED_BBFF),                                                    \
+        H(OP_UNUSED_BCFF),                                                    \
+        H(OP_UNUSED_BDFF),                                                    \
+        H(OP_UNUSED_BEFF),                                                    \
+        H(OP_UNUSED_BFFF),                                                    \
+        H(OP_UNUSED_C0FF),                                                    \
+        H(OP_UNUSED_C1FF),                                                    \
+        H(OP_UNUSED_C2FF),                                                    \
+        H(OP_UNUSED_C3FF),                                                    \
+        H(OP_UNUSED_C4FF),                                                    \
+        H(OP_UNUSED_C5FF),                                                    \
+        H(OP_UNUSED_C6FF),                                                    \
+        H(OP_UNUSED_C7FF),                                                    \
+        H(OP_UNUSED_C8FF),                                                    \
+        H(OP_UNUSED_C9FF),                                                    \
+        H(OP_UNUSED_CAFF),                                                    \
+        H(OP_UNUSED_CBFF),                                                    \
+        H(OP_UNUSED_CCFF),                                                    \
+        H(OP_UNUSED_CDFF),                                                    \
+        H(OP_UNUSED_CEFF),                                                    \
+        H(OP_UNUSED_CFFF),                                                    \
+        H(OP_UNUSED_D0FF),                                                    \
+        H(OP_UNUSED_D1FF),                                                    \
+        H(OP_UNUSED_D2FF),                                                    \
+        H(OP_UNUSED_D3FF),                                                    \
+        H(OP_UNUSED_D4FF),                                                    \
+        H(OP_UNUSED_D5FF),                                                    \
+        H(OP_UNUSED_D6FF),                                                    \
+        H(OP_UNUSED_D7FF),                                                    \
+        H(OP_UNUSED_D8FF),                                                    \
+        H(OP_UNUSED_D9FF),                                                    \
+        H(OP_UNUSED_DAFF),                                                    \
+        H(OP_UNUSED_DBFF),                                                    \
+        H(OP_UNUSED_DCFF),                                                    \
+        H(OP_UNUSED_DDFF),                                                    \
+        H(OP_UNUSED_DEFF),                                                    \
+        H(OP_UNUSED_DFFF),                                                    \
+        H(OP_UNUSED_E0FF),                                                    \
+        H(OP_UNUSED_E1FF),                                                    \
+        H(OP_UNUSED_E2FF),                                                    \
+        H(OP_UNUSED_E3FF),                                                    \
+        H(OP_UNUSED_E4FF),                                                    \
+        H(OP_UNUSED_E5FF),                                                    \
+        H(OP_UNUSED_E6FF),                                                    \
+        H(OP_UNUSED_E7FF),                                                    \
+        H(OP_UNUSED_E8FF),                                                    \
+        H(OP_UNUSED_E9FF),                                                    \
+        H(OP_UNUSED_EAFF),                                                    \
+        H(OP_UNUSED_EBFF),                                                    \
+        H(OP_UNUSED_ECFF),                                                    \
+        H(OP_UNUSED_EDFF),                                                    \
+        H(OP_UNUSED_EEFF),                                                    \
+        H(OP_UNUSED_EFFF),                                                    \
+        H(OP_UNUSED_F0FF),                                                    \
+        H(OP_UNUSED_F1FF),                                                    \
+        H(OP_INVOKE_OBJECT_INIT_JUMBO),                                       \
+        H(OP_IGET_VOLATILE_JUMBO),                                            \
+        H(OP_IGET_WIDE_VOLATILE_JUMBO),                                       \
+        H(OP_IGET_OBJECT_VOLATILE_JUMBO),                                     \
+        H(OP_IPUT_VOLATILE_JUMBO),                                            \
+        H(OP_IPUT_WIDE_VOLATILE_JUMBO),                                       \
+        H(OP_IPUT_OBJECT_VOLATILE_JUMBO),                                     \
+        H(OP_SGET_VOLATILE_JUMBO),                                            \
+        H(OP_SGET_WIDE_VOLATILE_JUMBO),                                       \
+        H(OP_SGET_OBJECT_VOLATILE_JUMBO),                                     \
+        H(OP_SPUT_VOLATILE_JUMBO),                                            \
+        H(OP_SPUT_WIDE_VOLATILE_JUMBO),                                       \
+        H(OP_SPUT_OBJECT_VOLATILE_JUMBO),                                     \
+        H(OP_THROW_VERIFICATION_ERROR_JUMBO),                                 \
         /* END(libdex-goto-table) */                                          \
     };
 
diff --git a/libdex/DexSwapVerify.c b/libdex/DexSwapVerify.c
index a467fa7..5fd3f09 100644
--- a/libdex/DexSwapVerify.c
+++ b/libdex/DexSwapVerify.c
@@ -22,6 +22,7 @@
 #include "DexClass.h"
 #include "DexDataMap.h"
 #include "DexProto.h"
+#include "DexUtf.h"
 #include "Leb128.h"
 
 #include <safe_iop.h>
diff --git a/libdex/DexUtf.c b/libdex/DexUtf.c
new file mode 100644
index 0000000..df49d18
--- /dev/null
+++ b/libdex/DexUtf.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Validate and manipulate MUTF-8 encoded string data.
+ */
+
+#include "DexUtf.h"
+
+/* Compare two '\0'-terminated modified UTF-8 strings, using Unicode
+ * code point values for comparison. This treats different encodings
+ * for the same code point as equivalent, except that only a real '\0'
+ * byte is considered the string terminator. The return value is as
+ * for strcmp(). */
+int dexUtf8Cmp(const char* s1, const char* s2) {
+    for (;;) {
+        if (*s1 == '\0') {
+            if (*s2 == '\0') {
+                return 0;
+            }
+            return -1;
+        } else if (*s2 == '\0') {
+            return 1;
+        }
+
+        int utf1 = dexGetUtf16FromUtf8(&s1);
+        int utf2 = dexGetUtf16FromUtf8(&s2);
+        int diff = utf1 - utf2;
+
+        if (diff != 0) {
+            return diff;
+        }
+    }
+}
+
+/* for dexIsValidMemberNameUtf8(), a bit vector indicating valid low ascii */
+u4 DEX_MEMBER_VALID_LOW_ASCII[4] = {
+    0x00000000, // 00..1f low control characters; nothing valid
+    0x03ff2010, // 20..3f digits and symbols; valid: '0'..'9', '$', '-'
+    0x87fffffe, // 40..5f uppercase etc.; valid: 'A'..'Z', '_'
+    0x07fffffe  // 60..7f lowercase etc.; valid: 'a'..'z'
+};
+
+/* Helper for dexIsValidMemberNameUtf8(); do not call directly. */
+bool dexIsValidMemberNameUtf8_0(const char** pUtf8Ptr) {
+    /*
+     * It's a multibyte encoded character. Decode it and analyze. We
+     * accept anything that isn't (a) an improperly encoded low value,
+     * (b) an improper surrogate pair, (c) an encoded '\0', (d) a high
+     * control character, or (e) a high space, layout, or special
+     * character (U+00a0, U+2000..U+200f, U+2028..U+202f,
+     * U+fff0..U+ffff). This is all specified in the dex format
+     * document.
+     */
+
+    u2 utf16 = dexGetUtf16FromUtf8(pUtf8Ptr);
+
+    // Perform follow-up tests based on the high 8 bits.
+    switch (utf16 >> 8) {
+        case 0x00: {
+            // It's only valid if it's above the ISO-8859-1 high space (0xa0).
+            return (utf16 > 0x00a0);
+        }
+        case 0xd8:
+        case 0xd9:
+        case 0xda:
+        case 0xdb: {
+            /*
+             * It's a leading surrogate. Check to see that a trailing
+             * surrogate follows.
+             */
+            utf16 = dexGetUtf16FromUtf8(pUtf8Ptr);
+            return (utf16 >= 0xdc00) && (utf16 <= 0xdfff);
+        }
+        case 0xdc:
+        case 0xdd:
+        case 0xde:
+        case 0xdf: {
+            // It's a trailing surrogate, which is not valid at this point.
+            return false;
+        }
+        case 0x20:
+        case 0xff: {
+            // It's in the range that has spaces, controls, and specials.
+            switch (utf16 & 0xfff8) {
+                case 0x2000:
+                case 0x2008:
+                case 0x2028:
+                case 0xfff0:
+                case 0xfff8: {
+                    return false;
+                }
+            }
+            break;
+        }
+    }
+
+    return true;
+}
+
+/* Return whether the given string is a valid field or method name. */
+bool dexIsValidMemberName(const char* s) {
+    bool angleName = false;
+
+    switch (*s) {
+        case '\0': {
+            // The empty string is not a valid name.
+            return false;
+        }
+        case '<': {
+            /*
+             * '<' is allowed only at the start of a name, and if present,
+             * means that the name must end with '>'.
+             */
+            angleName = true;
+            s++;
+            break;
+        }
+    }
+
+    for (;;) {
+        switch (*s) {
+            case '\0': {
+                return !angleName;
+            }
+            case '>': {
+                return angleName && s[1] == '\0';
+            }
+        }
+        if (!dexIsValidMemberNameUtf8(&s)) {
+            return false;
+        }
+    }
+}
+
+/* Helper for validating type descriptors and class names, which is parametric
+ * with respect to type vs. class and dot vs. slash. */
+static bool isValidTypeDescriptorOrClassName(const char* s, bool isClassName,
+        bool dotSeparator) {
+    int arrayCount = 0;
+
+    while (*s == '[') {
+        arrayCount++;
+        s++;
+    }
+
+    if (arrayCount > 255) {
+        // Arrays may have no more than 255 dimensions.
+        return false;
+    }
+
+    if (arrayCount != 0) {
+        /*
+         * If we're looking at an array of some sort, then it doesn't
+         * matter if what is being asked for is a class name; the
+         * format looks the same as a type descriptor in that case, so
+         * treat it as such.
+         */
+        isClassName = false;
+    }
+
+    if (!isClassName) {
+        /*
+         * We are looking for a descriptor. Either validate it as a
+         * single-character primitive type, or continue on to check the
+         * embedded class name (bracketed by "L" and ";").
+         */
+        switch (*(s++)) {
+            case 'B':
+            case 'C':
+            case 'D':
+            case 'F':
+            case 'I':
+            case 'J':
+            case 'S':
+            case 'Z': {
+                // These are all single-character descriptors for primitive types.
+                return (*s == '\0');
+            }
+            case 'V': {
+                // Non-array void is valid, but you can't have an array of void.
+                return (arrayCount == 0) && (*s == '\0');
+            }
+            case 'L': {
+                // Class name: Break out and continue below.
+                break;
+            }
+            default: {
+                // Oddball descriptor character.
+                return false;
+            }
+        }
+    }
+
+    /*
+     * We just consumed the 'L' that introduces a class name as part
+     * of a type descriptor, or we are looking for an unadorned class
+     * name.
+     */
+
+    bool sepOrFirst = true; // first character or just encountered a separator.
+    for (;;) {
+        u1 c = (u1) *s;
+        switch (c) {
+            case '\0': {
+                /*
+                 * Premature end for a type descriptor, but valid for
+                 * a class name as long as we haven't encountered an
+                 * empty component (including the degenerate case of
+                 * the empty string "").
+                 */
+                return isClassName && !sepOrFirst;
+            }
+            case ';': {
+                /*
+                 * Invalid character for a class name, but the
+                 * legitimate end of a type descriptor. In the latter
+                 * case, make sure that this is the end of the string
+                 * and that it doesn't end with an empty component
+                 * (including the degenerate case of "L;").
+                 */
+                return !isClassName && !sepOrFirst && (s[1] == '\0');
+            }
+            case '/':
+            case '.': {
+                if (dotSeparator != (c == '.')) {
+                    // The wrong separator character.
+                    return false;
+                }
+                if (sepOrFirst) {
+                    // Separator at start or two separators in a row.
+                    return false;
+                }
+                sepOrFirst = true;
+                s++;
+                break;
+            }
+            default: {
+                if (!dexIsValidMemberNameUtf8(&s)) {
+                    return false;
+                }
+                sepOrFirst = false;
+                break;
+            }
+        }
+    }
+}
+
+/* Return whether the given string is a valid type descriptor. */
+bool dexIsValidTypeDescriptor(const char* s) {
+    return isValidTypeDescriptorOrClassName(s, false, false);
+}
+
+/* (documented in header) */
+bool dexIsValidClassName(const char* s, bool dotSeparator) {
+    return isValidTypeDescriptorOrClassName(s, true, dotSeparator);
+}
+
+/* Return whether the given string is a valid reference descriptor. This
+ * is true if dexIsValidTypeDescriptor() returns true and the descriptor
+ * is for a class or array and not a primitive type. */
+bool dexIsReferenceDescriptor(const char* s) {
+    if (!dexIsValidTypeDescriptor(s)) {
+        return false;
+    }
+
+    return (s[0] == 'L') || (s[0] == '[');
+}
+
+/* Return whether the given string is a valid class descriptor. This
+ * is true if dexIsValidTypeDescriptor() returns true and the descriptor
+ * is for a class and not an array or primitive type. */
+bool dexIsClassDescriptor(const char* s) {
+    if (!dexIsValidTypeDescriptor(s)) {
+        return false;
+    }
+
+    return s[0] == 'L';
+}
+
+/* Return whether the given string is a valid field type descriptor. This
+ * is true if dexIsValidTypeDescriptor() returns true and the descriptor
+ * is for anything but "void". */
+bool dexIsFieldDescriptor(const char* s) {
+    if (!dexIsValidTypeDescriptor(s)) {
+        return false;
+    }
+
+    return s[0] != 'V';
+}
+
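Taken together, the entry points added in this file behave as sketched below when the sketch is compiled and linked against the new DexUtf.c; the expected results follow directly from the rules spelled out in the comments above, and the test program itself is illustrative, not part of the commit:

    #include <assert.h>
    #include <stdbool.h>

    /* Declarations from the new libdex/DexUtf.h (see the next file). */
    bool dexIsValidMemberName(const char* s);
    bool dexIsValidTypeDescriptor(const char* s);
    bool dexIsValidClassName(const char* s, bool dotSeparator);
    int dexUtf8Cmp(const char* s1, const char* s2);

    int main(void) {
        assert(dexIsValidMemberName("toString"));
        assert(dexIsValidMemberName("<init>"));   /* '<' only if the name ends with '>' */
        assert(!dexIsValidMemberName("<ini"));
        assert(dexIsValidTypeDescriptor("[Ljava/lang/String;"));
        assert(!dexIsValidTypeDescriptor("[V"));  /* arrays of void are rejected */
        assert(dexIsValidClassName("java.lang.Object", true));  /* dot-separated form */
        assert(!dexIsValidClassName("java..lang", true));       /* empty component */
        assert(dexUtf8Cmp("abc", "abc") == 0);
        return 0;
    }
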
diff --git a/libdex/DexUtf.h b/libdex/DexUtf.h
new file mode 100644
index 0000000..a7eb28c
--- /dev/null
+++ b/libdex/DexUtf.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Validate and manipulate MUTF-8 (modified UTF-8) encoded string data.
+ */
+
+#ifndef _LIBDEX_DEXUTF
+#define _LIBDEX_DEXUTF
+
+#include "DexFile.h"
+
+/*
+ * Retrieve the next UTF-16 character from a UTF-8 string.
+ *
+ * Advances "*pUtf8Ptr" to the start of the next character.
+ *
+ * WARNING: If a string is corrupted by dropping a '\0' in the middle
+ * of a 3-byte sequence, you can end up overrunning the buffer with
+ * reads (and possibly with the writes if the length was computed and
+ * cached before the damage). For performance reasons, this function
+ * assumes that the string being parsed is known to be valid (e.g., by
+ * already being verified). Most strings we process here are coming
+ * out of dex files or other internal translations, so the only real
+ * risk comes from the JNI NewStringUTF call.
+ */
+DEX_INLINE u2 dexGetUtf16FromUtf8(const char** pUtf8Ptr)
+{
+    unsigned int one, two, three;
+
+    one = *(*pUtf8Ptr)++;
+    if ((one & 0x80) != 0) {
+        /* two- or three-byte encoding */
+        two = *(*pUtf8Ptr)++;
+        if ((one & 0x20) != 0) {
+            /* three-byte encoding */
+            three = *(*pUtf8Ptr)++;
+            return ((one & 0x0f) << 12) |
+                   ((two & 0x3f) << 6) |
+                   (three & 0x3f);
+        } else {
+            /* two-byte encoding */
+            return ((one & 0x1f) << 6) |
+                   (two & 0x3f);
+        }
+    } else {
+        /* one-byte encoding */
+        return one;
+    }
+}
+
+/* Compare two '\0'-terminated modified UTF-8 strings, using Unicode
+ * code point values for comparison. This treats different encodings
+ * for the same code point as equivalent, except that only a real '\0'
+ * byte is considered the string terminator. The return value is as
+ * for strcmp(). */
+int dexUtf8Cmp(const char* s1, const char* s2);
+
+/* for dexIsValidMemberNameUtf8(), a bit vector indicating valid low ascii */
+extern u4 DEX_MEMBER_VALID_LOW_ASCII[4];
+
+/* Helper for dexIsValidMemberNameUtf8(); do not call directly. */
+bool dexIsValidMemberNameUtf8_0(const char** pUtf8Ptr);
+
+/* Return whether the pointed-at modified-UTF-8 encoded character is
+ * valid as part of a member name, updating the pointer to point past
+ * the consumed character. This will consume two encoded UTF-16 code
+ * points if the character is encoded as a surrogate pair. Also, if
+ * this function returns false, then the given pointer may only have
+ * been partially advanced. */
+DEX_INLINE bool dexIsValidMemberNameUtf8(const char** pUtf8Ptr) {
+    u1 c = (u1) **pUtf8Ptr;
+    if (c <= 0x7f) {
+        // It's low-ascii, so check the table.
+        u4 wordIdx = c >> 5;
+        u4 bitIdx = c & 0x1f;
+        (*pUtf8Ptr)++;
+        return (DEX_MEMBER_VALID_LOW_ASCII[wordIdx] & (1 << bitIdx)) != 0;
+    }
+
+    /*
+     * It's a multibyte encoded character. Call a non-inline function
+     * for the heavy lifting.
+     */
+    return dexIsValidMemberNameUtf8_0(pUtf8Ptr);
+}
+
+/* Return whether the given string is a valid field or method name. */
+bool dexIsValidMemberName(const char* s);
+
+/* Return whether the given string is a valid type descriptor. */
+bool dexIsValidTypeDescriptor(const char* s);
+
+/* Return whether the given string is a valid internal-form class
+ * name, with components separated either by dots or slashes as
+ * specified. A class name is like a type descriptor, except that it
+ * can't name a primitive type (including void). In terms of syntax,
+ * the form is either (a) the name of the class without adornment
+ * (that is, not bracketed by "L" and ";"); or (b) identical to the
+ * type descriptor syntax for array types. */
+bool dexIsValidClassName(const char* s, bool dotSeparator);
+
+/* Return whether the given string is a valid reference descriptor. This
+ * is true if dexIsValidTypeDescriptor() returns true and the descriptor
+ * is for a class or array and not a primitive type. */
+bool dexIsReferenceDescriptor(const char* s);
+
+/* Return whether the given string is a valid class descriptor. This
+ * is true if dexIsValidTypeDescriptor() returns true and the descriptor
+ * is for a class and not an array or primitive type. */
+bool dexIsClassDescriptor(const char* s);
+
+/* Return whether the given string is a valid field type descriptor. This
+ * is true if dexIsValidTypeDescriptor() returns true and the descriptor
+ * is for anything but "void". */
+bool dexIsFieldDescriptor(const char* s);
+
+#endif /* def _LIBDEX_DEXUTF */
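As a worked example of the decoding that dexGetUtf16FromUtf8() performs, here is a standalone restatement of the same bit manipulation; decodeMutf8() is an illustrative copy for this note, not a libdex function:

    #include <assert.h>

    typedef unsigned short u2;

    /* One-, two-, and three-byte MUTF-8 sequences each yield a single
     * UTF-16 code unit, mirroring dexGetUtf16FromUtf8(). */
    static u2 decodeMutf8(const char** pp) {
        unsigned int one = (unsigned char) *(*pp)++;
        if ((one & 0x80) == 0)
            return one;                                 /* 0xxxxxxx */
        unsigned int two = (unsigned char) *(*pp)++;
        if ((one & 0x20) == 0)
            return ((one & 0x1f) << 6) | (two & 0x3f);  /* 110xxxxx 10xxxxxx */
        unsigned int three = (unsigned char) *(*pp)++;
        return ((one & 0x0f) << 12) | ((two & 0x3f) << 6) | (three & 0x3f);
    }

    int main(void) {
        /* U+0000 is encoded as c0 80 in MUTF-8, so an embedded NUL never
         * produces a 00 byte; a real 00 byte is the string terminator. */
        const char nul[] = { (char) 0xc0, (char) 0x80, 0 };
        const char* p = nul;
        assert(decodeMutf8(&p) == 0x0000);

        /* U+20AC (euro sign) uses the three-byte form e2 82 ac. */
        const char euro[] = { (char) 0xe2, (char) 0x82, (char) 0xac, 0 };
        p = euro;
        assert(decodeMutf8(&p) == 0x20ac);
        return 0;
    }
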
diff --git a/libdex/InstrUtils.c b/libdex/InstrUtils.c
index 9d21a04..0e72d43 100644
--- a/libdex/InstrUtils.c
+++ b/libdex/InstrUtils.c
@@ -48,6 +48,22 @@
     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 3, 3,
     3, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 2, 0,
+    4, 4, 5, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+    5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+    4, 4, 5, 5, 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4,
     // END(libdex-widths)
 };
 
@@ -313,6 +329,262 @@
     kInstrCanContinue|kInstrCanThrow,
     kInstrCanContinue|kInstrCanThrow,
     0,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow|kInstrInvoke,
+    kInstrCanContinue|kInstrCanThrow|kInstrInvoke,
+    kInstrCanContinue|kInstrCanThrow|kInstrInvoke,
+    kInstrCanContinue|kInstrCanThrow|kInstrInvoke,
+    kInstrCanContinue|kInstrCanThrow|kInstrInvoke,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    kInstrCanContinue,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanContinue|kInstrCanThrow,
+    kInstrCanThrow,
     // END(libdex-flags)
 };
 
@@ -358,7 +630,44 @@
     kFmt22c,  kFmt22c,  kFmt22c,  kFmt21c,  kFmt21c,  kFmt00x,  kFmt20bc,
     kFmt35mi, kFmt3rmi, kFmt35c,  kFmt10x,  kFmt22cs, kFmt22cs, kFmt22cs,
     kFmt22cs, kFmt22cs, kFmt22cs, kFmt35ms, kFmt3rms, kFmt35ms, kFmt3rms,
-    kFmt22c,  kFmt21c,  kFmt21c,  kFmt00x,
+    kFmt22c,  kFmt21c,  kFmt21c,  kFmt00x,  kFmt41c,  kFmt41c,  kFmt52c,
+    kFmt41c,  kFmt52c,  kFmt5rc,  kFmt52c,  kFmt52c,  kFmt52c,  kFmt52c,
+    kFmt52c,  kFmt52c,  kFmt52c,  kFmt52c,  kFmt52c,  kFmt52c,  kFmt52c,
+    kFmt52c,  kFmt52c,  kFmt52c,  kFmt41c,  kFmt41c,  kFmt41c,  kFmt41c,
+    kFmt41c,  kFmt41c,  kFmt41c,  kFmt41c,  kFmt41c,  kFmt41c,  kFmt41c,
+    kFmt41c,  kFmt41c,  kFmt41c,  kFmt5rc,  kFmt5rc,  kFmt5rc,  kFmt5rc,
+    kFmt5rc,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,  kFmt00x,
+    kFmt00x,  kFmt5rc,  kFmt52c,  kFmt52c,  kFmt52c,  kFmt52c,  kFmt52c,
+    kFmt52c,  kFmt41c,  kFmt41c,  kFmt41c,  kFmt41c,  kFmt41c,  kFmt41c,
+    kFmt40sc,
     // END(libdex-formats)
 };
 
@@ -453,7 +762,92 @@
     kIndexFieldOffset,  kIndexFieldOffset,  kIndexVtableOffset,
     kIndexVtableOffset, kIndexVtableOffset, kIndexVtableOffset,
     kIndexFieldRef,     kIndexFieldRef,     kIndexFieldRef,
-    kIndexUnknown,
+    kIndexUnknown,      kIndexTypeRef,      kIndexTypeRef,
+    kIndexTypeRef,      kIndexTypeRef,      kIndexTypeRef,
+    kIndexTypeRef,      kIndexFieldRef,     kIndexFieldRef,
+    kIndexFieldRef,     kIndexFieldRef,     kIndexFieldRef,
+    kIndexFieldRef,     kIndexFieldRef,     kIndexFieldRef,
+    kIndexFieldRef,     kIndexFieldRef,     kIndexFieldRef,
+    kIndexFieldRef,     kIndexFieldRef,     kIndexFieldRef,
+    kIndexFieldRef,     kIndexFieldRef,     kIndexFieldRef,
+    kIndexFieldRef,     kIndexFieldRef,     kIndexFieldRef,
+    kIndexFieldRef,     kIndexFieldRef,     kIndexFieldRef,
+    kIndexFieldRef,     kIndexFieldRef,     kIndexFieldRef,
+    kIndexFieldRef,     kIndexFieldRef,     kIndexMethodRef,
+    kIndexMethodRef,    kIndexMethodRef,    kIndexMethodRef,
+    kIndexMethodRef,    kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexUnknown,      kIndexUnknown,      kIndexUnknown,
+    kIndexMethodRef,    kIndexFieldRef,     kIndexFieldRef,
+    kIndexFieldRef,     kIndexFieldRef,     kIndexFieldRef,
+    kIndexFieldRef,     kIndexFieldRef,     kIndexFieldRef,
+    kIndexFieldRef,     kIndexFieldRef,     kIndexFieldRef,
+    kIndexFieldRef,     kIndexVaries,
     // END(libdex-index-types)
 };
 
@@ -652,6 +1046,7 @@
         pDec->vB = FETCH(1) >> 8;
         pDec->vC = (s2) FETCH(2);                   // sign-extend 16-bit value
         break;
+    case kFmt40sc:      // [opt] exop AAAA, thing@BBBBBBBB
     case kFmt41c:       // exop vAAAA, thing@BBBBBBBB
         /*
          * The order of fields for this format in the spec is {B, A},
diff --git a/libdex/InstrUtils.h b/libdex/InstrUtils.h
index 3f77411..f09429c 100644
--- a/libdex/InstrUtils.h
+++ b/libdex/InstrUtils.h
@@ -63,6 +63,7 @@
     kFmt3rmi,       // [opt] inline invoke/range
     kFmt33x,        // exop vAA, vBB, vCCCC
     kFmt32s,        // exop vAA, vBB, #+CCCC
+    kFmt40sc,       // [opt] exop AAAA, thing@BBBBBBBB
     kFmt41c,        // exop vAAAA, thing@BBBBBBBB
     kFmt52c,        // exop vAAAA, vBBBB, thing@CCCCCCCC
     kFmt5rc,        // exop {vCCCC .. v(CCCC+AAAA-1)}, thing@BBBBBBBB
diff --git a/opcode-gen/bytecode.txt b/opcode-gen/bytecode.txt
index 007c41d..7f69214 100644
--- a/opcode-gen/bytecode.txt
+++ b/opcode-gen/bytecode.txt
@@ -48,6 +48,7 @@
 format 35ms
 format 3rmi
 format 3rms
+format 40sc
 
 # One line per opcode. Columns are:
 #   hex for opcode
@@ -327,12 +328,11 @@
 op   ee +execute-inline             35mi n inline-method optimized|continue|throw
 op   ef +execute-inline/range       3rmi n inline-method optimized|continue|throw
 
-# This opcode is marked neither as "invoke" nor "throw" since it is
-# executed as a nop except if a debugger is attached. And given that
-# dexopt would have been the thing that inserted this instruction,
-# having already verified the method and the one it (would have)
-# called, we know that it couldn't possibly throw.
-op   f0 +invoke-direct-empty        35c  n method-ref    optimized|continue
+# This opcode is marked neither as "invoke" nor "throw" since it
+# doesn't invoke the method unless a debugger is attached. The verifier
+# will ensure that Object.<init> is present and empty, so there is no
+# opportunity for an exception to be thrown.
+op   f0 +invoke-object-init/range   35c  n method-ref    optimized|continue
 
 op   f1 +return-void-barrier        10x  n none          optimized|return
 op   f2 +iget-quick                 22cs y field-offset  optimized|continue|throw
@@ -392,4 +392,26 @@
 op 24ff invoke-direct/jumbo         5rc  n method-ref    continue|throw|invoke
 op 25ff invoke-static/jumbo         5rc  n method-ref    continue|throw|invoke
 op 26ff invoke-interface/jumbo      5rc  n method-ref    continue|throw|invoke
-# unused: op 27ff..ffff
+
+# unused: op 27ff..f1ff
+
+#
+# Optimized opcodes (not valid in an unoptimized dex file)
+#
+
+# See notes above invoke-object-init re: no "throw" or "invoke"
+op f2ff +invoke-object-init/jumbo   5rc  n method-ref    optimized|continue
+
+op f3ff +iget-volatile/jumbo        52c  y field-ref     optimized|continue|throw
+op f4ff +iget-wide-volatile/jumbo   52c  y field-ref     optimized|continue|throw
+op f5ff +iget-object-volatile/jumbo 52c  y field-ref     optimized|continue|throw
+op f6ff +iput-volatile/jumbo        52c  n field-ref     optimized|continue|throw
+op f7ff +iput-wide-volatile/jumbo   52c  n field-ref     optimized|continue|throw
+op f8ff +iput-object-volatile/jumbo 52c  n field-ref     optimized|continue|throw
+op f9ff +sget-volatile/jumbo        41c  y field-ref     optimized|continue|throw
+op faff +sget-wide-volatile/jumbo   41c  y field-ref     optimized|continue|throw
+op fbff +sget-object-volatile/jumbo 41c  y field-ref     optimized|continue|throw
+op fcff +sput-volatile/jumbo        41c  n field-ref     optimized|continue|throw
+op fdff +sput-wide-volatile/jumbo   41c  n field-ref     optimized|continue|throw
+op feff +sput-object-volatile/jumbo 41c  n field-ref     optimized|continue|throw
+op ffff ^throw-verification-error/jumbo 40sc n varies    optimized|throw
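
The new jumbo rows above use the same column layout as the rest of bytecode.txt: opcode value in hex, name, format id, whether the instruction writes a result register (y/n), index type, and a |-separated flag list. The real consumer of this file is opcode-gen.awk; purely as an illustration of that layout (the full column comment is outside this hunk, so the field meanings here are inferred from the visible rows), one row splits as follows:

    // Illustrative only: a minimal sketch of the six-column "op" row layout in
    // opcode-gen/bytecode.txt, inferred from the rows above. Not part of the
    // patch; the actual parser is opcode-gen.awk.
    public class OpLineSketch {
        public static void main(String[] args) {
            String row = "op f3ff +iget-volatile/jumbo 52c y field-ref optimized|continue|throw";
            String[] f = row.trim().split("\\s+");     // f[0] is the "op" keyword
            int opcode = Integer.parseInt(f[1], 16);   // 0xf3ff
            String name = f[2];                        // "+iget-volatile/jumbo"
            String format = f[3];                      // "52c"
            boolean hasResult = f[4].equals("y");      // writes a result register
            String indexType = f[5];                   // "field-ref"
            String[] flags = f[6].split("\\|");        // optimized, continue, throw
            System.out.println(name + ": format=" + format + " index=" + indexType
                    + " result=" + hasResult + " flags=" + flags.length
                    + " opcode=0x" + Integer.toHexString(opcode));
        }
    }
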
diff --git a/opcode-gen/opcode-gen.awk b/opcode-gen/opcode-gen.awk
index 934413f..0e0ff6c 100644
--- a/opcode-gen/opcode-gen.awk
+++ b/opcode-gen/opcode-gen.awk
@@ -23,7 +23,6 @@
 BEGIN {
     MAX_OPCODE = 65535;
     MAX_PACKED_OPCODE = 511;
-    MAX_PACKED_OPCODE = 255; # TODO: Not for long!
     initIndexTypes();
     initFlags();
     if (readBytecodes()) exit 1;
@@ -81,7 +80,7 @@
     for (i = 0; i <= MAX_OPCODE; i++) {
         if (isUnused(i) || isOptimized(i)) continue;
         if (isFirst[i] == "true") {
-            printf("    //     DalvOps.%s\n", constName[i]);
+            printf("    //     Opcodes.%s\n", constName[i]);
         }
     }
 }
@@ -96,15 +95,30 @@
         nextOp = (nextOp == -1) ? "NO_NEXT" : constName[nextOp];
 
         printf("    public static final Dop %s =\n" \
-               "        new Dop(DalvOps.%s, DalvOps.%s,\n" \
-               "            DalvOps.%s, Form%s.THE_ONE, %s,\n" \
-               "            \"%s\");\n\n",
+               "        new Dop(Opcodes.%s, Opcodes.%s,\n" \
+               "            Opcodes.%s, Form%s.THE_ONE, %s);\n\n",
                constName[i], constName[i], family[i], nextOp, format[i],
-               hasResult[i], name[i]);
+               hasResult[i]);
     }
 }
 
-emission == "dops-init" {
+emission == "opcode-info-defs" {
+    emissionHandled = 1;
+
+    for (i = 0; i <= MAX_OPCODE; i++) {
+        if (isUnused(i) || isOptimized(i)) continue;
+
+        itype = toupper(indexType[i]);
+        gsub(/-/, "_", itype);
+
+        printf("    public static final Info %s =\n" \
+               "        new Info(Opcodes.%s, \"%s\",\n" \
+               "            InstructionCodec.FORMAT_%s, IndexType.%s);\n\n", \
+               constName[i], constName[i], name[i], toupper(format[i]), itype);
+    }
+}
+
+emission == "dops-init" || emission == "opcode-info-init" {
     emissionHandled = 1;
 
     for (i = 0; i <= MAX_OPCODE; i++) {
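
The new "opcode-info-defs" emission above writes one Info constant per non-optimized opcode into the generated dx/src/com/android/dx/io/OpcodeInfo.java. Following the printf format string, and assuming const-string is declared in bytecode.txt as format 21c with index type string-ref (its row is outside this hunk), a single emitted entry would look roughly like:

    // Approximate shape of one generated OpcodeInfo entry (illustration only;
    // the actual text is produced by the printf in opcode-gen.awk above).
    public static final Info CONST_STRING =
        new Info(Opcodes.CONST_STRING, "const-string",
            InstructionCodec.FORMAT_21C, IndexType.STRING_REF);
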
diff --git a/opcode-gen/regen-all b/opcode-gen/regen-all
index 68a76c3..afe6624 100755
--- a/opcode-gen/regen-all
+++ b/opcode-gen/regen-all
@@ -34,9 +34,10 @@
 # Be in the parent of the progdir when running the rest of the script.
 cd ".."
 
-${progdir}/opcode-gen dx/src/com/android/dx/dex/code/DalvOps.java
 ${progdir}/opcode-gen dx/src/com/android/dx/dex/code/Dops.java
 ${progdir}/opcode-gen dx/src/com/android/dx/dex/code/RopToDop.java
+${progdir}/opcode-gen dx/src/com/android/dx/io/OpcodeInfo.java
+${progdir}/opcode-gen dx/src/com/android/dx/io/Opcodes.java
 ${progdir}/opcode-gen libdex/DexOpcodes.c
 ${progdir}/opcode-gen libdex/DexOpcodes.h
 ${progdir}/opcode-gen libdex/InstrUtils.c
diff --git a/tests/057-iteration-performance/expected.txt b/tests/057-iteration-performance/expected.txt
deleted file mode 100644
index 9d59a5e..0000000
--- a/tests/057-iteration-performance/expected.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-Running A...
-Running B...
-Running C...
-Running D...
-Running E...
-Running F...
-Running G...
-Running H...
-Done with runs.
-
-All times are within the expected ranges.
diff --git a/tests/057-iteration-performance/info.txt b/tests/057-iteration-performance/info.txt
deleted file mode 100644
index 36b5adc..0000000
--- a/tests/057-iteration-performance/info.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-This is a performance test of various iterator uses. To see the numbers,
-invoke this test with the "--timing" option.
diff --git a/tests/057-iteration-performance/src/Main.java b/tests/057-iteration-performance/src/Main.java
deleted file mode 100644
index d562802..0000000
--- a/tests/057-iteration-performance/src/Main.java
+++ /dev/null
@@ -1,1108 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.util.ArrayList;
-
-/**
- * The matrix of tests includes the A-E axis for loop body contents and
- * the 0-5 axis for iterator style.
- *
- * <ul>
- * <li>A: empty body</li>
- * <li>B: array element access and update</li>
- * <li>C: instance field access and update</li>
- * <li>D: method call to empty method</li>
- * <li>E: synch and then method call to empty method</li>
- * <li>F: 5 method calls to empty method</li>
- * <li>G: one small object allocation (empty constructor)</li>
- * <li>H: copy 8k of bytes from one array to another</li>
- * </ul>
- *
- * <ul>
- * <li>0: for() loop backward to 0</li>
- * <li>1: for() loop forward to local variable</li>
- * <li>2: for() loop forward to array length</li>
- * <li>3: for(:) loop over array</li>
- * <li>4: for() loop forward to instance variable</li>
- * <li>5: for() loop forward to trivial method call</li>
- * <li>6: for(:) loop over ArrayList</li>
- * </ul>
- */
-public class Main {
-    static public final int BODIES = 8;
-    static public final int LOOPS = 7;
-
-    static public void main(String[] args) throws Exception {
-        boolean timing = (args.length >= 1) && args[0].equals("--timing");
-
-        int iters = 100;
-        double probeSec;
-
-        for (;;) {
-            long t0 = System.nanoTime();
-            runAllTests(iters, false);
-            long t1 = System.nanoTime();
-
-            probeSec = (t1 - t0) / 1000000000.0;
-            if (probeSec > 0.25) {
-                break;
-            }
-
-            iters *= 2;
-        }
-
-        // Attempt to arrange for the real test to take 20 seconds.
-        iters = (int) ((iters / probeSec) * 20);
-
-        if (timing) {
-            System.out.println("iters = " + iters);
-        }
-
-        run(timing, iters);
-    }
-
-    static private enum Normalization {
-        NONE, PER_COLUMN, TOP_LEFT;
-    }
-
-    static public void printTimings(double[][] timings, Normalization norm) {
-        System.out.println();
-        System.out.printf("%-7s   A        B        C        D        E" +
-                "        F        G        H\n",
-                (norm == Normalization.NONE) ? "(usec)" : "(ratio)");
-        System.out.println("      -------- -------- -------- -------- " +
-                "-------- -------- -------- --------");
-
-        double bases[] = new double[BODIES];
-        for (int i = 0; i < BODIES; i++) {
-            double n;
-            switch (norm) {
-                case PER_COLUMN:  n = timings[i][0]; break;
-                case TOP_LEFT:    n = timings[0][0]; break;
-                default /*NONE*/: n = 1.0;           break;
-            }
-            bases[i] = n;
-        }
-
-        for (int i = 0; i < LOOPS; i++) {
-            System.out.printf("%4d: %8.3g %8.3g %8.3g %8.3g %8.3g %8.3g " +
-                    "%8.3g %8.3g\n",
-                    i,
-                    timings[0][i] / bases[0],
-                    timings[1][i] / bases[1],
-                    timings[2][i] / bases[2],
-                    timings[3][i] / bases[3],
-                    timings[4][i] / bases[4],
-                    timings[5][i] / bases[5],
-                    timings[6][i] / bases[6],
-                    timings[7][i] / bases[7]);
-        }
-    }
-
-    static public void run(boolean timing, int iters) {
-        double[][] timings = null; // assign to avoid apparent javac bug
-
-        // Try up to 5 times to get good times.
-        for (int i = 0; i < 5; i++) {
-            double[][] newTimings = runAllTests(iters, timing || (i == 0));
-
-            if (timings == null) {
-                timings = newTimings;
-            } else {
-                combineTimings(timings, newTimings, i);
-            }
-
-            if (checkTimes(timings, timing)) {
-                break;
-            }
-        }
-
-        System.out.println("Done with runs.");
-
-        boolean goodTimes = checkTimes(timings, true);
-
-        if (! goodTimes) {
-            timing = true;
-        }
-
-        if (timing) {
-            printTimings(timings, Normalization.NONE);
-            printTimings(timings, Normalization.TOP_LEFT);
-            printTimings(timings, Normalization.PER_COLUMN);
-        } else {
-            System.out.println("\nAll times are within the expected ranges.");
-        }
-    }
-
-    static public void combineTimings(double[][] target, double[][] newTimes,
-            int oldWeight) {
-        for (int i = 0; i < target.length; i++) {
-            for (int j = 0; j < target[i].length; j++) {
-                target[i][j] =
-                    ((target[i][j] * oldWeight) + newTimes[i][j])
-                    / (oldWeight + 1);
-            }
-        }
-    }
-
-    static public boolean checkTimes(double[][] timings, boolean print) {
-        // expected increase over A1
-        double[][] expected = {
-            {  1.0,  2.3,  2.4,  3.3,  6.5, 12.0, 57.0,  94.0 },
-            {  1.2,  2.4,  2.5,  3.4,  6.6, 12.2, 60.0,  95.0 },
-            {  1.5,  2.6,  2.9,  3.5,  6.7, 12.4, 63.0,  96.0 },
-            {  1.6,  2.8,  2.9,  3.6,  6.8, 12.6, 63.5,  97.0 },
-            {  1.7,  3.0,  2.9,  3.7,  6.9, 12.8, 64.0,  98.0 },
-            {  6.0,  6.0,  6.0,  7.0, 10.0, 15.0, 64.5, 105.0 },
-            { 31.0, 31.2, 31.5, 34.0, 41.0, 43.0, 91.0, 135.0 },
-        };
-
-        boolean good = true;
-
-        for (int x = 0; x < BODIES; x++) {
-            for (int y = 0; y < LOOPS; y++) {
-                double ratio = timings[x][y] / timings[0][0];
-                if (ratio > expected[y][x]) {
-                    if (print) {
-                        System.out.printf("%c%d is too slow: %.3g vs. %.3g\n",
-                                (char) (x + 'A'), y, ratio, expected[y][x]);
-                    }
-                    good = false;
-                }
-            }
-        }
-
-        return good;
-    }
-
-    static public double[][] runAllTests(int iters, boolean print) {
-        // diters is used to get usec, not nanosec; hence the extra 1000.
-        double diters = (double) iters * INNER_COUNT * 1000;
-
-        double[][] timings = new double[BODIES][LOOPS];
-        long t0, t1, t2, t3, t4, t5, t6, t7;
-
-        // Column A
-
-        if (print) {
-            System.out.println("Running A...");
-        }
-
-        t0 = System.nanoTime();
-        testA0(iters);
-        t1 = System.nanoTime();
-        testA1(iters);
-        t2 = System.nanoTime();
-        testA2(iters);
-        t3 = System.nanoTime();
-        testA3(iters);
-        t4 = System.nanoTime();
-        testA4(iters);
-        t5 = System.nanoTime();
-        testA5(iters);
-        t6 = System.nanoTime();
-        testA6(iters);
-        t7 = System.nanoTime();
-
-        timings[0][0] = (t1 - t0) / diters;
-        timings[0][1] = (t2 - t1) / diters;
-        timings[0][2] = (t3 - t2) / diters;
-        timings[0][3] = (t4 - t3) / diters;
-        timings[0][4] = (t5 - t4) / diters;
-        timings[0][5] = (t6 - t5) / diters;
-        timings[0][6] = (t7 - t6) / diters;
-
-        // Column B
-
-        if (print) {
-            System.out.println("Running B...");
-        }
-
-        t0 = System.nanoTime();
-        testB0(iters);
-        t1 = System.nanoTime();
-        testB1(iters);
-        t2 = System.nanoTime();
-        testB2(iters);
-        t3 = System.nanoTime();
-        testB3(iters);
-        t4 = System.nanoTime();
-        testB4(iters);
-        t5 = System.nanoTime();
-        testB5(iters);
-        t6 = System.nanoTime();
-        testB6(iters);
-        t7 = System.nanoTime();
-
-        timings[1][0] = (t1 - t0) / diters;
-        timings[1][1] = (t2 - t1) / diters;
-        timings[1][2] = (t3 - t2) / diters;
-        timings[1][3] = (t4 - t3) / diters;
-        timings[1][4] = (t5 - t4) / diters;
-        timings[1][5] = (t6 - t5) / diters;
-        timings[1][6] = (t7 - t6) / diters;
-
-        // Column C
-
-        if (print) {
-            System.out.println("Running C...");
-        }
-
-        t0 = System.nanoTime();
-        testC0(iters);
-        t1 = System.nanoTime();
-        testC1(iters);
-        t2 = System.nanoTime();
-        testC2(iters);
-        t3 = System.nanoTime();
-        testC3(iters);
-        t4 = System.nanoTime();
-        testC4(iters);
-        t5 = System.nanoTime();
-        testC5(iters);
-        t6 = System.nanoTime();
-        testC6(iters);
-        t7 = System.nanoTime();
-
-        timings[2][0] = (t1 - t0) / diters;
-        timings[2][1] = (t2 - t1) / diters;
-        timings[2][2] = (t3 - t2) / diters;
-        timings[2][3] = (t4 - t3) / diters;
-        timings[2][4] = (t5 - t4) / diters;
-        timings[2][5] = (t6 - t5) / diters;
-        timings[2][6] = (t7 - t6) / diters;
-
-        // Column D
-
-        if (print) {
-            System.out.println("Running D...");
-        }
-
-        t0 = System.nanoTime();
-        testD0(iters);
-        t1 = System.nanoTime();
-        testD1(iters);
-        t2 = System.nanoTime();
-        testD2(iters);
-        t3 = System.nanoTime();
-        testD3(iters);
-        t4 = System.nanoTime();
-        testD4(iters);
-        t5 = System.nanoTime();
-        testD5(iters);
-        t6 = System.nanoTime();
-        testD6(iters);
-        t7 = System.nanoTime();
-
-        timings[3][0] = (t1 - t0) / diters;
-        timings[3][1] = (t2 - t1) / diters;
-        timings[3][2] = (t3 - t2) / diters;
-        timings[3][3] = (t4 - t3) / diters;
-        timings[3][4] = (t5 - t4) / diters;
-        timings[3][5] = (t6 - t5) / diters;
-        timings[3][6] = (t7 - t6) / diters;
-
-        // Column E
-
-        if (print) {
-            System.out.println("Running E...");
-        }
-
-        t0 = System.nanoTime();
-        testE0(iters);
-        t1 = System.nanoTime();
-        testE1(iters);
-        t2 = System.nanoTime();
-        testE2(iters);
-        t3 = System.nanoTime();
-        testE3(iters);
-        t4 = System.nanoTime();
-        testE4(iters);
-        t5 = System.nanoTime();
-        testE5(iters);
-        t6 = System.nanoTime();
-        testE6(iters);
-        t7 = System.nanoTime();
-
-        timings[4][0] = (t1 - t0) / diters;
-        timings[4][1] = (t2 - t1) / diters;
-        timings[4][2] = (t3 - t2) / diters;
-        timings[4][3] = (t4 - t3) / diters;
-        timings[4][4] = (t5 - t4) / diters;
-        timings[4][5] = (t6 - t5) / diters;
-        timings[4][6] = (t7 - t6) / diters;
-
-        // Column F
-
-        if (print) {
-            System.out.println("Running F...");
-        }
-
-        t0 = System.nanoTime();
-        testF0(iters);
-        t1 = System.nanoTime();
-        testF1(iters);
-        t2 = System.nanoTime();
-        testF2(iters);
-        t3 = System.nanoTime();
-        testF3(iters);
-        t4 = System.nanoTime();
-        testF4(iters);
-        t5 = System.nanoTime();
-        testF5(iters);
-        t6 = System.nanoTime();
-        testF6(iters);
-        t7 = System.nanoTime();
-
-        timings[5][0] = (t1 - t0) / diters;
-        timings[5][1] = (t2 - t1) / diters;
-        timings[5][2] = (t3 - t2) / diters;
-        timings[5][3] = (t4 - t3) / diters;
-        timings[5][4] = (t5 - t4) / diters;
-        timings[5][5] = (t6 - t5) / diters;
-        timings[5][6] = (t7 - t6) / diters;
-
-        // Reduce the iters for the last two, since they're much slower.
-
-        iters /= 5;
-        diters /= 5;
-
-        // Column G
-
-        if (print) {
-            System.out.println("Running G...");
-        }
-
-        t0 = System.nanoTime();
-        testG0(iters);
-        t1 = System.nanoTime();
-        testG1(iters);
-        t2 = System.nanoTime();
-        testG2(iters);
-        t3 = System.nanoTime();
-        testG3(iters);
-        t4 = System.nanoTime();
-        testG4(iters);
-        t5 = System.nanoTime();
-        testG5(iters);
-        t6 = System.nanoTime();
-        testG6(iters);
-        t7 = System.nanoTime();
-
-        timings[6][0] = (t1 - t0) / diters;
-        timings[6][1] = (t2 - t1) / diters;
-        timings[6][2] = (t3 - t2) / diters;
-        timings[6][3] = (t4 - t3) / diters;
-        timings[6][4] = (t5 - t4) / diters;
-        timings[6][5] = (t6 - t5) / diters;
-        timings[6][6] = (t7 - t6) / diters;
-
-        // Column H
-
-        if (print) {
-            System.out.println("Running H...");
-        }
-
-        t0 = System.nanoTime();
-        testH0(iters);
-        t1 = System.nanoTime();
-        testH1(iters);
-        t2 = System.nanoTime();
-        testH2(iters);
-        t3 = System.nanoTime();
-        testH3(iters);
-        t4 = System.nanoTime();
-        testH4(iters);
-        t5 = System.nanoTime();
-        testH5(iters);
-        t6 = System.nanoTime();
-        testH6(iters);
-        t7 = System.nanoTime();
-
-        timings[7][0] = (t1 - t0) / diters;
-        timings[7][1] = (t2 - t1) / diters;
-        timings[7][2] = (t3 - t2) / diters;
-        timings[7][3] = (t4 - t3) / diters;
-        timings[7][4] = (t5 - t4) / diters;
-        timings[7][5] = (t6 - t5) / diters;
-        timings[7][6] = (t7 - t6) / diters;
-
-        return timings;
-    }
-
-    // Helper bits and pieces
-
-    static private final int INNER_COUNT = 100;
-    static private final int[] INNER_ARRAY = new int[INNER_COUNT];
-    static private final ArrayList<Object> INNER_LIST =
-        new ArrayList<Object>(INNER_COUNT);
-    static private final Target TARGET = new Target();
-    static private final int ARRAY_BYTES = 8192;
-    static private final byte[] BYTES_1 = new byte[ARRAY_BYTES];
-    static private final byte[] BYTES_2 = new byte[ARRAY_BYTES];
-
-    static {
-        for (int i = 0; i < INNER_COUNT; i++) {
-            INNER_LIST.add(null);
-        }
-    }
-
-    public static class Target {
-        public int value;
-        public int size = INNER_COUNT;
-
-        public void simple() {
-            // empty
-        }
-
-        public int size() {
-            return size;
-        }
-    }
-
-    // The tests themselves
-
-    static public void testA0(int iters) {
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = INNER_COUNT; i > 0; i--) {
-                // empty
-            }
-        }
-    }
-
-    static public void testA1(int iters) {
-        int count = INNER_COUNT;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < count; i++) {
-                // empty
-            }
-        }
-    }
-
-    static public void testA2(int iters) {
-        int[] array = INNER_ARRAY;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < array.length; i++) {
-                // empty
-            }
-        }
-    }
-
-    static public void testA3(int iters) {
-        int[] array = INNER_ARRAY;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i : array) {
-                // empty
-            }
-        }
-    }
-
-    static public void testA4(int iters) {
-        Target target = TARGET;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < target.size; i++) {
-                // empty
-            }
-        }
-    }
-
-    static public void testA5(int iters) {
-        Target target = TARGET;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < target.size(); i++) {
-                // empty
-            }
-        }
-    }
-
-    static public void testA6(int iters) {
-        ArrayList<Object> list = INNER_LIST;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (Object o : list) {
-                // empty
-            }
-        }
-    }
-
-    static public void testB0(int iters) {
-        Target target = TARGET;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = INNER_COUNT; i > 0; i--) {
-                target.value++;
-            }
-        }
-    }
-
-    static public void testB1(int iters) {
-        Target target = TARGET;
-        int count = INNER_COUNT;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < count; i++) {
-                target.value++;
-            }
-        }
-    }
-
-    static public void testB2(int iters) {
-        Target target = TARGET;
-        int[] array = INNER_ARRAY;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < array.length; i++) {
-                target.value++;
-            }
-        }
-    }
-
-    static public void testB3(int iters) {
-        Target target = TARGET;
-        int[] array = INNER_ARRAY;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i : array) {
-                target.value++;
-            }
-        }
-    }
-
-    static public void testB4(int iters) {
-        Target target = TARGET;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < target.size; i++) {
-                target.value++;
-            }
-        }
-    }
-
-    static public void testB5(int iters) {
-        Target target = TARGET;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < target.size(); i++) {
-                target.value++;
-            }
-        }
-    }
-
-    static public void testB6(int iters) {
-        Target target = TARGET;
-        ArrayList<Object> list = INNER_LIST;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (Object o : list) {
-                target.value++;
-            }
-        }
-    }
-
-    static public void testC0(int iters) {
-        int[] array = INNER_ARRAY;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = INNER_COUNT - 1; i >= 0; i--) {
-                array[i]++;
-            }
-        }
-    }
-
-    static public void testC1(int iters) {
-        int[] array = INNER_ARRAY;
-        int count = INNER_COUNT;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < count; i++) {
-                array[i]++;
-            }
-        }
-    }
-
-    static public void testC2(int iters) {
-        int[] array = INNER_ARRAY;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < array.length; i++) {
-                array[i]++;
-            }
-        }
-    }
-
-    static public void testC3(int iters) {
-        int[] array = INNER_ARRAY;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i : array) {
-                array[0] = i + 1;
-            }
-        }
-    }
-
-    static public void testC4(int iters) {
-        Target target = TARGET;
-        int[] array = INNER_ARRAY;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < target.size; i++) {
-                array[i]++;
-            }
-        }
-    }
-
-    static public void testC5(int iters) {
-        int[] array = INNER_ARRAY;
-        Target target = TARGET;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < target.size(); i++) {
-                array[i]++;
-            }
-        }
-    }
-
-    static public void testC6(int iters) {
-        int[] array = INNER_ARRAY;
-        ArrayList<Object> list = INNER_LIST;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (Object o : list) {
-                array[0]++;
-            }
-        }
-    }
-
-    static public void testD0(int iters) {
-        Target target = TARGET;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = INNER_COUNT; i > 0; i--) {
-                target.simple();
-            }
-        }
-    }
-
-    static public void testD1(int iters) {
-        Target target = TARGET;
-        int count = INNER_COUNT;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < count; i++) {
-                target.simple();
-            }
-        }
-    }
-
-    static public void testD2(int iters) {
-        Target target = TARGET;
-        int[] array = INNER_ARRAY;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < array.length; i++) {
-                target.simple();
-            }
-        }
-    }
-
-    static public void testD3(int iters) {
-        Target target = TARGET;
-        int[] array = INNER_ARRAY;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i : array) {
-                target.simple();
-            }
-        }
-    }
-
-    static public void testD4(int iters) {
-        Target target = TARGET;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < target.size; i++) {
-                target.simple();
-            }
-        }
-    }
-
-    static public void testD5(int iters) {
-        Target target = TARGET;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < target.size(); i++) {
-                target.simple();
-            }
-        }
-    }
-
-    static public void testD6(int iters) {
-        Target target = TARGET;
-        ArrayList<Object> list = INNER_LIST;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (Object o : list) {
-                target.simple();
-            }
-        }
-    }
-
-    static public void testE0(int iters) {
-        Target target = TARGET;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = INNER_COUNT; i > 0; i--) {
-                synchronized (target) {
-                    target.simple();
-                }
-            }
-        }
-    }
-
-    static public void testE1(int iters) {
-        Target target = TARGET;
-        int count = INNER_COUNT;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < count; i++) {
-                synchronized (target) {
-                    target.simple();
-                }
-            }
-        }
-    }
-
-    static public void testE2(int iters) {
-        Target target = TARGET;
-        int[] array = INNER_ARRAY;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < array.length; i++) {
-                synchronized (target) {
-                    target.simple();
-                }
-            }
-        }
-    }
-
-    static public void testE3(int iters) {
-        Target target = TARGET;
-        int[] array = INNER_ARRAY;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i : array) {
-                synchronized (target) {
-                    target.simple();
-                }
-            }
-        }
-    }
-
-    static public void testE4(int iters) {
-        Target target = TARGET;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < target.size; i++) {
-                synchronized (target) {
-                    target.simple();
-                }
-            }
-        }
-    }
-
-    static public void testE5(int iters) {
-        Target target = TARGET;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < target.size(); i++) {
-                synchronized (target) {
-                    target.simple();
-                }
-            }
-        }
-    }
-
-    static public void testE6(int iters) {
-        Target target = TARGET;
-        ArrayList<Object> list = INNER_LIST;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (Object o : list) {
-                synchronized (target) {
-                    target.simple();
-                }
-            }
-        }
-    }
-
-    static public void testF0(int iters) {
-        Target target = TARGET;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = INNER_COUNT; i > 0; i--) {
-                target.simple();
-                target.simple();
-                target.simple();
-                target.simple();
-                target.simple();
-            }
-        }
-    }
-
-    static public void testF1(int iters) {
-        Target target = TARGET;
-        int count = INNER_COUNT;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < count; i++) {
-                target.simple();
-                target.simple();
-                target.simple();
-                target.simple();
-                target.simple();
-            }
-        }
-    }
-
-    static public void testF2(int iters) {
-        Target target = TARGET;
-        int[] array = INNER_ARRAY;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < array.length; i++) {
-                target.simple();
-                target.simple();
-                target.simple();
-                target.simple();
-                target.simple();
-            }
-        }
-    }
-
-    static public void testF3(int iters) {
-        Target target = TARGET;
-        int[] array = INNER_ARRAY;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i : array) {
-                target.simple();
-                target.simple();
-                target.simple();
-                target.simple();
-                target.simple();
-            }
-        }
-    }
-
-    static public void testF4(int iters) {
-        Target target = TARGET;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < target.size; i++) {
-                target.simple();
-                target.simple();
-                target.simple();
-                target.simple();
-                target.simple();
-            }
-        }
-    }
-
-    static public void testF5(int iters) {
-        Target target = TARGET;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < target.size(); i++) {
-                target.simple();
-                target.simple();
-                target.simple();
-                target.simple();
-                target.simple();
-            }
-        }
-    }
-
-    static public void testF6(int iters) {
-        Target target = TARGET;
-        ArrayList<Object> list = INNER_LIST;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (Object o : list) {
-                target.simple();
-                target.simple();
-                target.simple();
-                target.simple();
-                target.simple();
-            }
-        }
-    }
-
-    static public void testG0(int iters) {
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = INNER_COUNT; i > 0; i--) {
-                new Target();
-            }
-        }
-    }
-
-    static public void testG1(int iters) {
-        int count = INNER_COUNT;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < count; i++) {
-                new Target();
-            }
-        }
-    }
-
-    static public void testG2(int iters) {
-        int[] array = INNER_ARRAY;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < array.length; i++) {
-                new Target();
-            }
-        }
-    }
-
-    static public void testG3(int iters) {
-        int[] array = INNER_ARRAY;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i : array) {
-                new Target();
-            }
-        }
-    }
-
-    static public void testG4(int iters) {
-        Target target = TARGET;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < target.size; i++) {
-                new Target();
-            }
-        }
-    }
-
-    static public void testG5(int iters) {
-        Target target = TARGET;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < target.size(); i++) {
-                new Target();
-            }
-        }
-    }
-
-    static public void testG6(int iters) {
-        ArrayList<Object> list = INNER_LIST;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (Object o : list) {
-                new Target();
-            }
-        }
-    }
-
-    static public void testH0(int iters) {
-        byte[] b1 = BYTES_1;
-        byte[] b2 = BYTES_2;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = INNER_COUNT; i > 0; i--) {
-                System.arraycopy(b1, 0, b2, 0, ARRAY_BYTES);
-            }
-        }
-    }
-
-    static public void testH1(int iters) {
-        byte[] b1 = BYTES_1;
-        byte[] b2 = BYTES_2;
-        int count = INNER_COUNT;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < count; i++) {
-                System.arraycopy(b1, 0, b2, 0, ARRAY_BYTES);
-            }
-        }
-    }
-
-    static public void testH2(int iters) {
-        byte[] b1 = BYTES_1;
-        byte[] b2 = BYTES_2;
-        int[] array = INNER_ARRAY;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < array.length; i++) {
-                System.arraycopy(b1, 0, b2, 0, ARRAY_BYTES);
-            }
-        }
-    }
-
-    static public void testH3(int iters) {
-        byte[] b1 = BYTES_1;
-        byte[] b2 = BYTES_2;
-        int[] array = INNER_ARRAY;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i : array) {
-                System.arraycopy(b1, 0, b2, 0, ARRAY_BYTES);
-            }
-        }
-    }
-
-    static public void testH4(int iters) {
-        Target target = TARGET;
-        byte[] b1 = BYTES_1;
-        byte[] b2 = BYTES_2;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < target.size; i++) {
-                System.arraycopy(b1, 0, b2, 0, ARRAY_BYTES);
-            }
-        }
-    }
-
-    static public void testH5(int iters) {
-        Target target = TARGET;
-        byte[] b1 = BYTES_1;
-        byte[] b2 = BYTES_2;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (int i = 0; i < target.size(); i++) {
-                System.arraycopy(b1, 0, b2, 0, ARRAY_BYTES);
-            }
-        }
-    }
-
-    static public void testH6(int iters) {
-        byte[] b1 = BYTES_1;
-        byte[] b2 = BYTES_2;
-        ArrayList<Object> list = INNER_LIST;
-
-        for (int outer = iters; outer > 0; outer--) {
-            for (Object o : list) {
-                System.arraycopy(b1, 0, b2, 0, ARRAY_BYTES);
-            }
-        }
-    }
-}
diff --git a/tests/061-out-of-memory/expected.txt b/tests/061-out-of-memory/expected.txt
index e1ed5da..ca87629 100644
--- a/tests/061-out-of-memory/expected.txt
+++ b/tests/061-out-of-memory/expected.txt
@@ -1,4 +1,5 @@
 tests beginning
+Got expected huge-array OOM
 testOomeLarge beginning
 testOomeLarge succeeded
 testOomeSmall beginning
diff --git a/tests/061-out-of-memory/src/Main.java b/tests/061-out-of-memory/src/Main.java
index fcf7136..b5999b3 100644
--- a/tests/061-out-of-memory/src/Main.java
+++ b/tests/061-out-of-memory/src/Main.java
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+import java.util.Arrays;
 import java.util.LinkedList;
 
 /**
@@ -22,16 +23,30 @@
 public class Main {
     public static void main(String args[]) {
         System.out.println("tests beginning");
+        testHugeArray();
         testOomeLarge();
         testOomeSmall();
         System.out.println("tests succeeded");
     }
 
+    private static void testHugeArray() {
+        try {
+            final int COUNT = 32768*32768 + 4;
+            int[] tooBig = new int[COUNT];
+
+            Arrays.fill(tooBig, 0xdd);
+        } catch (OutOfMemoryError oom) {
+            System.out.println("Got expected huge-array OOM");
+        }
+    }
+
     private static void testOomeLarge() {
         System.out.println("testOomeLarge beginning");
 
         /* Just shy of the typical max heap size so that it will actually
          * try to allocate it instead of short-circuiting.
+         *
+         * TODO: stop assuming the VM defaults to 16MB max
          */
         final int SIXTEEN_MB = (16 * 1024 * 1024 - 32);
 
@@ -56,6 +71,8 @@
     /* Do this in another method so that the GC has a chance of freeing the
      * list afterwards.  Even if we null out list when we're done, the conservative
      * GC may see a stale pointer to it in a register.
+     *
+     * TODO: stop assuming the VM defaults to 16MB max
      */
     private static boolean testOomeSmallInternal() {
         final int SIXTEEN_MB = (16 * 1024 * 1024);
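
The added huge-array case rests on simple sizing arithmetic: 32768*32768 + 4 is just over 2^30 int elements, about 4 GiB of array data, so the allocation can never succeed under the 16 MB default heap that the TODO comments refer to. A quick back-of-the-envelope check (illustrative, not part of the test):

    // Sizing sketch for the huge-array allocation in testHugeArray above.
    // Assumes 4-byte ints; the 16 MB heap figure comes from the TODO comments.
    public class HugeArraySizing {
        public static void main(String[] args) {
            long count = 32768L * 32768L + 4;   // 2^30 + 4 elements
            long bytes = count * 4L;            // ~4 GiB of int data alone
            System.out.println(bytes + " bytes requested vs. ~16 MiB heap");
        }
    }
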
diff --git a/tests/089-jumbo-opcodes/build b/tests/089-jumbo-opcodes/build
new file mode 100644
index 0000000..c8130d2
--- /dev/null
+++ b/tests/089-jumbo-opcodes/build
@@ -0,0 +1,53 @@
+#!/bin/bash
+#
+# Copyright (C) 2008 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Stop if something fails.
+set -e
+
+# Write out filler classes containing 65500 static fields, 65500 instance
+# fields, and 65500 methods, so that the references from Main to the Jumbo*
+# classes' fields and methods are guaranteed to be jumbo instructions.
+
+awk '
+BEGIN {
+    writeFileField("FillerStatic", "static public int staticInt");
+    writeFileField("FillerField", "public int fieldInt");
+    writeFileMethod("FillerMethod");
+}
+function writeFileField(name, type) {
+    fileName = "src/" name ".java";
+    printf("public class %s {\n", name) > fileName;
+    for (i = 1; i <= 65500; i++) {
+        printf("    %s%d;\n", type, i) > fileName;
+    }
+    printf("}\n") > fileName;
+}
+function writeFileMethod(name) {
+    fileName = "src/" name ".java";
+    printf("public class %s {\n", name) > fileName;
+    for (i = 1; i <= 65500; i++) {
+      printf("    public void meth%d() { }\n", i) > fileName;
+    }
+    printf("}\n") > fileName;
+}'
+
+mkdir classes
+${JAVAC} -d classes `find src -name '*.java'`
+${JAVAC} -d classes `find src2 -name '*.java'`
+
+dx -JXmx1024m --debug --dex --no-optimize --positions=none --no-locals \
+    --dump-to=classes.lst --output=classes.dex classes
+zip test.jar classes.dex
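
For context, the awk helper above writes three filler sources into src/ before compilation; they are generated at build time and never checked in. A hypothetical, heavily abridged excerpt of the generated src/FillerStatic.java would be:

    // Hypothetical abridged excerpt of the generated src/FillerStatic.java; the
    // real file holds all 65500 declarations emitted by the awk script above.
    public class FillerStatic {
        static public int staticInt1;
        static public int staticInt2;
        // ... continues through staticInt65500, pushing the field ids of the
        // Jumbo* test classes past the 16-bit range so dx must emit /jumbo ops.
    }
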
diff --git a/tests/089-jumbo-opcodes/expected.txt b/tests/089-jumbo-opcodes/expected.txt
new file mode 100644
index 0000000..253547c
--- /dev/null
+++ b/tests/089-jumbo-opcodes/expected.txt
@@ -0,0 +1,10 @@
+Invoked virtual
+Invoked super
+Invoked direct
+Invoked static
+Invoked interface
+Got expected InstantationError
+Got expected NoSuchFieldError
+Got expected NoSuchFieldError
+Got expected NoSuchMethodError
+Got expected NoSuchMethodError
diff --git a/tests/089-jumbo-opcodes/info.txt b/tests/089-jumbo-opcodes/info.txt
new file mode 100644
index 0000000..56f9624
--- /dev/null
+++ b/tests/089-jumbo-opcodes/info.txt
@@ -0,0 +1,4 @@
+Test basic functionality of jumbo opcodes. Note that check-cast/jumbo and
+filled-new-array/jumbo can't be generated by dx currently (because dx can't
+support more than 65536 classes yet, and it always uses low registers for the
+result of these instructions).
diff --git a/tests/089-jumbo-opcodes/src/Main.java b/tests/089-jumbo-opcodes/src/Main.java
new file mode 100644
index 0000000..44aa133
--- /dev/null
+++ b/tests/089-jumbo-opcodes/src/Main.java
@@ -0,0 +1,793 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import other.Mutant;
+
+/*
+ * Entry point and tests that are expected to succeed.
+ */
+public class Main {
+
+    /**
+     * Drives tests.
+     */
+    public static void main(String[] args) {
+
+        // Test static put/get
+        testStaticInt();
+        testStaticWide();
+        testStaticObject();
+        testStaticBoolean();
+        testStaticByte();
+        testStaticChar();
+        testStaticShort();
+
+        // Test field put/get
+        JumboField fieldTest = new JumboField();
+        testFieldInt(fieldTest);
+        testFieldWide(fieldTest);
+        testFieldObject(fieldTest);
+        testFieldBoolean(fieldTest);
+        testFieldByte(fieldTest);
+        testFieldChar(fieldTest);
+        testFieldShort(fieldTest);
+
+        // Test method invokes
+        JumboMethod methodTest = new JumboMethod();
+        methodTest.testMethods();
+
+        // Test remaining jumbo instructions
+        // const-class/jumbo, check-cast/jumbo, instance-of/jumbo,
+        // new-instance/jumbo, new-array/jumbo, filled-new-array/jumbo
+        // throw-verification-error/jumbo
+        JumboRegister registerTest = new JumboRegister();
+        registerTest.testRegisters();
+    }
+
+    // Test sput/jumbo & sget/jumbo
+    public static void testStaticInt() {
+        int putInt = 0x12345678;
+        JumboStatic.testInt = putInt;
+        int getInt = JumboStatic.testInt;
+        if (putInt != getInt) {
+            System.out.println("Static put int: " + putInt +
+                " does not match static get int: " + getInt);
+        }
+    }
+
+    // Test sput-wide/jumbo & sget-wide/jumbo
+    public static void testStaticWide() {
+        long putWide = 0xfedcba9876543210l;
+        JumboStatic.testWide = putWide;
+        long getWide = JumboStatic.testWide;
+        if (putWide != getWide) {
+            System.out.println("Static put wide: " + putWide +
+                " does not match static get wide: " + getWide);
+        }
+    }
+
+    // Test sput-object/jumbo & sget-object/jumbo
+    public static void testStaticObject() {
+        Object putObject = new Object();
+        JumboStatic.testObject = putObject;
+        Object getObject = JumboStatic.testObject;
+        if (putObject != getObject) {
+            System.out.println("Static put object: " + putObject +
+                " does not match static get object: " + getObject);
+        }
+    }
+
+    // Test sput-boolean/jumbo & sget-boolean/jumbo
+    public static void testStaticBoolean() {
+        boolean putBoolean = true;
+        JumboStatic.testBoolean = putBoolean;
+        boolean getBoolean = JumboStatic.testBoolean;
+        if (putBoolean != getBoolean) {
+            System.out.println("Static put boolean: " + putBoolean +
+                " does not match static get boolean: " + getBoolean);
+        }
+    }
+
+    // Test sput-byte/jumbo & sget-byte/jumbo
+    public static void testStaticByte() {
+        byte putByte = 0x6D;
+        JumboStatic.testByte = putByte;
+        byte getByte = JumboStatic.testByte;
+        if (putByte != getByte) {
+            System.out.println("Static put byte: " + putByte +
+                " does not match static get byte: " + getByte);
+        }
+    }
+
+    // Test sput-char/jumbo & sget-char/jumbo
+    public static void testStaticChar() {
+        char putChar = 0xE5;
+        JumboStatic.testChar = putChar;
+        char getChar = JumboStatic.testChar;
+        if (putChar != getChar) {
+            System.out.println("Static put char: " + putChar +
+                " does not match static get char: " + getChar);
+        }
+    }
+
+    // Test sput-short/jumbo & sget-short/jumbo
+    public static void testStaticShort() {
+        short putShort = 0x7A3B;
+        JumboStatic.testShort = putShort;
+        short getShort = JumboStatic.testShort;
+        if (putShort != getShort) {
+            System.out.println("Static put short: " + putShort +
+                " does not match static get short: " + getShort);
+        }
+    }
+
+    // Test iput/jumbo & iget/jumbo
+    public static void testFieldInt(JumboField fieldTest) {
+        int putInt = 0x12345678;
+        fieldTest.testInt = putInt;
+        int getInt = fieldTest.testInt;
+        if (putInt != getInt) {
+            System.out.println("Field put int: " + putInt +
+                " does not match field get int: " + getInt);
+        }
+    }
+
+    // Test iput-wide/jumbo & iget-wide/jumbo
+    public static void testFieldWide(JumboField fieldTest) {
+        long putWide = 0xfedcba9876543210l;
+        fieldTest.testWide = putWide;
+        long getWide = fieldTest.testWide;
+        if (putWide != getWide) {
+            System.out.println("Field put wide: " + putWide +
+                " does not match field get wide: " + getWide);
+        }
+    }
+
+    // Test iput-object/jumbo & iget-object/jumbo
+    public static void testFieldObject(JumboField fieldTest) {
+        Object putObject = new Object();
+        fieldTest.testObject = putObject;
+        Object getObject = fieldTest.testObject;
+        if (putObject != getObject) {
+            System.out.println("Field put object: " + putObject +
+                " does not match field get object: " + getObject);
+        }
+    }
+
+    // Test iput-boolean/jumbo & iget-boolean/jumbo
+    public static void testFieldBoolean(JumboField fieldTest) {
+        boolean putBoolean = true;
+        fieldTest.testBoolean = putBoolean;
+        boolean getBoolean = fieldTest.testBoolean;
+        if (putBoolean != getBoolean) {
+            System.out.println("Field put boolean: " + putBoolean +
+                " does not match field get boolean: " + getBoolean);
+        }
+    }
+
+    // Test iput-byte/jumbo & iget-byte/jumbo
+    public static void testFieldByte(JumboField fieldTest) {
+        byte putByte = 0x6D;
+        fieldTest.testByte = putByte;
+        byte getByte = fieldTest.testByte;
+        if (putByte != getByte) {
+            System.out.println("Field put byte: " + putByte +
+                " does not match field get byte: " + getByte);
+        }
+    }
+
+    // Test iput-char/jumbo & iget-char/jumbo
+    public static void testFieldChar(JumboField fieldTest) {
+        char putChar = 0xE5;
+        fieldTest.testChar = putChar;
+        char getChar = fieldTest.testChar;
+        if (putChar != getChar) {
+            System.out.println("Field put char: " + putChar +
+                " does not match field get char: " + getChar);
+        }
+    }
+
+    // Test iput-short/jumbo & iget-short/jumbo
+    public static void testFieldShort(JumboField fieldTest) {
+        short putShort = 0x7A3B;
+        fieldTest.testShort = putShort;
+        short getShort = fieldTest.testShort;
+        if (putShort != getShort) {
+            System.out.println("Field put short: " + putShort +
+                " does not match field get short: " + getShort);
+        }
+    }
+}
+
+class JumboStatic {
+    static int staticInt1;
+    static int staticInt2;
+    static int staticInt3;
+    static int staticInt4;
+    static int staticInt5;
+    static int staticInt6;
+    static int staticInt7;
+    static int staticInt8;
+    static int staticInt9;
+    static int staticInt10;
+    static int staticInt11;
+    static int staticInt12;
+    static int staticInt13;
+    static int staticInt14;
+    static int staticInt15;
+    static int staticInt16;
+    static int staticInt17;
+    static int staticInt18;
+    static int staticInt19;
+    static int staticInt20;
+    static int staticInt21;
+    static int staticInt22;
+    static int staticInt23;
+    static int staticInt24;
+    static int staticInt25;
+    static int staticInt26;
+    static int staticInt27;
+    static int staticInt28;
+    static int staticInt29;
+    static int staticInt30;
+    static int staticInt31;
+    static int staticInt32;
+    static int staticInt33;
+    static int staticInt34;
+    static int staticInt35;
+    static int staticInt36;
+    static int staticInt37;
+    static int staticInt38;
+    static int staticInt39;
+    static int staticInt40;
+    static int staticInt41;
+    static int staticInt42;
+    static int staticInt43;
+    static int staticInt44;
+    static int staticInt45;
+    static int staticInt46;
+    static int staticInt47;
+    static int staticInt48;
+    static int staticInt49;
+    static int staticInt50;
+
+    static int     testInt;
+    static long    testWide;
+    static Object  testObject;
+    static boolean testBoolean;
+    static byte    testByte;
+    static char    testChar;
+    static short   testShort;
+}
+
+class JumboField {
+    int fieldInt1;
+    int fieldInt2;
+    int fieldInt3;
+    int fieldInt4;
+    int fieldInt5;
+    int fieldInt6;
+    int fieldInt7;
+    int fieldInt8;
+    int fieldInt9;
+    int fieldInt10;
+    int fieldInt11;
+    int fieldInt12;
+    int fieldInt13;
+    int fieldInt14;
+    int fieldInt15;
+    int fieldInt16;
+    int fieldInt17;
+    int fieldInt18;
+    int fieldInt19;
+    int fieldInt20;
+    int fieldInt21;
+    int fieldInt22;
+    int fieldInt23;
+    int fieldInt24;
+    int fieldInt25;
+    int fieldInt26;
+    int fieldInt27;
+    int fieldInt28;
+    int fieldInt29;
+    int fieldInt30;
+    int fieldInt31;
+    int fieldInt32;
+    int fieldInt33;
+    int fieldInt34;
+    int fieldInt35;
+    int fieldInt36;
+    int fieldInt37;
+    int fieldInt38;
+    int fieldInt39;
+    int fieldInt40;
+    int fieldInt41;
+    int fieldInt42;
+    int fieldInt43;
+    int fieldInt44;
+    int fieldInt45;
+    int fieldInt46;
+    int fieldInt47;
+    int fieldInt48;
+    int fieldInt49;
+    int fieldInt50;
+
+    int     testInt;
+    long    testWide;
+    Object  testObject;
+    boolean testBoolean;
+    byte    testByte;
+    char    testChar;
+    short   testShort;
+}
+
+class JumboMethodSuper {
+    void testSuper() {
+        System.out.println("Invoked super");
+    }
+}
+
+interface JumboMethodInterface {
+    void testInterface();
+}
+
+class JumboMethod extends JumboMethodSuper implements JumboMethodInterface {
+    void meth1() { }
+    void meth2() { }
+    void meth3() { }
+    void meth4() { }
+    void meth5() { }
+    void meth6() { }
+    void meth7() { }
+    void meth8() { }
+    void meth9() { }
+    void meth10() { }
+    void meth11() { }
+    void meth12() { }
+    void meth13() { }
+    void meth14() { }
+    void meth15() { }
+    void meth16() { }
+    void meth17() { }
+    void meth18() { }
+    void meth19() { }
+    void meth20() { }
+    void meth21() { }
+    void meth22() { }
+    void meth23() { }
+    void meth24() { }
+    void meth25() { }
+    void meth26() { }
+    void meth27() { }
+    void meth28() { }
+    void meth29() { }
+    void meth30() { }
+    void meth31() { }
+    void meth32() { }
+    void meth33() { }
+    void meth34() { }
+    void meth35() { }
+    void meth36() { }
+    void meth37() { }
+    void meth38() { }
+    void meth39() { }
+    void meth40() { }
+    void meth41() { }
+    void meth42() { }
+    void meth43() { }
+    void meth44() { }
+    void meth45() { }
+    void meth46() { }
+    void meth47() { }
+    void meth48() { }
+    void meth49() { }
+    void meth50() { }
+
+    void testMethods() {
+        testVirtual();
+        super.testSuper();
+        testDirect();
+        testStatic();
+        ((JumboMethodInterface) this).testInterface();
+    }
+
+    void testVirtual() {
+        System.out.println("Invoked virtual");
+    }
+
+    void testSuper() {
+        System.out.println("Invoked base");
+    }
+
+    private void testDirect() {
+        System.out.println("Invoked direct");
+    }
+
+    static void testStatic() {
+        System.out.println("Invoked static");
+    }
+
+    public void testInterface() {
+        System.out.println("Invoked interface");
+    }
+}
+
+class JumboRegister {
+    void testRegisters() {
+        // Create a bunch of registers
+        Class c1 = Thread.class;
+        Class c2 = Thread.class;
+        Class c3 = Thread.class;
+        Class c4 = Thread.class;
+        Class c5 = Thread.class;
+        Class c6 = Thread.class;
+        Class c7 = Thread.class;
+        Class c8 = Thread.class;
+        Class c9 = Thread.class;
+        Class c10 = Thread.class;
+        Class c11 = Thread.class;
+        Class c12 = Thread.class;
+        Class c13 = Thread.class;
+        Class c14 = Thread.class;
+        Class c15 = Thread.class;
+        Class c16 = Thread.class;
+        Class c17 = Thread.class;
+        Class c18 = Thread.class;
+        Class c19 = Thread.class;
+        Class c20 = Thread.class;
+        Class c21 = Thread.class;
+        Class c22 = Thread.class;
+        Class c23 = Thread.class;
+        Class c24 = Thread.class;
+        Class c25 = Thread.class;
+        Class c26 = Thread.class;
+        Class c27 = Thread.class;
+        Class c28 = Thread.class;
+        Class c29 = Thread.class;
+        Class c30 = Thread.class;
+        Class c31 = Thread.class;
+        Class c32 = Thread.class;
+        Class c33 = Thread.class;
+        Class c34 = Thread.class;
+        Class c35 = Thread.class;
+        Class c36 = Thread.class;
+        Class c37 = Thread.class;
+        Class c38 = Thread.class;
+        Class c39 = Thread.class;
+        Class c40 = Thread.class;
+        Class c41 = Thread.class;
+        Class c42 = Thread.class;
+        Class c43 = Thread.class;
+        Class c44 = Thread.class;
+        Class c45 = Thread.class;
+        Class c46 = Thread.class;
+        Class c47 = Thread.class;
+        Class c48 = Thread.class;
+        Class c49 = Thread.class;
+        Class c50 = Thread.class;
+        Class c51 = Thread.class;
+        Class c52 = Thread.class;
+        Class c53 = Thread.class;
+        Class c54 = Thread.class;
+        Class c55 = Thread.class;
+        Class c56 = Thread.class;
+        Class c57 = Thread.class;
+        Class c58 = Thread.class;
+        Class c59 = Thread.class;
+        Class c60 = Thread.class;
+        Class c61 = Thread.class;
+        Class c62 = Thread.class;
+        Class c63 = Thread.class;
+        Class c64 = Thread.class;
+        Class c65 = Thread.class;
+        Class c66 = Thread.class;
+        Class c67 = Thread.class;
+        Class c68 = Thread.class;
+        Class c69 = Thread.class;
+        Class c70 = Thread.class;
+        Class c71 = Thread.class;
+        Class c72 = Thread.class;
+        Class c73 = Thread.class;
+        Class c74 = Thread.class;
+        Class c75 = Thread.class;
+        Class c76 = Thread.class;
+        Class c77 = Thread.class;
+        Class c78 = Thread.class;
+        Class c79 = Thread.class;
+        Class c80 = Thread.class;
+        Class c81 = Thread.class;
+        Class c82 = Thread.class;
+        Class c83 = Thread.class;
+        Class c84 = Thread.class;
+        Class c85 = Thread.class;
+        Class c86 = Thread.class;
+        Class c87 = Thread.class;
+        Class c88 = Thread.class;
+        Class c89 = Thread.class;
+        Class c90 = Thread.class;
+        Class c91 = Thread.class;
+        Class c92 = Thread.class;
+        Class c93 = Thread.class;
+        Class c94 = Thread.class;
+        Class c95 = Thread.class;
+        Class c96 = Thread.class;
+        Class c97 = Thread.class;
+        Class c98 = Thread.class;
+        Class c99 = Thread.class;
+        Class c100 = Thread.class;
+        Class c101 = Thread.class;
+        Class c102 = Thread.class;
+        Class c103 = Thread.class;
+        Class c104 = Thread.class;
+        Class c105 = Thread.class;
+        Class c106 = Thread.class;
+        Class c107 = Thread.class;
+        Class c108 = Thread.class;
+        Class c109 = Thread.class;
+        Class c110 = Thread.class;
+        Class c111 = Thread.class;
+        Class c112 = Thread.class;
+        Class c113 = Thread.class;
+        Class c114 = Thread.class;
+        Class c115 = Thread.class;
+        Class c116 = Thread.class;
+        Class c117 = Thread.class;
+        Class c118 = Thread.class;
+        Class c119 = Thread.class;
+        Class c120 = Thread.class;
+        Class c121 = Thread.class;
+        Class c122 = Thread.class;
+        Class c123 = Thread.class;
+        Class c124 = Thread.class;
+        Class c125 = Thread.class;
+        Class c126 = Thread.class;
+        Class c127 = Thread.class;
+        Class c128 = Thread.class;
+        Class c129 = Thread.class;
+        Class c130 = Thread.class;
+        Class c131 = Thread.class;
+        Class c132 = Thread.class;
+        Class c133 = Thread.class;
+        Class c134 = Thread.class;
+        Class c135 = Thread.class;
+        Class c136 = Thread.class;
+        Class c137 = Thread.class;
+        Class c138 = Thread.class;
+        Class c139 = Thread.class;
+        Class c140 = Thread.class;
+        Class c141 = Thread.class;
+        Class c142 = Thread.class;
+        Class c143 = Thread.class;
+        Class c144 = Thread.class;
+        Class c145 = Thread.class;
+        Class c146 = Thread.class;
+        Class c147 = Thread.class;
+        Class c148 = Thread.class;
+        Class c149 = Thread.class;
+        Class c150 = Thread.class;
+        Class c151 = Thread.class;
+        Class c152 = Thread.class;
+        Class c153 = Thread.class;
+        Class c154 = Thread.class;
+        Class c155 = Thread.class;
+        Class c156 = Thread.class;
+        Class c157 = Thread.class;
+        Class c158 = Thread.class;
+        Class c159 = Thread.class;
+        Class c160 = Thread.class;
+        Class c161 = Thread.class;
+        Class c162 = Thread.class;
+        Class c163 = Thread.class;
+        Class c164 = Thread.class;
+        Class c165 = Thread.class;
+        Class c166 = Thread.class;
+        Class c167 = Thread.class;
+        Class c168 = Thread.class;
+        Class c169 = Thread.class;
+        Class c170 = Thread.class;
+        Class c171 = Thread.class;
+        Class c172 = Thread.class;
+        Class c173 = Thread.class;
+        Class c174 = Thread.class;
+        Class c175 = Thread.class;
+        Class c176 = Thread.class;
+        Class c177 = Thread.class;
+        Class c178 = Thread.class;
+        Class c179 = Thread.class;
+        Class c180 = Thread.class;
+        Class c181 = Thread.class;
+        Class c182 = Thread.class;
+        Class c183 = Thread.class;
+        Class c184 = Thread.class;
+        Class c185 = Thread.class;
+        Class c186 = Thread.class;
+        Class c187 = Thread.class;
+        Class c188 = Thread.class;
+        Class c189 = Thread.class;
+        Class c190 = Thread.class;
+        Class c191 = Thread.class;
+        Class c192 = Thread.class;
+        Class c193 = Thread.class;
+        Class c194 = Thread.class;
+        Class c195 = Thread.class;
+        Class c196 = Thread.class;
+        Class c197 = Thread.class;
+        Class c198 = Thread.class;
+        Class c199 = Thread.class;
+        Class c200 = Thread.class;
+        Class c201 = Thread.class;
+        Class c202 = Thread.class;
+        Class c203 = Thread.class;
+        Class c204 = Thread.class;
+        Class c205 = Thread.class;
+        Class c206 = Thread.class;
+        Class c207 = Thread.class;
+        Class c208 = Thread.class;
+        Class c209 = Thread.class;
+        Class c210 = Thread.class;
+        Class c211 = Thread.class;
+        Class c212 = Thread.class;
+        Class c213 = Thread.class;
+        Class c214 = Thread.class;
+        Class c215 = Thread.class;
+        Class c216 = Thread.class;
+        Class c217 = Thread.class;
+        Class c218 = Thread.class;
+        Class c219 = Thread.class;
+        Class c220 = Thread.class;
+        Class c221 = Thread.class;
+        Class c222 = Thread.class;
+        Class c223 = Thread.class;
+        Class c224 = Thread.class;
+        Class c225 = Thread.class;
+        Class c226 = Thread.class;
+        Class c227 = Thread.class;
+        Class c228 = Thread.class;
+        Class c229 = Thread.class;
+        Class c230 = Thread.class;
+        Class c231 = Thread.class;
+        Class c232 = Thread.class;
+        Class c233 = Thread.class;
+        Class c234 = Thread.class;
+        Class c235 = Thread.class;
+        Class c236 = Thread.class;
+        Class c237 = Thread.class;
+        Class c238 = Thread.class;
+        Class c239 = Thread.class;
+        Class c240 = Thread.class;
+        Class c241 = Thread.class;
+        Class c242 = Thread.class;
+        Class c243 = Thread.class;
+        Class c244 = Thread.class;
+        Class c245 = Thread.class;
+        Class c246 = Thread.class;
+        Class c247 = Thread.class;
+        Class c248 = Thread.class;
+        Class c249 = Thread.class;
+        Class c250 = Thread.class;
+        Class c251 = Thread.class;
+        Class c252 = Thread.class;
+        Class c253 = Thread.class;
+        Class c254 = Thread.class;
+        Class c255 = Thread.class;
+
+        // Test const-class/jumbo
+        Class c256 = Thread.class;
+
+        // Test check-cast/jumbo
+
+        // Test instance-of/jumbo
+        boolean b1 = c1 instanceof Object;
+        if (!b1) System.out.println("instance-of/jumbo returned wrong result");
+
+        // Test new-instance/jumbo
+        Object o1 = new Object();
+
+        // Test new-array/jumbo
+        int[] a1 = new int[10];
+        a1[0] = 1;
+        a1[1] = 2;
+        a1[2] = 3;
+        a1[3] = 4;
+        a1[4] = 5;
+        a1[5] = 6;
+        a1[6] = 7;
+        a1[7] = 8;
+        a1[8] = 9;
+        a1[9] = 10;
+
+        // Test filled-new-array/jumbo
+
+        // Test throw-verification-error/jumbo
+        try {
+            MaybeAbstract ma = new MaybeAbstract();
+            System.err.println("ERROR: MaybeAbstract succeeded unexpectedly");
+        } catch (InstantiationError ie) {
+            System.out.println("Got expected InstantationError");
+        } catch (Exception ex) {
+            System.err.println("Got unexpected MaybeAbstract failure");
+        }
+        testMissingStuff();
+
+        // Do something with those registers to force other ops to be jumbo
+        useRegs(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10);
+        useRegs(c11, c12, c13, c14, c15, c16, c17, c18, c19, c20);
+        useRegs(c21, c22, c23, c24, c25, c26, c27, c28, c29, c30);
+        useRegs(c31, c32, c33, c34, c35, c36, c37, c38, c39, c40);
+        useRegs(c41, c42, c43, c44, c45, c46, c47, c48, c49, c50);
+        useRegs(c51, c52, c53, c54, c55, c56, c57, c58, c59, c60);
+        useRegs(c61, c62, c63, c64, c65, c66, c67, c68, c69, c70);
+        useRegs(c71, c72, c73, c74, c75, c76, c77, c78, c79, c80);
+        useRegs(c81, c82, c83, c84, c85, c86, c87, c88, c89, c90);
+        useRegs(c91, c92, c93, c94, c95, c96, c97, c98, c99, c100);
+        useRegs(c101, c102, c103, c104, c105, c106, c107, c108, c109, c110);
+        useRegs(c111, c112, c113, c114, c115, c116, c117, c118, c119, c120);
+        useRegs(c121, c122, c123, c124, c125, c126, c127, c128, c129, c130);
+        useRegs(c131, c132, c133, c134, c135, c136, c137, c138, c139, c140);
+        useRegs(c141, c142, c143, c144, c145, c146, c147, c148, c149, c150);
+        useRegs(c151, c152, c153, c154, c155, c156, c157, c158, c159, c160);
+        useRegs(c161, c162, c163, c164, c165, c166, c167, c168, c169, c170);
+        useRegs(c171, c172, c173, c174, c175, c176, c177, c178, c179, c180);
+        useRegs(c181, c182, c183, c184, c185, c186, c187, c188, c189, c190);
+        useRegs(c191, c192, c193, c194, c195, c196, c197, c198, c199, c200);
+        useRegs(c201, c202, c203, c204, c205, c206, c207, c208, c209, c210);
+        useRegs(c211, c212, c213, c214, c215, c216, c217, c218, c219, c220);
+        useRegs(c221, c222, c223, c224, c225, c226, c227, c228, c229, c230);
+        useRegs(c231, c232, c233, c234, c235, c236, c237, c238, c239, c240);
+        useRegs(c241, c242, c243, c244, c245, c246, c247, c248, c249, c250);
+        useRegs(c251, c252, c253, c254, c255, c256, c256, c256, c256, c256);
+
+        useRegs(b1);
+        useRegs(o1);
+        useRegs(a1);
+    }
+
+    // Trigger more jumbo verification errors
+    static void testMissingStuff() {
+        Mutant mutant = new Mutant();
+
+        try {
+            int x = mutant.disappearingField;
+        } catch (NoSuchFieldError nsfe) {
+            System.out.println("Got expected NoSuchFieldError");
+        }
+
+        try {
+            int y = Mutant.disappearingStaticField;
+        } catch (NoSuchFieldError nsfe) {
+            System.out.println("Got expected NoSuchFieldError");
+        }
+
+        try {
+            mutant.disappearingMethod();
+        } catch (NoSuchMethodError nsme) {
+            System.out.println("Got expected NoSuchMethodError");
+        }
+
+        try {
+            Mutant.disappearingStaticMethod();
+        } catch (NoSuchMethodError nsme) {
+            System.out.println("Got expected NoSuchMethodError");
+        }
+    }
+
+    void useRegs(Object o1, Object o2, Object o3, Object o4, Object o5,
+        Object o6, Object o7, Object o8, Object o9, Object o10) {
+    }
+
+    void useRegs(Object o1) { }
+    void useRegs(boolean b1) { }
+}
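Note on the pattern above: each field and static test in Main follows the round-trip shape visible at the top of this hunk, writing a value through a put instruction, reading it back through the matching get, and printing only on mismatch. A minimal sketch of that shape for the JumboField.testShort field declared above (the local names are illustrative, not the test's actual variables):

    JumboField f = new JumboField();
    short putShort = (short) 0x1234;          // arbitrary sample value
    f.testShort = putShort;                   // field put (the jumbo put variant is what the test exercises)
    short getShort = f.testShort;             // matching field get
    if (putShort != getShort) {
        System.out.println("Field put short: " + putShort +
            " does not match field get short: " + getShort);
    }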
diff --git a/tests/089-jumbo-opcodes/src/MaybeAbstract.java b/tests/089-jumbo-opcodes/src/MaybeAbstract.java
new file mode 100644
index 0000000..6d3b05b
--- /dev/null
+++ b/tests/089-jumbo-opcodes/src/MaybeAbstract.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public /*abstract*/ class MaybeAbstract {
+    public MaybeAbstract() {}
+    int foo() { return 0; }
+}
diff --git a/tests/089-jumbo-opcodes/src/other/Mutant.java b/tests/089-jumbo-opcodes/src/other/Mutant.java
new file mode 100644
index 0000000..ec4754b
--- /dev/null
+++ b/tests/089-jumbo-opcodes/src/other/Mutant.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package other;
+
+/**
+ * Parts of this class will disappear or change form.
+ */
+public class Mutant {
+    public int disappearingField = 3;
+    public static int disappearingStaticField = 4;
+
+    public void disappearingMethod() {
+        System.out.println("bye");
+    }
+    public static void disappearingStaticMethod() {
+        System.out.println("kthxbai");
+    }
+
+    public int inaccessibleField = 5;
+    public static int inaccessibleStaticField = 6;
+
+    public void inaccessibleMethod() {
+        System.out.println("no");
+    }
+
+    public static void inaccessibleStaticMethod() {
+        System.out.println("nay");
+    }
+}
diff --git a/tests/089-jumbo-opcodes/src2/MaybeAbstract.java b/tests/089-jumbo-opcodes/src2/MaybeAbstract.java
new file mode 100644
index 0000000..8b70a07
--- /dev/null
+++ b/tests/089-jumbo-opcodes/src2/MaybeAbstract.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public abstract class MaybeAbstract {
+    public MaybeAbstract() {}
+    int foo() { return 0; }
+}
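The src2 copy of MaybeAbstract differs from the src copy only by the abstract modifier (the src file carries the commented-out /*abstract*/ marker). Assuming the usual run-test convention that classes under src2 replace the src versions after the main sources are compiled, Main is compiled against the concrete class but executes against the abstract one, which is what drives the throw-verification-error block in JumboRegister.testRegisters. A small sketch of that compile-time/run-time split, with the compiler wording given only approximately:

    // Compiled against src/MaybeAbstract.java (concrete), this line is legal.
    // Against src2/MaybeAbstract.java it would be rejected at compile time
    // (roughly: "MaybeAbstract is abstract; cannot be instantiated").
    MaybeAbstract ma = new MaybeAbstract();   // at run time, with src2 swapped in, this throws InstantiationError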
diff --git a/tests/089-jumbo-opcodes/src2/other/Mutant.java b/tests/089-jumbo-opcodes/src2/other/Mutant.java
new file mode 100644
index 0000000..67cd36d
--- /dev/null
+++ b/tests/089-jumbo-opcodes/src2/other/Mutant.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package other;
+
+/**
+ * Parts of this class will disappear or change form.
+ */
+public class Mutant {
+    //public int disappearingField = 3;
+    //public static int disappearingStaticField = 4;
+
+    //public static void disappearingMethod() {
+    //    System.out.println("bye");
+    //}
+    //public static void disappearingStaticMethod() {
+    //    System.out.println("kthxbai");
+    //}
+
+    protected int inaccessibleField = 5;
+    protected static int inaccessibleStaticField = 6;
+
+    protected void inaccessibleMethod() {
+        System.out.println("no");
+    }
+
+    protected static void inaccessibleStaticMethod() {
+        System.out.println("nay");
+    }
+}
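The src2 copy of Mutant comments out the disappearing members and downgrades the remaining ones from public to protected. With the same src/src2 swap assumed above, references that resolved at compile time fail at run time, which is what testMissingStuff in Main checks for the removed fields and methods; the protected members are presumably there to provoke access-check failures (IllegalAccessError) in parts of the test not shown in this hunk. A minimal sketch of one of the checked cases:

    other.Mutant mutant = new other.Mutant();
    try {
        mutant.disappearingMethod();          // resolves against src/other/Mutant.java at compile time
    } catch (NoSuchMethodError expected) {    // thrown once the src2 version is the one being run
        System.out.println("Got expected NoSuchMethodError");
    }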
diff --git a/tests/etc/host-run-test-jar b/tests/etc/host-run-test-jar
index cedaee5..d3c0fd5 100755
--- a/tests/etc/host-run-test-jar
+++ b/tests/etc/host-run-test-jar
@@ -123,6 +123,11 @@
 DATA_DIR=/tmp
 DEBUG_OPTS="-Xcheck:jni -Xrunjdwp:transport=dt_socket,address=8000,server=y,suspend=n"
 
+if [ ! -d $DATA_DIR/dalvik-cache ]; then
+    mkdir -p $DATA_DIR/dalvik-cache
+    [[ $? -ne 0 ]] && exit
+fi
+
 export ANDROID_PRINTF_LOG=brief
 if [ "$DEV_MODE" = "y" ]; then
     export ANDROID_LOG_TAGS='*:d'
diff --git a/tests/etc/push-and-run-test-jar b/tests/etc/push-and-run-test-jar
index df66a8e..e2fde42 100755
--- a/tests/etc/push-and-run-test-jar
+++ b/tests/etc/push-and-run-test-jar
@@ -10,7 +10,7 @@
 #   --portable    -- use the portable interpreter
 #   --debug       -- wait for debugger to attach
 #   --zygote      -- use the zygote (if so, all other options are ignored)
-#   --dev         -- development mode
+#   --dev         -- development mode (print the vm invocation cmdline)
 #   --no-verify   -- turn off verification (on by default)
 #   --no-optimize -- turn off optimization (on by default)
 #   --no-precise  -- turn off precise GC (on by default)
@@ -28,6 +28,7 @@
 ZYGOTE="n"
 QUIET="n"
 PRECISE="y"
+DEV_MODE="n"
 
 while true; do
     if [ "x$1" = "x--quiet" ]; then
@@ -53,7 +54,7 @@
         msg "Spawning from zygote"
         shift
     elif [ "x$1" = "x--dev" ]; then
-        # not used; ignore
+        DEV_MODE="y"
         shift
     elif [ "x$1" = "x--no-verify" ]; then
         VERIFY="n"
@@ -125,6 +126,10 @@
 if [ "$ZYGOTE" = "y" ]; then
     adb shell cd /data \; dvz -classpath test.jar Main "$@"
 else
-    adb shell cd /data \; dalvikvm $DEX_VERIFY $DEX_OPTIMIZE $DEX_DEBUG \
-        $GC_OPTS -cp test.jar "-Xint:${INTERP}" -ea Main "$@"
+    cmdline="cd /data; dalvikvm $DEX_VERIFY $DEX_OPTIMIZE $DEX_DEBUG \
+        $GC_OPTS -cp test.jar -Xint:${INTERP} -ea Main"
+    if [ "$DEV_MODE" = "y" ]; then
+        echo $cmdline "$@"
+    fi
+    adb shell $cmdline "$@"
 fi
diff --git a/tools/dmtracedump/TraceDump.c b/tools/dmtracedump/TraceDump.c
index 3f80064..4127530 100644
--- a/tools/dmtracedump/TraceDump.c
+++ b/tools/dmtracedump/TraceDump.c
@@ -1,19 +1,18 @@
-/* //device/tools/dmtracedump/TraceDump.c
-**
-** Copyright 2006, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-**     http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
+/*
+ * Copyright (C) 2006 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 
 /*
  * Process dmtrace output.
@@ -47,18 +46,6 @@
 /* Size of temporary buffers for escaping html strings */
 #define HTML_BUFSIZE 10240
 
-/* Size of methodId->method cache */
-#define METHOD_CACHE_SIZE 2048
-#define METHOD_CACHE_SIZE_MASK (METHOD_CACHE_SIZE - 1)
-
-/* Some filter constants */
-#define FILTER_TAG '*'
-#define FILTER_FLAG_THREAD '+'
-#define FILTER_TYPE_CLASS 0
-#define FILTER_TYPE_METHOD 1
-
-#define DEFAULT_ACTIVE_THREADS 8
-
 char *htmlHeader =
 "<html>\n<head>\n<script type=\"text/javascript\" src=\"%ssortable.js\"></script>\n"
 "<script langugage=\"javascript\">\n"
@@ -140,7 +127,6 @@
 typedef struct ThreadEntry {
     int         threadId;
     const char* threadName;
-    uint64_t    elapsedTime;
 } ThreadEntry;
 
 struct MethodEntry;
@@ -198,8 +184,6 @@
     ThreadEntry* threads;
     int          numMethods;
     MethodEntry* methods;       /* 2 extra methods: "toplevel" and "unknown" */
-    int*         methodCache;   /* methodId->methodIndex mapping */
-    // TODO change to map methodId->method itself
 } DataKeys;
 
 #define TOPLEVEL_INDEX 0
@@ -211,15 +195,10 @@
 } StackEntry;
 
 typedef struct CallStack {
-    int           top;
-    StackEntry    calls[MAX_STACK_DEPTH];
-    uint64_t      lastEventTime;
-    uint64_t      threadStartTime;
-    uint64_t*     remTimes;
-    // Note: remTimes keeps a sum of 'un-allocated' time for each thread, in case
-    // we need to allocate it to one (or many) filter later. This would happen when
-    // we see a method exit that maches a filter, but whose entry we hadn't seen.
-    // TODO: consider moving remTimes into FilterTimes and change logic appropriately
+    int         top;
+    StackEntry  calls[MAX_STACK_DEPTH];
+    uint64_t    lastEventTime;
+    uint64_t    threadStartTime;
 } CallStack;
 
 typedef struct DiffEntry {
@@ -236,7 +215,6 @@
     const char* traceFileName;
     const char* diffFileName;
     const char* graphFileName;
-    const char* filterFileName;
     int keepDotFile;
     int dump;
     int outputHtml;
@@ -253,31 +231,6 @@
     UniqueMethodEntry *uniqueMethods;
 } TraceData;
 
-typedef struct FilterKey {
-    int       type[2];    /* 0=class, 1=method; 2 needed for start and end keys */
-    uint32_t  flags;      /* 1st bit = include cross-thread time */
-    char*     keys[2];    /* 2 needed for start and end keys */
-} FilterKey;
-
-typedef struct FilterTimes {
-    uint64_t   totalWaitTime;
-    uint64_t*  threadWaitTimes;
-    uint64_t*  threadExecutionTimesWhileWaiting;
-    uint64_t*  threadExecutionTimes;
-} FilterTimes;
-
-typedef struct Filter {
-    char*       filterName;
-    FilterKey*  filterKeys;
-    int         numKeys;
-    int         activeCount;
-    int*        activeThreads;
-    int*        activationKeys;
-    FilterTimes times;
-} Filter;
-
-int numFilters = 0; // global
-
 static Options gOptions;
 
 /* Escapes characters in the source string that are html special entities.
@@ -445,37 +398,6 @@
 
 /*
  * This comparison function is called from qsort() to sort
- * threads into decreasing order of elapsed time.
- */
-int compareElapsed(const void *a, const void *b) {
-    const ThreadEntry *threadA, *threadB;
-    uint64_t elapsed1, elapsed2;
-    int result = 0;
-
-    threadA = (ThreadEntry const *)a;
-    threadB = (ThreadEntry const *)b;
-    elapsed1 = threadA->elapsedTime;
-    elapsed2 = threadB->elapsedTime;
-    if (elapsed1 < elapsed2)
-        return 1;
-    if (elapsed1 > elapsed2)
-        return -1;
-
-    /* If the elapsed times of two threads are equal, then sort them
-     * by thread id.
-     */
-    int idA = threadA->threadId;
-    int idB = threadB->threadId;
-    if (idA < idB)
-        result = -1;
-    if (idA > idB)
-        result = 1;
-
-    return result;
-}
-
-/*
- * This comparison function is called from qsort() to sort
  * TimedMethods into decreasing order of inclusive elapsed time.
  */
 int compareTimedMethod(const void *a, const void *b) {
@@ -647,7 +569,6 @@
     free(pKeys->fileData);
     free(pKeys->threads);
     free(pKeys->methods);
-    free(pKeys->methodCache);
     free(pKeys);
 }
 
@@ -674,15 +595,20 @@
     return -1;
 }
 
-int countLinesToChar(const char* data, int len, const char toFind)
+/*
+ * Count the number of lines until the next token.
+ *
+ * Returns -1 if none found before EOF.
+ */
+int countLinesToToken(const char* data, int len)
 {
     int count = 0;
     int next;
 
-    while (*data != toFind) {
+    while (*data != TOKEN_CHAR) {
         next = findNextChar(data, len, '\n');
         if (next < 0)
-	    return count;
+            return -1;
         count++;
         data += next+1;
         len -= next+1;
@@ -692,16 +618,6 @@
 }
 
 /*
- * Count the number of lines until the next token.
- *
- * Returns 0 if none found before EOF.
- */
-int countLinesToToken(const char* data, int len)
-{
-    return countLinesToChar(data, len, TOKEN_CHAR);
-}
-
-/*
  * Make sure we're at the start of the right section.
  *
  * Returns the length of the token line, or -1 if something is wrong.
@@ -1060,6 +976,9 @@
     if (offset < 0)
         goto fail;
 
+    /* Reduce our allocation now that we know where the end of the key section is. */
+    pKeys->fileData = (char *)realloc(pKeys->fileData, offset);
+    pKeys->fileLen = offset;
     /* Leave fp pointing to the beginning of the data section. */
     fseek(fp, offset, SEEK_SET);
 
@@ -1085,7 +1004,7 @@
         printf("Methods (%d):\n", pKeys->numMethods);
         for (i = 0; i < pKeys->numMethods; i++) {
             printf("0x%08x %s : %s : %s\n",
-                   pKeys->methods[i].methodId >> 2, pKeys->methods[i].className,
+                   pKeys->methods[i].methodId, pKeys->methods[i].className,
                    pKeys->methods[i].methodName, pKeys->methods[i].signature);
         }
     }
@@ -1159,7 +1078,7 @@
 }
 
 /*
- * Look up a method by its method ID (using binary search).
+ * Look up a method by its method ID.
  *
  * Returns NULL if no matching method was found.
  */
@@ -1167,18 +1086,6 @@
 {
     int hi, lo, mid;
     unsigned int id;
-    int hashedId;
-
-    /* Create cache if it doesn't already exist */
-    if (pKeys->methodCache == NULL) {
-        pKeys->methodCache = (int*) calloc(METHOD_CACHE_SIZE, sizeof(int));
-    }
-
-    // ids are multiples of 4, so shift
-    hashedId = (methodId >> 2) & METHOD_CACHE_SIZE_MASK;
-    if (pKeys->methodCache[hashedId]) /* cache hit */
-        if (pKeys->methods[pKeys->methodCache[hashedId]].methodId == methodId)
-	    return &pKeys->methods[pKeys->methodCache[hashedId]];
 
     lo = 0;
     hi = pKeys->numMethods - 1;
@@ -1187,11 +1094,9 @@
         mid = (hi + lo) / 2;
 
         id = pKeys->methods[mid].methodId;
-        if (id == methodId) {         /* match, put in cache */
-	    hashedId = (methodId >> 2) & METHOD_CACHE_SIZE_MASK;
-	    pKeys->methodCache[hashedId] = mid;
-	    return &pKeys->methods[mid];
-	} else if (id < methodId)       /* too low */
+        if (id == methodId)           /* match */
+            return &pKeys->methods[mid];
+        else if (id < methodId)       /* too low */
             lo = mid + 1;
         else                          /* too high */
             hi = mid - 1;
@@ -1585,7 +1490,6 @@
     printf("<ul>\n");
     printf("  <li><a href=\"#exclusive\">Exclusive profile</a></li>\n");
     printf("  <li><a href=\"#inclusive\">Inclusive profile</a></li>\n");
-    printf("  <li><a href=\"#thread\">Thread profile</a></li>\n");
     printf("  <li><a href=\"#class\">Class/method profile</a></li>\n");
     printf("  <li><a href=\"#method\">Method/class profile</a></li>\n");
     printf("</ul>\n\n");
@@ -1596,7 +1500,6 @@
     printf("<a href=\"#contents\">[Top]</a>\n");
     printf("<a href=\"#exclusive\">[Exclusive]</a>\n");
     printf("<a href=\"#inclusive\">[Inclusive]</a>\n");
-    printf("<a href=\"#thread\">[Thread]</a>\n");
     printf("<a href=\"#class\">[Class]</a>\n");
     printf("<a href=\"#method\">[Method]</a>\n");
     printf("<br><br>\n");
@@ -1772,10 +1675,12 @@
     char classBuf[HTML_BUFSIZE], methodBuf[HTML_BUFSIZE];
     char signatureBuf[HTML_BUFSIZE];
     char anchor_buf[80];
+    char *anchor_close = "";
 
     total = sumThreadTime;
     anchor_buf[0] = 0;
     if (gOptions.outputHtml) {
+        anchor_close = "</a>";
         printf("<a name=\"inclusive\"></a>\n");
         printf("<hr>\n");
         outputNavigationBar();
@@ -1860,122 +1765,6 @@
     }
 }
 
-void printThreadProfile(ThreadEntry *pThreads, int numThreads, uint64_t sumThreadTime, Filter** filters)
-{
-    int ii, jj;
-    ThreadEntry thread;
-    double total, per, sum_per;
-    uint64_t sum;
-    char threadBuf[HTML_BUFSIZE];
-    char anchor_buf[80];
-    int drawTable;
-
-    total = sumThreadTime;
-    anchor_buf[0] = 0;
-    if (gOptions.outputHtml) {
-        printf("<a name=\"thread\"></a>\n");
-        printf("<hr>\n");
-        outputNavigationBar();
-    } else {
-        printf("\n%s\n", profileSeparator);
-    }
-
-    /* Sort the threads into decreasing order of elapsed time. */
-    qsort(pThreads, numThreads, sizeof(ThreadEntry), compareElapsed);
-
-    printf("\nElapsed times for each thread, sorted by elapsed time.\n");
-    printf("Also includes percentage of time spent during the <i>execution</i> of any filters.\n\n");
-
-    if (gOptions.outputHtml) {
-        printf("<br><br>\n<pre>\n");
-    }
-
-    printf("    Usecs   self %%  sum %%");
-    for (ii = 0; ii < numFilters; ++ii) {
-        printf("  %s %%", filters[ii]->filterName);
-    }
-    printf("  tid   ThreadName\n");
-    sum = 0;
-
-    for (ii = 0; ii < numThreads; ++ii) {
-        int threadId;
-        char *threadName;
-        uint64_t time;
-
-        thread = pThreads[ii];
-
-        threadId = thread.threadId;
-        threadName = (char*)(thread.threadName);
-        time = thread.elapsedTime;
-
-        sum += time;
-        per = 100.0 * time / total;
-        sum_per = 100.0 * sum / total;
-
-        if (gOptions.outputHtml) {
-	    threadName = htmlEscape(threadName, threadBuf, HTML_BUFSIZE);
-        }
-
-	printf("%9llu  %6.2f %6.2f", time, per, sum_per);
-	for (jj = 0; jj < numFilters; jj++) {
-	    printf(" %6.2f", 100.0 * filters[jj]->times.threadExecutionTimes[threadId] / time);
-	}
-	printf("    %3d %s\n", threadId, threadName);
-    }
-
-    if (gOptions.outputHtml)
-        printf("</pre><br />");
-
-    printf("\n\nBreak-down of portion of time spent by each thread while waiting on a filter method.\n");
-
-    for (ii = 0; ii < numFilters; ++ii) {
-        // Draw a table for each filter that measures wait time
-        drawTable = 0;
-	for (jj = 0; jj < filters[ii]->numKeys; jj++)
-	    if (filters[ii]->filterKeys[jj].flags == 1)
-	        drawTable = 1;
-
-	if (drawTable) {
-
-	    if (gOptions.outputHtml)
-	        printf("<br/><br/>\n<pre>\n");
-	    printf("Filter: %s\n", filters[ii]->filterName);
-	    printf("Total waiting cycles: %llu (%6.2f%% of total)\n",
-		   filters[ii]->times.totalWaitTime,
-		   100.0 * filters[ii]->times.totalWaitTime / sum);
-
-	    if (filters[ii]->times.totalWaitTime > 0) {
-
-	        printf("Details: \n\n");
-
-		printf(" Waiting cycles    %% of total waiting time   execution time while waiting    thread name\n");
-
-		for (jj = 0; jj < numThreads; jj++) {
-
-		    thread = pThreads[jj];
-
-		    char *threadName;
-		    threadName = (char*) thread.threadName;
-		    if (gOptions.outputHtml) {
-		        threadName = htmlEscape(threadName, threadBuf, HTML_BUFSIZE);
-		    }
-
-		    printf(" %9llu                   %6.2f                     %6.2f               %s\n",
-			   filters[ii]->times.threadWaitTimes[thread.threadId],
-			   100.0 * filters[ii]->times.threadWaitTimes[thread.threadId] / filters[ii]->times.totalWaitTime,
-			   100.0 * filters[ii]->times.threadExecutionTimesWhileWaiting[thread.threadId] / filters[ii]->times.totalWaitTime,
-			   threadName);
-		}
-	    }
-
-	    if (gOptions.outputHtml)
-	        printf("</pre>\n");
-
-	}
-    }
-
-}
-
 void createClassList(TraceData* traceData, MethodEntry **pMethods, int numMethods)
 {
     int ii;
@@ -2477,464 +2266,16 @@
 }
 
 /*
- * Determines whether the given FilterKey matches the method. The FilterKey's
- * key that is used to match against the method is determined by index.
- */
-int keyMatchesMethod(FilterKey filterKey, MethodEntry* method, int index)
-{
-    if (filterKey.type[index] == 0) { // Class
-#if 0
-        fprintf(stderr, "  class is %s; filter key is %s\n", method->className, filterKey.keys[index]);
-#endif
-        if (strcmp(method->className, filterKey.keys[index]) == 0) {
-	    return 1;
-	}
-    } else { // Method
-        if (method->methodName != NULL) {
-	    // Get fully-qualified name
-            // TODO: parse class name and method name an put them in structure to avoid
-            // allocating memory here
-	    char* str = malloc ((strlen(method->className) + strlen(method->methodName) + 2) * sizeof(char));
-	    strcpy(str, method->className);
-	    strcat(str, ".");
-	    strcat(str, method->methodName);
-#if 0
-	    fprintf(stderr, "  method is %s; filter key is %s\n", str, filterKey.keys[index]);
-#endif
-	    if (strcmp(str, filterKey.keys[index]) == 0) {
-	        free(str);
-	        return 1;
-	    }
-	    free(str);
-	}
-    }
-    return 0;
-}
-
-/*
- * Adds the appropriate times to the given filter based on the given method. Activates and
- * de-activates filters as necessary.
- *
- * A filter is activated when the given method matches the 'entry' key of one of its FilterKeys.
- * It is de-activated when the method matches the 'exit' key of the same FilterKey that activated it
- * in the first place. Thus, a filter may be active more than once on the same thread (activated by
- * different FilterKeys). A filter may also be active on different threads at the same time.
- *
- * While the filter is active on thread 1, elapsed time is allocated to different buckets which
- * include: thread execution time (i.e., time thread 1 spent executing while filter was active),
- * thread waiting time (i.e., time thread 1 waited while other threads executed), and execution
- * time while waiting (i.e., time thread x spent executing while thread 1 was waiting). We also
- * keep track of the total waiting time for a given filter.
- *
- * Lastly, we keep track of remaining (un-allocated) time for cases in which we exit a method we
- * had not entered before, and that method happens to match the 'exit' key of a FilterKey.
- */
-int filterMethod(MethodEntry* method, Filter* filter, int entry, int threadId, int numThreads,
-		 uint64_t elapsed, uint64_t remTime)
-{
-    int ii, jj;
-    int activeCount, addedWaitTimeThreadsCount;
-    int* activeThreads;
-    int* activationKeys;
-    int* addedWaitTimeThreads;
-
-    // flags
-    int addWaitTime = 0;
-    int deactivation = 0;
-    int addedExecutionTime = 0;
-    int addedExecutionTimeWhileWaiting = 0;
-    int addedWaitTime;
-    int addedRemTime = 0;
-    int threadKeyPairActive = 0;
-
-    if (filter->times.threadWaitTimes == NULL && filter->times.threadExecutionTimes == NULL &&
-	filter->times.threadExecutionTimesWhileWaiting == NULL) {
-        filter->times.threadWaitTimes = (uint64_t*) calloc(MAX_THREADS, sizeof(uint64_t));
-	filter->times.threadExecutionTimesWhileWaiting =
-          (uint64_t*) calloc(MAX_THREADS, sizeof(uint64_t));
-	filter->times.threadExecutionTimes = (uint64_t*) calloc(MAX_THREADS, sizeof(uint64_t));
-    }
-
-    int verbose = 0;
-
-    if (verbose)
-        fprintf(stderr,
-                "Running %s filter for class %s method %s, thread %d; activeCount: %d time: %llu\n",
-                filter->filterName, method->className, method->methodName, threadId,
-                filter->activeCount, elapsed);
-
-    // If active on some thread
-    if (filter->activeCount > 0) {
-
-        // Initialize active structures in case there are any de-activations
-        activeThreads = (int*) calloc(filter->activeCount, sizeof(int));
-	activationKeys = (int*) calloc(filter->activeCount, sizeof(int));
-	activeCount = 0;
-
-	// Initialize structure to help us determine which threads we've already added wait time to
-	addedWaitTimeThreads = (int*) calloc(filter->activeCount, sizeof(int));
-	addedWaitTimeThreadsCount = 0;
-
-        // Add times to appropriate sums and de-activate (if necessary)
-        for (ii = 0; ii < filter->activeCount; ii++) {
-
-	    if (verbose) {
-	        fprintf(stderr, "  Analyzing active thread with id %d, activated by key [%s, %s]\n",
-			filter->activeThreads[ii],
-                        filter->filterKeys[filter->activationKeys[ii]].keys[0],
-			filter->filterKeys[filter->activationKeys[ii]].keys[1]);
-	    }
-
-	    // If active on THIS thread -> add to execution time (only add once!)
-	    if (filter->activeThreads[ii] == threadId && !addedExecutionTime) {
-	        if (verbose)
-		    fprintf(stderr, "  Adding execution time to this thead\n");
-	        filter->times.threadExecutionTimes[threadId] += elapsed;
-		addedExecutionTime = 1;
-	    }
-
-	    // If active on ANOTHER thread (or this one too) with CROSS_THREAD_FLAG -> add to
-            // both thread's waiting time + total
-	    if (filter->filterKeys[filter->activationKeys[ii]].flags == 1) {
-
-	        // Add time to thread that is waiting (add to each waiting thread at most once!)
-	        addedWaitTime = 0;
-		for (jj = 0; jj < addedWaitTimeThreadsCount; jj++) {
-		    if (addedWaitTimeThreads[jj] == filter->activeThreads[ii])
-		        addedWaitTime = 1;
-		}
-	        if (!addedWaitTime) {
-		    if (verbose)
-		        fprintf(stderr, "  Adding wait time to waiting thread\n");
-		    filter->times.threadWaitTimes[filter->activeThreads[ii]] += elapsed;
-		    addedWaitTimeThreads[addedWaitTimeThreadsCount++] = filter->activeThreads[ii];
-		}
-
-                // Add execution time to this thread while the other is waiting (only add once!)
-                // [Flag is needed only because outside for loop might iterate through same
-                // thread twice?] TODO: verify
-		if (!addedExecutionTimeWhileWaiting) {
-		    if (verbose)
-		        fprintf(stderr, "  Adding exec time to this thread while thread waits\n");
-		    filter->times.threadExecutionTimesWhileWaiting[threadId] += elapsed;
-		    addedExecutionTimeWhileWaiting = 1;
-		}
-
-		addWaitTime = 1;
-	    }
-
-	    // If a method exit matches the EXIT method of an ACTIVE key -> de-activate
-            // the KEY (not the entire filter!!)
-	    if (!entry && keyMatchesMethod(filter->filterKeys[filter->activationKeys[ii]],
-					   method, 1)) {
-	        if (verbose)
-		    fprintf(stderr, "  Exit key matched!\n");
-
-	        // Deactivate by removing (NOT adding) entries from activeThreads and activationKeys
-	        deactivation = 1; // singal that lists should be replaced
-	    } else {
-	        // No de-activation -> copy old entries into new lists
-	        activeThreads[activeCount] = filter->activeThreads[ii];
-		activationKeys[activeCount++] = filter->activationKeys[ii];
-	    }
-	}
-
-	// If waiting on ANY thread, add wait time to total (but only ONCE!)
-	if (addWaitTime) {
-	    filter->times.totalWaitTime += elapsed;
-	}
-
-	// If de-activation occurred, replace lists
-	if (deactivation) {
-	    // TODO: Free memory from old lists
-
-	    // Set new lists
-	    filter->activeThreads = activeThreads;
-	    filter->activationKeys = activationKeys;
-	    filter->activeCount = activeCount;
-	} else {
-	    // TODO: Free memory from new lists
-	}
-
-    }  // Else, continue (we might be activating the filter on a different thread)
-
-
-    if (entry) { // ENTRY
-        if (verbose)
-	    fprintf(stderr, "  Here at the entry\n");
-        // If method matches entry key -> activate thread (do not add time since it's a new entry!)
-        for (ii = 0; ii < filter->numKeys; ii++) {
-	    if (keyMatchesMethod(filter->filterKeys[ii], method, 0)) {
-	        if (verbose)
-		    fprintf(stderr, "  Entry key matched!\n");
-	        // Activate thread only if thread/key pair is not already active
-	        for (jj = 0; jj < filter->activeCount; jj++) {
-		    if (filter->activeThreads[jj] == threadId && filter->activationKeys[jj] == ii)
-		        threadKeyPairActive = 1;
-		}
-	        // TODO: WORRY ABOUT MEMORY WHEN ACTIVE_COUNT > DEFAULT_ACTIVE_THREAD (unlikely)
-	        // TODO: what if the same thread is active multiple times by different keys?
-		// nothing, we just have to make sure we dont double-add, and we dont..
-		if (!threadKeyPairActive) {
-		    filter->activeThreads[filter->activeCount] = threadId;
-		    filter->activationKeys[filter->activeCount++] = ii;
-		}
-	    }
-	}
-    } else { // EXIT
-        // If method matches a terminal key -> add remTime to total (no need to active/de-activate)
-        for (ii = 0; ii < filter->numKeys; ii++) {
-	    if (!deactivation && keyMatchesMethod(filter->filterKeys[ii], method, 1) &&
-		keyMatchesMethod(filter->filterKeys[ii], method, 0)) {
-	        // Add remTime(s)
-	        // TODO: think about how we should add remTimes.. should we add remTime to threads
-	        // that were waiting or being waited on? for now, keep it simple and just add the
-	        // execution time to the current thread.
-	        filter->times.threadExecutionTimes[threadId] += remTime;
-		addedRemTime = 1;
-	    }
-	}
-    }
-
-    return addedExecutionTime | (addedRemTime << 1);
-}
-
-void dumpFilters(Filter** filters) {
-    int i;
-    for (i = 0; i < numFilters; i++) {
-        int j;
-	fprintf(stderr, "FILTER %s\n", filters[i]->filterName);
-	for (j = 0; j < filters[i]->numKeys; j++) {
-	    fprintf(stderr, "Keys: %s, type %d", filters[i]->filterKeys[j].keys[0],
-		    filters[i]->filterKeys[j].type[0]);
-	    if (filters[i]->filterKeys[j].keys[1] != NULL) {
-	        fprintf(stderr, " AND %s, type %d", filters[i]->filterKeys[j].keys[1],
-			filters[i]->filterKeys[j].type[1]);
-	    }
-	    fprintf(stderr, "; flags: %d\n", filters[i]->filterKeys[j].flags);
-	}
-    }
-}
-
-/*
- * See parseFilters for required data format.
- * 'data' must point to the beginning of a filter definition.
- */
-char* parseFilter(char* data, char* dataEnd, Filter** filters, int num) {
-
-    Filter* filter;
-    int next, count, i;
-    int tmpOffset, tmpKeyLen;
-    char* tmpKey;
-    char* key1;
-    char* key2;
-
-    filter = (Filter*) malloc(sizeof(Filter));
-    filter->activeCount = 0;
-    filter->activeThreads = (int*) calloc(DEFAULT_ACTIVE_THREADS, sizeof(int));
-    filter->activationKeys = (int*) calloc(DEFAULT_ACTIVE_THREADS, sizeof(int));
-
-    next = findNextChar(data + 1, dataEnd - data - 1, '\n');
-    if (next < 0) {
-        // TODO: what should we do here?
-        // End of file reached...
-    }
-    data[next+1] = '\0';
-    filter->filterName = data + 1;
-    data += next + 2; // Careful
-
-    /*
-     * Count the number of keys (one per line).
-     */
-    count = countLinesToChar(data, dataEnd - data, FILTER_TAG);
-    if (count <= 0) {
-        fprintf(stderr,
-		"ERROR: failed while parsing filter %s (found %d keys)\n",
-		filter->filterName, count);
-	return NULL; // TODO: Should do something else
-	// Could return filter with 0 keys instead (probably better to avoid random segfaults)
-    }
-
-    filter->filterKeys = (FilterKey*) malloc(sizeof(FilterKey) * count);
-
-    /*
-     * Extract all entries.
-     */
-    tmpOffset = 0;
-    for (i = 0; i < count; i++) {
-        next = findNextChar(data, dataEnd - data, '\n');
-	//        assert(next > 0); // TODO: revise... (skip if next == 0 ?)
-        data[next] = '\0';
-	tmpKey = data;
-
-        if (*data == FILTER_FLAG_THREAD) {
-            filter->filterKeys[i].flags = 1;
-            tmpKey++;
-	} else {
-            filter->filterKeys[i].flags = 0;
-	}
-
-	tmpOffset = findNextChar(tmpKey, next, ',');
-
-        if (tmpOffset < 0) {
-            // No comma, so only 1 key
-            key1 = tmpKey;
-	    key2 = tmpKey;
-
-	    // Get type for key1
-            filter->filterKeys[i].type[0] = FILTER_TYPE_CLASS; // default
-            tmpOffset = findNextChar(key1, next, '(');
-	    if (tmpOffset > 0) {
-	        if (findNextChar(key1, next, ')') == tmpOffset + 1) {
-		    filter->filterKeys[i].type[0] = FILTER_TYPE_METHOD;
-		    filter->filterKeys[i].type[1] = FILTER_TYPE_METHOD;
-		}
-		key1[tmpOffset] = '\0';
-	    }
-	} else {
-	    // Pair of keys
-	    tmpKey[tmpOffset] = '\0';
-	    key1 = tmpKey;
-	    key2 = tmpKey + tmpOffset + 1;
-
-	    // Get type for key1
-	    filter->filterKeys[i].type[0] = FILTER_TYPE_CLASS;
-	    tmpKeyLen = tmpOffset;
-            tmpOffset = findNextChar(key1, tmpKeyLen, '(');
-	    if (tmpOffset > 0) {
-	        if (findNextChar(key1, tmpKeyLen, ')') == tmpOffset + 1) {
-		    filter->filterKeys[i].type[0] = FILTER_TYPE_METHOD;
-		}
-		key1[tmpOffset] = '\0';
-	    }
-
-	    // Get type for key2
-	    filter->filterKeys[i].type[1] = FILTER_TYPE_CLASS;
-            tmpOffset = findNextChar(key2, next - tmpKeyLen, '(');
-	    if (tmpOffset > 0) {
-	        if (findNextChar(key2, next - tmpKeyLen, ')') == tmpOffset + 1) {
-		    filter->filterKeys[i].type[1] = FILTER_TYPE_METHOD;
-		}
-		key2[tmpOffset] = '\0';
-	    }
-	}
-
-	filter->filterKeys[i].keys[0] = key1;
-	filter->filterKeys[i].keys[1] = key2;
-        data += next+1;
-    }
-
-    filter->numKeys = count;
-    filters[num] = filter;
-
-    return data;
-}
-
-/*
- * Parses filters from given file. The file must follow the following format:
- *
- * *FilterName    <- creates a new filter with keys to follow
- * A.method()     <- key that triggers whenever A.method() enters/exit
- * Class          <- key that triggers whenever any method from Class enters/exits
- * +CrossThread   <- same as above, but keeps track of execution times accross threads
- * B.m(),C.m()    <- key that triggers filter on when B.m() enters and off when C.m() exits
- *
- * TODO: add concrete example to make things clear
- */
-Filter** parseFilters(const char* filterFileName) {
-
-    Filter** filters = NULL;
-    FILE* fp = NULL;
-    long len;
-    char* data;
-    char* dataEnd;
-    char* dataStart;
-    int i, next, count;
-
-    fp = fopen(filterFileName, "r");
-    if (fp == NULL)
-        goto bail;
-
-    if (fseek(fp, 0L, SEEK_END) != 0) {
-        perror("fseek");
-        goto bail;
-    }
-
-    len = ftell(fp);
-    if (len == 0) {
-        fprintf(stderr, "WARNING: Filter file is empty.\n");
-        goto bail;
-    }
-    rewind(fp);
-
-    data = (char*) malloc(len);
-    if (data == NULL) {
-        fprintf(stderr, "ERROR: unable to alloc %ld bytes for filter file\n", len);
-        goto bail;
-    }
-
-    // Read file into memory
-    if (fread(data, 1, len, fp) != (size_t) len) {
-        fprintf(stderr, "ERROR: unable to read %ld bytes from filter file\n", len);
-        goto bail;
-    }
-
-    dataStart = data;
-    dataEnd = data + len;
-
-    // Figure out how many filters there are
-    numFilters = 0;
-    next = -1;
-
-    while (1) {
-        if (*data == FILTER_TAG)
-	    numFilters++;
-        next = findNextChar(data, len, '\n');
-        if (next < 0)
-            break;
-        data += next+1;
-        len -= next+1;
-    }
-
-    if (numFilters == 0) {
-        fprintf(stderr, "WARNING: no filters found. Continuing without filters\n");
-        goto bail;
-    }
-
-    filters = (Filter**) calloc(numFilters, sizeof(Filter *));
-    if (filters == NULL) {
-        fprintf(stderr, "ERROR: unable to alloc memory for filters");
-        goto bail;
-    }
-
-    data = dataStart;
-    for (i = 0; i < numFilters; i++) {
-        data = parseFilter(data, dataEnd, filters, i);
-    }
-
-    return filters;
-
-bail:
-    if (fp != NULL)
-        fclose(fp);
-
-    return NULL;
-
-}
-
-
-/*
  * Read the key and data files and return the MethodEntries for those files
  */
-DataKeys* parseDataKeys(TraceData* traceData, const char* traceFileName,
-			uint64_t* threadTime, Filter** filters)
+DataKeys* parseDataKeys(TraceData* traceData, const char* traceFileName, uint64_t* threadTime)
 {
     DataKeys* dataKeys = NULL;
     MethodEntry **pMethods = NULL;
     MethodEntry* method;
     FILE* dataFp = NULL;
     DataHeader dataHeader;
-    int ii, jj, numThreads;
+    int ii;
     uint64_t currentTime;
     MethodEntry* caller;
 
@@ -2943,13 +2284,11 @@
         goto bail;
 
     if ((dataKeys = parseKeys(dataFp, 0)) == NULL)
-       goto bail;
+        goto bail;
 
     if (parseDataHeader(dataFp, &dataHeader) < 0)
         goto bail;
 
-    numThreads = dataKeys->numThreads;
-
 #if 0
     FILE *dumpStream = fopen("debug", "w");
 #endif
@@ -2959,7 +2298,6 @@
         int action;
         unsigned int methodId;
         CallStack *pStack;
-
         /*
          * Extract values from file.
          */
@@ -2978,7 +2316,6 @@
             pStack->top = 0;
             pStack->lastEventTime = currentTime;
             pStack->threadStartTime = currentTime;
-	    pStack->remTimes = (uint64_t*) calloc(numFilters, sizeof(uint64_t));
             traceData->stacks[threadId] = pStack;
         }
 
@@ -2989,16 +2326,16 @@
 
 #if 0
         if (method->methodName) {
-	    fprintf(dumpStream, "%2d %-8llu %d %8llu r %d c %d %s.%s %s\n",
-	           threadId, currentTime, action, pStack->threadStartTime,
-	           method->recursiveEntries,
-	           pStack->top, method->className, method->methodName,
-	           method->signature);
+            fprintf(dumpStream, "%2d %-8llu %d %8llu r %d c %d %s.%s %s\n",
+                    threadId, currentTime, action, pStack->threadStartTime,
+                    method->recursiveEntries,
+                    pStack->top, method->className, method->methodName,
+                    method->signature);
         } else {
-	    printf(dumpStream, "%2d %-8llu %d %8llu r %d c %d %s\n",
-	           threadId, currentTime, action, pStack->threadStartTime,
-	           method->recursiveEntries,
-	           pStack->top, method->className);
+            fprintf(dumpStream, "%2d %-8llu %d %8llu r %d c %d %s\n",
+                    threadId, currentTime, action, pStack->threadStartTime,
+                    method->recursiveEntries,
+                    pStack->top, method->className);
         }
 #endif
 
@@ -3031,26 +2368,6 @@
             /* Push the method on the stack for this thread */
             pStack->calls[pStack->top].method = method;
             pStack->calls[pStack->top++].entryTime = currentTime;
-
-	    // For each filter
-	    int result = 0;
-	    for (ii = 0; ii < numFilters; ii++) {
-	        result = filterMethod(method, filters[ii], 1, threadId, numThreads,
-				       currentTime - pStack->lastEventTime, pStack->remTimes[ii]);
-
-		// TODO: make remTimes work properly
-		// Consider moving remTimes handling together with the rest
-		// of time handling and clean up the return codes
-		/*
-		if (result == 0) { // no time added, no remTime added
-		    pStack->remTimes[ii] += currentTime - pStack->lastEventTime;
-		} else if (result == 3 || result == 4) { // remTime added
-		    // Reset remTime, since it's been added
-		    pStack->remTimes[ii] = 0;
-		}
-		*/
-	    }
-
         } else {
             /* This is a method exit */
             uint64_t entryTime = 0;
@@ -3088,24 +2405,6 @@
             if (method->recursiveEntries == 0) {
                 method->topExclusive += currentTime - pStack->lastEventTime;
             }
-
-	    // For each filter
-	    int result = 0;
-	    for (ii = 0; ii < numFilters; ii++) {
-	        result = filterMethod(method, filters[ii], 0, threadId, numThreads,
-				       currentTime - pStack->lastEventTime, pStack->remTimes[ii]);
-
-		// TODO: make remTimes work properly
-		/*
-		if (result == 0) { // no time added, no remTime added
-		    pStack->remTimes[ii] += currentTime - pStack->lastEventTime;
-		} else if (result == 3 || result == 4) { // remTime added
-		    // Reset remTime, since it's been added
-		    pStack->remTimes[ii] = 0;
-		}
-		*/
-	    }
-
         }
         /* Remember the time of the last entry or exit event */
         pStack->lastEventTime = currentTime;
@@ -3117,23 +2416,18 @@
      */
     CallStack *pStack;
     int threadId;
-    uint64_t elapsedTime = 0;
     uint64_t sumThreadTime = 0;
     for (threadId = 0; threadId < MAX_THREADS; ++threadId) {
-
         pStack = traceData->stacks[threadId];
 
         /* If this thread never existed, then continue with next thread */
         if (pStack == NULL)
             continue;
 
-        /* Calculate times spent in thread, and add it to total time */
-        elapsedTime = pStack->lastEventTime - pStack->threadStartTime;
-        sumThreadTime += elapsedTime;
+        /* Also, add up the time taken by all of the threads */
+        sumThreadTime += pStack->lastEventTime - pStack->threadStartTime;
 
         for (ii = 0; ii < pStack->top; ++ii) {
-	  //printf("in loop\n");
-
             if (ii == 0)
                 caller = &dataKeys->methods[TOPLEVEL_INDEX];
             else
@@ -3145,33 +2439,7 @@
             uint64_t entryTime = pStack->calls[ii].entryTime;
             uint64_t elapsed = pStack->lastEventTime - entryTime;
             addInclusiveTime(caller, method, elapsed);
-
-	    // For each filter
-	    int result = 0;
-	    for (ii = 0; ii < numFilters; ii++) {
-	        result = filterMethod(method, filters[ii], 0, threadId, numThreads,
-				       currentTime - pStack->lastEventTime, pStack->remTimes[ii]);
-
-		// TODO: make remTimes work properly
-		/*
-		if (result == 0) { // no time added, no remTime added
-		    pStack->remTimes[ii] += currentTime - pStack->lastEventTime;
-		} else if (result == 3 || result == 4) { // remTime added
-		    // Reset remTime, since it's been added
-		    pStack->remTimes[ii] = 0;
-		}
-		*/
-	    }
         }
-
-	/* Save the per-thread elapsed time in the DataKeys struct */
-	for (ii = 0; ii < dataKeys->numThreads; ++ii) {
-	    if (dataKeys->threads[ii].threadId == threadId) {
-                dataKeys->threads[ii].elapsedTime = elapsedTime;
-	    }
-	}
-
-
     }
     caller = &dataKeys->methods[TOPLEVEL_INDEX];
     caller->elapsedInclusive = sumThreadTime;
@@ -3208,14 +2476,12 @@
     return pMethods;
 }
 
-
 /*
  * Produce a function profile from the following methods
  */
-void profileTrace(TraceData* traceData, MethodEntry **pMethods, int numMethods, uint64_t sumThreadTime,
-                  ThreadEntry *pThreads, int numThreads, Filter** filters)
+void profileTrace(TraceData* traceData, MethodEntry **pMethods, int numMethods, uint64_t sumThreadTime)
 {
-   /* Print the html header, if necessary */
+    /* Print the html header, if necessary */
     if (gOptions.outputHtml) {
         printf(htmlHeader, gOptions.sortableUrl);
         outputTableOfContents();
@@ -3224,8 +2490,6 @@
     printExclusiveProfile(pMethods, numMethods, sumThreadTime);
     printInclusiveProfile(pMethods, numMethods, sumThreadTime);
 
-    printThreadProfile(pThreads, numThreads, sumThreadTime, filters);
-
     createClassList(traceData, pMethods, numMethods);
     printClassProfiles(traceData, sumThreadTime);
 
@@ -3491,7 +2755,7 @@
     if (gOptions.outputHtml) {
         printf("</table>\n");
         printf("<h3>Run 1 methods not found in Run 2</h3>");
-        printf(tableHeaderMissing);
+        printf(tableHeaderMissing, "?");
     }
 
     for (i = 0; i < d1->numMethods; ++i) {
@@ -3503,7 +2767,7 @@
     if (gOptions.outputHtml) {
         printf("</table>\n");
         printf("<h3>Run 2 methods not found in Run 1</h3>");
-        printf(tableHeaderMissing);
+        printf(tableHeaderMissing, "?");
     }
 
     for (i = 0; i < d2->numMethods; ++i) {
@@ -3517,10 +2781,10 @@
 
 int usage(const char *program)
 {
-    fprintf(stderr, "usage: %s [-ho] [-s sortable] [-d trace-file-name] [-g outfile] [-f filter-file] trace-file-name\n", program);
+    fprintf(stderr, "Copyright (C) 2006 The Android Open Source Project\n\n");
+    fprintf(stderr, "usage: %s [-ho] [-s sortable] [-d trace-file-name] [-g outfile] trace-file-name\n", program);
     fprintf(stderr, "  -d trace-file-name  - Diff with this trace\n");
     fprintf(stderr, "  -g outfile          - Write graph to 'outfile'\n");
-    fprintf(stderr, "  -f filter-file      - Filter functions as specified in file\n");
     fprintf(stderr, "  -k                  - When writing a graph, keep the intermediate DOT file\n");
     fprintf(stderr, "  -h                  - Turn on HTML output\n");
     fprintf(stderr, "  -o                  - Dump the dmtrace file instead of profiling\n");
@@ -3533,7 +2797,7 @@
 int parseOptions(int argc, char **argv)
 {
     while (1) {
-        int opt = getopt(argc, argv, "d:hg:kos:t:f:");
+        int opt = getopt(argc, argv, "d:hg:kos:t:");
         if (opt == -1)
             break;
         switch (opt) {
@@ -3543,9 +2807,6 @@
             case 'g':
                 gOptions.graphFileName = optarg;
                 break;
-            case 'f':
-	        gOptions.filterFileName = optarg;
-                break;
             case 'k':
                 gOptions.keepDotFile = 1;
                 break;
@@ -3573,7 +2834,6 @@
  */
 int main(int argc, char** argv)
 {
-
     gOptions.threshold = -1;
 
     // Parse the options
@@ -3593,15 +2853,9 @@
 
     uint64_t sumThreadTime = 0;
 
-    Filter** filters = NULL;
-    if (gOptions.filterFileName != NULL) {
-        filters = parseFilters(gOptions.filterFileName);
-    }
-
     TraceData data1;
-    memset(&data1, 0, sizeof(data1));
     DataKeys* dataKeys = parseDataKeys(&data1, gOptions.traceFileName,
-                                       &sumThreadTime, filters);
+                                       &sumThreadTime);
     if (dataKeys == NULL) {
         fprintf(stderr, "Cannot read trace.\n");
         exit(1);
@@ -3610,15 +2864,14 @@
     if (gOptions.diffFileName != NULL) {
         uint64_t sum2;
         TraceData data2;
-        DataKeys* d2 = parseDataKeys(&data2, gOptions.diffFileName, &sum2, filters);
+        DataKeys* d2 = parseDataKeys(&data2, gOptions.diffFileName, &sum2);
 
         createDiff(d2, sum2, dataKeys, sumThreadTime);
 
         freeDataKeys(d2);
     } else {
         MethodEntry** methods = parseMethodEntries(dataKeys);
-        profileTrace(&data1, methods, dataKeys->numMethods, sumThreadTime,
-                     dataKeys->threads, dataKeys->numThreads, filters);
+        profileTrace(&data1, methods, dataKeys->numMethods, sumThreadTime);
         if (gOptions.graphFileName != NULL) {
             createInclusiveProfileGraphNew(dataKeys);
         }
diff --git a/tools/dmtracedump/filters b/tools/dmtracedump/filters
deleted file mode 100644
index 96a041c..0000000
--- a/tools/dmtracedump/filters
+++ /dev/null
@@ -1,42 +0,0 @@
-*GC
-dvmGcScanRootClassLoader
-mspace_walk_free_pages
-dvmCollectGarbageInternal
-doHeapWork
-dvmGetNextHeapWorkerObject
-GC
-GC2
-GC3
-*Net
-setsockopt
-+sys_setsockopt [kernel]
-socketSelect
-send
-recv
-sendto
-recvfrom
-+sys_sendto [kernel]
-+sys_recvfrom [kernel]
-org.apache.harmony.luni.internal.net.www.protocol.http.HttpURLConnection
-android.net.http.ConnectionThread
-PlainSocketImpl
-WebCore::HTMLTokenizer
-*IO
-select
-+sys_select [kernel]
-*DB
-android.database.sqlite.SQLiteOpenHelper
-android.database.sqlite.SQLiteQueryBuilder
-android.database.sqlite.SQLiteDatabase
-android.database.sqlite.SQLiteDirectCursorDriver
-android.database.sqlite.SQLiteQuery
-android.database.sqlite.SQLiteProgram
-android.database.AbstractCursor
-android.database.sqlite.SQLiteCursor
-*UI
-android.view.View.draw()
-android.view.ViewGroup
-*Sync
-+java.lang.Object.wait()
-*Useless
-+android.widget.ProgressBar
diff --git a/tools/dmtracedump/tests/filters/run_tests.sh b/tools/dmtracedump/tests/filters/run_tests.sh
deleted file mode 100755
index cdf87cb..0000000
--- a/tools/dmtracedump/tests/filters/run_tests.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-
-failed=0
-for file in $(find $1 -type f -iname 'test*'); do
-  case $file in
-    *testFilters) continue; ;;
-    *Expected) continue; ;;
-    *Trace) continue; ;;
-    *.html) continue; ;;
-  esac
-
-  echo "Running test for $file"
-
-#  create_test_dmtrace $file tmp.trace
-  dmtracedump -f testFilters -h "$file"Trace > tmp.html 2> /dev/null
-
-  output=`diff tmp.html "$file"Expected 2>&1`
-  if [ ${#output} -eq 0 ]
-  then
-    echo "  OK"
-  else
-    echo " Test failed: $output"
-    failed=`expr $failed + 1`
-  fi
-
-done
-
-rm tmp.trace
-rm tmp.html
-
-if [ $failed -gt 0 ]
-then
-  echo "$failed test(s) failed"
-else
-  echo "All tests passed successfully"
-fi
diff --git a/tools/dmtracedump/tests/filters/testFilters b/tools/dmtracedump/tests/filters/testFilters
deleted file mode 100644
index 2c3edb6..0000000
--- a/tools/dmtracedump/tests/filters/testFilters
+++ /dev/null
@@ -1,9 +0,0 @@
-*FirstFilter
-+A.m(),B.m()
-+C.m()
-+R.m(),S.m()
-*SecondFilter
-+D.m(),E.m()
-+F.m()
-*RepeatedFilter
-+R.m(),S.m()
diff --git a/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadDiffFilterDiffKeys b/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadDiffFilterDiffKeys
deleted file mode 100644
index b4367c6..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadDiffFilterDiffKeys
+++ /dev/null
@@ -1,19 +0,0 @@
-#    ____             ____       _________
-# __|A   |___________|B   |_____|Z        |_______
-#
-#         ___________       ____           ____
-# _______|Z          |_____|D   |_________|E   |__
-#
-#
-0 1 A
-2 1 A
-0 2 Z
-4 2 Z
-2 1 B
-4 1 B
-4 2 D
-6 2 D
-4 1 Z
-8 1 Z
-6 2 E
-8 2 E
diff --git a/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadDiffFilterDiffKeysExpected b/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadDiffFilterDiffKeysExpected
deleted file mode 100644
index 7fa789a..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadDiffFilterDiffKeysExpected
+++ /dev/null
@@ -1,232 +0,0 @@
-<html>
-<head>
-<script type="text/javascript" src="(null)sortable.js"></script>
-<script langugage="javascript">
-function toggle(item) {
-    obj=document.getElementById(item);
-    visible=(obj.style.display!="none" && obj.style.display!="");
-    key=document.getElementById("x" + item);
-    if (visible) {
-        obj.style.display="none";
-        key.innerHTML="+";
-    } else {
-        obj.style.display="block";
-        key.innerHTML="-";
-    }
-}
-function onMouseOver(obj) {
-    obj.style.background="lightblue";
-}
-function onMouseOut(obj) {
-    obj.style.background="white";
-}
-</script>
-<style type="text/css">
-div { font-family: courier; font-size: 13 }
-div.parent { margin-left: 15; display: none }
-div.leaf { margin-left: 10 }
-div.header { margin-left: 10 }
-div.link { margin-left: 10; cursor: move }
-span.parent { padding-right: 10; }
-span.leaf { padding-right: 10; }
-a img { border: 0;}
-table.sortable th { border-width: 0px 1px 1px 1px; background-color: #ccc;}
-a { text-decoration: none; }
-a:hover { text-decoration: underline; }
-table.sortable th, table.sortable td { text-align: left;}table.sortable tr.odd td { background-color: #ddd; }
-table.sortable tr.even td { background-color: #fff; }
-</style>
-</head><body>
-
-<a name="contents"></a>
-<h2>Table of Contents</h2>
-<ul>
-  <li><a href="#exclusive">Exclusive profile</a></li>
-  <li><a href="#inclusive">Inclusive profile</a></li>
-  <li><a href="#thread">Thread profile</a></li>
-  <li><a href="#class">Class/method profile</a></li>
-  <li><a href="#method">Method/class profile</a></li>
-</ul>
-
-<a name="exclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-Total cycles: 16
-
-<br><br>
-Exclusive elapsed times for each method, not including time spent in
-children, sorted by exclusive time.
-
-<br><br>
-<pre>
-    Usecs  self %  sum %  Method
-        8   50.00  50.00  <a href="#m1">[1]</a> Z.m ()
-        2   12.50  62.50  <a href="#m2">[2]</a> A.m ()
-        2   12.50  75.00  <a href="#m3">[3]</a> B.m ()
-        2   12.50  87.50  <a href="#m4">[4]</a> D.m ()
-        2   12.50 100.00  <a href="#m5">[5]</a> E.m ()
-</pre>
-<a name="inclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Inclusive elapsed times for each method and its parents and children,
-sorted by inclusive time.
-
-<br><br>
-<pre>
-index  %/total %/self  index     calls         usecs name
-<a name="m0"></a>----------------------------------------------------
-[0]    100.0%                     0+0             16 (toplevel)
-                 0.0%   excl                       0
-                50.0%    <a href="#m1">[1]</a>      2/2              8 Z.m ()
-                12.5%    <a href="#m2">[2]</a>      1/1              2 A.m ()
-                12.5%    <a href="#m3">[3]</a>      1/1              2 B.m ()
-                12.5%    <a href="#m4">[4]</a>      1/1              2 D.m ()
-                12.5%    <a href="#m5">[5]</a>      1/1              2 E.m ()
-<a name="m1"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              8 (toplevel)
-[1]     50.0%                     2+0              8 Z.m ()
-               100.0%   excl                       8
-<a name="m2"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[2]     12.5%                     1+0              2 A.m ()
-               100.0%   excl                       2
-<a name="m3"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[3]     12.5%                     1+0              2 B.m ()
-               100.0%   excl                       2
-<a name="m4"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[4]     12.5%                     1+0              2 D.m ()
-               100.0%   excl                       2
-<a name="m5"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[5]     12.5%                     1+0              2 E.m ()
-               100.0%   excl                       2
-</pre>
-<a name="thread"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Elapsed times for each thread, sorted by elapsed time.
-Also includes percentage of time spent during the <i>execution</i> of any filters.
-
-<br><br>
-<pre>
-    Usecs   self %  sum %  FirstFilter %  SecondFilter %  RepeatedFilter %  tid   ThreadName
-        8   50.00  50.00  50.00   0.00   0.00      1 main
-        8   50.00 100.00   0.00  50.00   0.00      2 foo
-        0    0.00 100.00    nan    nan    nan      3 bar
-        0    0.00 100.00    nan    nan    nan      4 blah
-</pre><br />
-
-Break-down of portion of time spent by each thread while waiting on a filter method.
-<br/><br/>
-<pre>
-Filter: FirstFilter
-Total waiting cycles: 8 ( 50.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         8                   100.00                      50.00               main
-         0                     0.00                      50.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: SecondFilter
-Total waiting cycles: 8 ( 50.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         0                     0.00                      50.00               main
-         8                   100.00                      50.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: RepeatedFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<a name="class"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each class, summed over all the methods
-in the class.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Class</div>
-<div class="link" onClick="javascript:toggle('d0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Z</div>
-<div class="parent" id="d0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d1')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd1">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;&nbsp;62.5 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; A</div>
-<div class="parent" id="d1">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d2')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd2">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;&nbsp;75.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; B</div>
-<div class="parent" id="d2">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d3')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd3">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;&nbsp;87.5 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; D</div>
-<div class="parent" id="d3">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m4">[4]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d4')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd4">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; E</div>
-<div class="parent" id="d4">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m5">[5]</a>&nbsp;m&nbsp;()</div>
-</div>
-<a name="method"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each method, summed over all the classes
-that contain a method with the same name.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Method</div>
-<div class="link" onClick="javascript:toggle('e0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xe0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;16 &nbsp;&nbsp;100.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;6+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; m</div>
-<div class="parent" id="e0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;Z.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;&nbsp;62.5&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;A.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;&nbsp;75.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;B.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;&nbsp;87.5&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m4">[4]</a>&nbsp;D.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m5">[5]</a>&nbsp;E.m&nbsp;()</div>
-</div>
-
-</body>
-</html>
diff --git a/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadDiffFilterDiffKeysTrace b/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadDiffFilterDiffKeysTrace
deleted file mode 100644
index 8bc74ff..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadDiffFilterDiffKeysTrace
+++ /dev/null
Binary files differ
diff --git a/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadDiffFilterSameKeys b/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadDiffFilterSameKeys
deleted file mode 100644
index 76cdea7..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadDiffFilterSameKeys
+++ /dev/null
@@ -1,19 +0,0 @@
-#    ____             ____       _________
-# __|R   |___________|S   |_____|Z        |_______
-#
-#         ___________       ____           ____
-# _______|Z          |_____|R   |_________|S   |__
-#
-#
-0 1 R
-2 1 R
-0 2 Z
-4 2 Z
-2 1 S
-4 1 S
-4 2 R
-6 2 R
-4 1 Z
-8 1 Z
-6 2 S
-8 2 S
diff --git a/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadDiffFilterSameKeysExpected b/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadDiffFilterSameKeysExpected
deleted file mode 100644
index 5672826..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadDiffFilterSameKeysExpected
+++ /dev/null
@@ -1,210 +0,0 @@
-<html>
-<head>
-<script type="text/javascript" src="(null)sortable.js"></script>
-<script langugage="javascript">
-function toggle(item) {
-    obj=document.getElementById(item);
-    visible=(obj.style.display!="none" && obj.style.display!="");
-    key=document.getElementById("x" + item);
-    if (visible) {
-        obj.style.display="none";
-        key.innerHTML="+";
-    } else {
-        obj.style.display="block";
-        key.innerHTML="-";
-    }
-}
-function onMouseOver(obj) {
-    obj.style.background="lightblue";
-}
-function onMouseOut(obj) {
-    obj.style.background="white";
-}
-</script>
-<style type="text/css">
-div { font-family: courier; font-size: 13 }
-div.parent { margin-left: 15; display: none }
-div.leaf { margin-left: 10 }
-div.header { margin-left: 10 }
-div.link { margin-left: 10; cursor: move }
-span.parent { padding-right: 10; }
-span.leaf { padding-right: 10; }
-a img { border: 0;}
-table.sortable th { border-width: 0px 1px 1px 1px; background-color: #ccc;}
-a { text-decoration: none; }
-a:hover { text-decoration: underline; }
-table.sortable th, table.sortable td { text-align: left;}table.sortable tr.odd td { background-color: #ddd; }
-table.sortable tr.even td { background-color: #fff; }
-</style>
-</head><body>
-
-<a name="contents"></a>
-<h2>Table of Contents</h2>
-<ul>
-  <li><a href="#exclusive">Exclusive profile</a></li>
-  <li><a href="#inclusive">Inclusive profile</a></li>
-  <li><a href="#thread">Thread profile</a></li>
-  <li><a href="#class">Class/method profile</a></li>
-  <li><a href="#method">Method/class profile</a></li>
-</ul>
-
-<a name="exclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-Total cycles: 16
-
-<br><br>
-Exclusive elapsed times for each method, not including time spent in
-children, sorted by exclusive time.
-
-<br><br>
-<pre>
-    Usecs  self %  sum %  Method
-        8   50.00  50.00  <a href="#m1">[1]</a> Z.m ()
-        4   25.00  75.00  <a href="#m2">[2]</a> R.m ()
-        4   25.00 100.00  <a href="#m3">[3]</a> S.m ()
-</pre>
-<a name="inclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Inclusive elapsed times for each method and its parents and children,
-sorted by inclusive time.
-
-<br><br>
-<pre>
-index  %/total %/self  index     calls         usecs name
-<a name="m0"></a>----------------------------------------------------
-[0]    100.0%                     0+0             16 (toplevel)
-                 0.0%   excl                       0
-                50.0%    <a href="#m1">[1]</a>      2/2              8 Z.m ()
-                25.0%    <a href="#m2">[2]</a>      2/2              4 R.m ()
-                25.0%    <a href="#m3">[3]</a>      2/2              4 S.m ()
-<a name="m1"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              8 (toplevel)
-[1]     50.0%                     2+0              8 Z.m ()
-               100.0%   excl                       8
-<a name="m2"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              4 (toplevel)
-[2]     25.0%                     2+0              4 R.m ()
-               100.0%   excl                       4
-<a name="m3"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              4 (toplevel)
-[3]     25.0%                     2+0              4 S.m ()
-               100.0%   excl                       4
-</pre>
-<a name="thread"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Elapsed times for each thread, sorted by elapsed time.
-Also includes percentage of time spent during the <i>execution</i> of any filters.
-
-<br><br>
-<pre>
-    Usecs   self %  sum %  FirstFilter %  SecondFilter %  RepeatedFilter %  tid   ThreadName
-        8   50.00  50.00  50.00   0.00  50.00      1 main
-        8   50.00 100.00  50.00   0.00  50.00      2 foo
-        0    0.00 100.00    nan    nan    nan      3 bar
-        0    0.00 100.00    nan    nan    nan      4 blah
-</pre><br />
-
-Break-down of portion of time spent by each thread while waiting on a filter method.
-<br/><br/>
-<pre>
-Filter: FirstFilter
-Total waiting cycles: 16 (100.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         8                    50.00                      50.00               main
-         8                    50.00                      50.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: SecondFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<br/><br/>
-<pre>
-Filter: RepeatedFilter
-Total waiting cycles: 16 (100.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         8                    50.00                      50.00               main
-         8                    50.00                      50.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<a name="class"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each class, summed over all the methods
-in the class.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Class</div>
-<div class="link" onClick="javascript:toggle('d0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Z</div>
-<div class="parent" id="d0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d1')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd1">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;25.0 &nbsp;&nbsp;&nbsp;75.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; R</div>
-<div class="parent" id="d1">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d2')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd2">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;25.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; S</div>
-<div class="parent" id="d2">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;m&nbsp;()</div>
-</div>
-<a name="method"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each method, summed over all the classes
-that contain a method with the same name.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Method</div>
-<div class="link" onClick="javascript:toggle('e0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xe0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;16 &nbsp;&nbsp;100.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;6+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; m</div>
-<div class="parent" id="e0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;Z.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;25.0&nbsp;&nbsp;&nbsp;&nbsp;75.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;R.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;25.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;S.m&nbsp;()</div>
-</div>
-
-</body>
-</html>
diff --git a/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadDiffFilterSameKeysTrace b/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadDiffFilterSameKeysTrace
deleted file mode 100644
index 9ec7378..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadDiffFilterSameKeysTrace
+++ /dev/null
Binary files differ
diff --git a/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadSameFilterDiffKeys b/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadSameFilterDiffKeys
deleted file mode 100644
index d1bcdd3..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadSameFilterDiffKeys
+++ /dev/null
@@ -1,19 +0,0 @@
-#    ____             ____       _________
-# __|A   |___________|B   |_____|Z        |_______
-#
-#         ___________       ____           ____
-# _______|Z          |_____|R   |_________|S   |__
-#
-#
-0 1 A
-2 1 A
-0 2 Z
-4 2 Z
-2 1 B
-4 1 B
-4 2 R
-6 2 R
-4 1 Z
-8 1 Z
-6 2 S
-8 2 S
diff --git a/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadSameFilterDiffKeysExpected b/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadSameFilterDiffKeysExpected
deleted file mode 100644
index ef56af5..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadSameFilterDiffKeysExpected
+++ /dev/null
@@ -1,232 +0,0 @@
-<html>
-<head>
-<script type="text/javascript" src="(null)sortable.js"></script>
-<script langugage="javascript">
-function toggle(item) {
-    obj=document.getElementById(item);
-    visible=(obj.style.display!="none" && obj.style.display!="");
-    key=document.getElementById("x" + item);
-    if (visible) {
-        obj.style.display="none";
-        key.innerHTML="+";
-    } else {
-        obj.style.display="block";
-        key.innerHTML="-";
-    }
-}
-function onMouseOver(obj) {
-    obj.style.background="lightblue";
-}
-function onMouseOut(obj) {
-    obj.style.background="white";
-}
-</script>
-<style type="text/css">
-div { font-family: courier; font-size: 13 }
-div.parent { margin-left: 15; display: none }
-div.leaf { margin-left: 10 }
-div.header { margin-left: 10 }
-div.link { margin-left: 10; cursor: move }
-span.parent { padding-right: 10; }
-span.leaf { padding-right: 10; }
-a img { border: 0;}
-table.sortable th { border-width: 0px 1px 1px 1px; background-color: #ccc;}
-a { text-decoration: none; }
-a:hover { text-decoration: underline; }
-table.sortable th, table.sortable td { text-align: left;}table.sortable tr.odd td { background-color: #ddd; }
-table.sortable tr.even td { background-color: #fff; }
-</style>
-</head><body>
-
-<a name="contents"></a>
-<h2>Table of Contents</h2>
-<ul>
-  <li><a href="#exclusive">Exclusive profile</a></li>
-  <li><a href="#inclusive">Inclusive profile</a></li>
-  <li><a href="#thread">Thread profile</a></li>
-  <li><a href="#class">Class/method profile</a></li>
-  <li><a href="#method">Method/class profile</a></li>
-</ul>
-
-<a name="exclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-Total cycles: 16
-
-<br><br>
-Exclusive elapsed times for each method, not including time spent in
-children, sorted by exclusive time.
-
-<br><br>
-<pre>
-    Usecs  self %  sum %  Method
-        8   50.00  50.00  <a href="#m1">[1]</a> Z.m ()
-        2   12.50  62.50  <a href="#m2">[2]</a> A.m ()
-        2   12.50  75.00  <a href="#m3">[3]</a> B.m ()
-        2   12.50  87.50  <a href="#m4">[4]</a> R.m ()
-        2   12.50 100.00  <a href="#m5">[5]</a> S.m ()
-</pre>
-<a name="inclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Inclusive elapsed times for each method and its parents and children,
-sorted by inclusive time.
-
-<br><br>
-<pre>
-index  %/total %/self  index     calls         usecs name
-<a name="m0"></a>----------------------------------------------------
-[0]    100.0%                     0+0             16 (toplevel)
-                 0.0%   excl                       0
-                50.0%    <a href="#m1">[1]</a>      2/2              8 Z.m ()
-                12.5%    <a href="#m2">[2]</a>      1/1              2 A.m ()
-                12.5%    <a href="#m3">[3]</a>      1/1              2 B.m ()
-                12.5%    <a href="#m4">[4]</a>      1/1              2 R.m ()
-                12.5%    <a href="#m5">[5]</a>      1/1              2 S.m ()
-<a name="m1"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              8 (toplevel)
-[1]     50.0%                     2+0              8 Z.m ()
-               100.0%   excl                       8
-<a name="m2"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[2]     12.5%                     1+0              2 A.m ()
-               100.0%   excl                       2
-<a name="m3"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[3]     12.5%                     1+0              2 B.m ()
-               100.0%   excl                       2
-<a name="m4"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[4]     12.5%                     1+0              2 R.m ()
-               100.0%   excl                       2
-<a name="m5"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[5]     12.5%                     1+0              2 S.m ()
-               100.0%   excl                       2
-</pre>
-<a name="thread"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Elapsed times for each thread, sorted by elapsed time.
-Also includes percentage of time spent during the <i>execution</i> of any filters.
-
-<br><br>
-<pre>
-    Usecs   self %  sum %  FirstFilter %  SecondFilter %  RepeatedFilter %  tid   ThreadName
-        8   50.00  50.00  50.00   0.00   0.00      1 main
-        8   50.00 100.00  50.00   0.00  50.00      2 foo
-        0    0.00 100.00    nan    nan    nan      3 bar
-        0    0.00 100.00    nan    nan    nan      4 blah
-</pre><br />
-
-Break-down of portion of time spent by each thread while waiting on a filter method.
-<br/><br/>
-<pre>
-Filter: FirstFilter
-Total waiting cycles: 16 (100.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         8                    50.00                      50.00               main
-         8                    50.00                      50.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: SecondFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<br/><br/>
-<pre>
-Filter: RepeatedFilter
-Total waiting cycles: 8 ( 50.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         0                     0.00                      50.00               main
-         8                   100.00                      50.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<a name="class"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each class, summed over all the methods
-in the class.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Class</div>
-<div class="link" onClick="javascript:toggle('d0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Z</div>
-<div class="parent" id="d0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d1')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd1">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;&nbsp;62.5 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; A</div>
-<div class="parent" id="d1">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d2')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd2">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;&nbsp;75.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; B</div>
-<div class="parent" id="d2">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d3')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd3">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;&nbsp;87.5 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; R</div>
-<div class="parent" id="d3">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m4">[4]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d4')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd4">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; S</div>
-<div class="parent" id="d4">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m5">[5]</a>&nbsp;m&nbsp;()</div>
-</div>
-<a name="method"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each method, summed over all the classes
-that contain a method with the same name.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Method</div>
-<div class="link" onClick="javascript:toggle('e0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xe0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;16 &nbsp;&nbsp;100.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;6+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; m</div>
-<div class="parent" id="e0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;Z.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;&nbsp;62.5&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;A.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;&nbsp;75.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;B.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;&nbsp;87.5&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m4">[4]</a>&nbsp;R.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m5">[5]</a>&nbsp;S.m&nbsp;()</div>
-</div>
-
-</body>
-</html>
diff --git a/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadSameFilterDiffKeysTrace b/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadSameFilterDiffKeysTrace
deleted file mode 100644
index 0559a6a..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadSameFilterDiffKeysTrace
+++ /dev/null
Binary files differ
diff --git a/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadSameFilterSameKeys b/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadSameFilterSameKeys
deleted file mode 100644
index 2bb68d7..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadSameFilterSameKeys
+++ /dev/null
@@ -1,19 +0,0 @@
-#    ____             ____       _________
-# __|A   |___________|B   |_____|Z        |_______
-#
-#         ___________       ____           ____
-# _______|Z          |_____|A   |_________|B   |__
-#
-#
-0 1 A
-2 1 A
-0 2 Z
-4 2 Z
-2 1 B
-4 1 B
-4 2 A
-6 2 A
-4 1 Z
-8 1 Z
-6 2 B
-8 2 B
diff --git a/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadSameFilterSameKeysExpected b/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadSameFilterSameKeysExpected
deleted file mode 100644
index 50b2b98..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadSameFilterSameKeysExpected
+++ /dev/null
@@ -1,203 +0,0 @@
-<html>
-<head>
-<script type="text/javascript" src="(null)sortable.js"></script>
-<script langugage="javascript">
-function toggle(item) {
-    obj=document.getElementById(item);
-    visible=(obj.style.display!="none" && obj.style.display!="");
-    key=document.getElementById("x" + item);
-    if (visible) {
-        obj.style.display="none";
-        key.innerHTML="+";
-    } else {
-        obj.style.display="block";
-        key.innerHTML="-";
-    }
-}
-function onMouseOver(obj) {
-    obj.style.background="lightblue";
-}
-function onMouseOut(obj) {
-    obj.style.background="white";
-}
-</script>
-<style type="text/css">
-div { font-family: courier; font-size: 13 }
-div.parent { margin-left: 15; display: none }
-div.leaf { margin-left: 10 }
-div.header { margin-left: 10 }
-div.link { margin-left: 10; cursor: move }
-span.parent { padding-right: 10; }
-span.leaf { padding-right: 10; }
-a img { border: 0;}
-table.sortable th { border-width: 0px 1px 1px 1px; background-color: #ccc;}
-a { text-decoration: none; }
-a:hover { text-decoration: underline; }
-table.sortable th, table.sortable td { text-align: left;}table.sortable tr.odd td { background-color: #ddd; }
-table.sortable tr.even td { background-color: #fff; }
-</style>
-</head><body>
-
-<a name="contents"></a>
-<h2>Table of Contents</h2>
-<ul>
-  <li><a href="#exclusive">Exclusive profile</a></li>
-  <li><a href="#inclusive">Inclusive profile</a></li>
-  <li><a href="#thread">Thread profile</a></li>
-  <li><a href="#class">Class/method profile</a></li>
-  <li><a href="#method">Method/class profile</a></li>
-</ul>
-
-<a name="exclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-Total cycles: 16
-
-<br><br>
-Exclusive elapsed times for each method, not including time spent in
-children, sorted by exclusive time.
-
-<br><br>
-<pre>
-    Usecs  self %  sum %  Method
-        8   50.00  50.00  <a href="#m1">[1]</a> Z.m ()
-        4   25.00  75.00  <a href="#m2">[2]</a> A.m ()
-        4   25.00 100.00  <a href="#m3">[3]</a> B.m ()
-</pre>
-<a name="inclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Inclusive elapsed times for each method and its parents and children,
-sorted by inclusive time.
-
-<br><br>
-<pre>
-index  %/total %/self  index     calls         usecs name
-<a name="m0"></a>----------------------------------------------------
-[0]    100.0%                     0+0             16 (toplevel)
-                 0.0%   excl                       0
-                50.0%    <a href="#m1">[1]</a>      2/2              8 Z.m ()
-                25.0%    <a href="#m2">[2]</a>      2/2              4 A.m ()
-                25.0%    <a href="#m3">[3]</a>      2/2              4 B.m ()
-<a name="m1"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              8 (toplevel)
-[1]     50.0%                     2+0              8 Z.m ()
-               100.0%   excl                       8
-<a name="m2"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              4 (toplevel)
-[2]     25.0%                     2+0              4 A.m ()
-               100.0%   excl                       4
-<a name="m3"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              4 (toplevel)
-[3]     25.0%                     2+0              4 B.m ()
-               100.0%   excl                       4
-</pre>
-<a name="thread"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Elapsed times for each thread, sorted by elapsed time.
-Also includes percentage of time spent during the <i>execution</i> of any filters.
-
-<br><br>
-<pre>
-    Usecs   self %  sum %  FirstFilter %  SecondFilter %  RepeatedFilter %  tid   ThreadName
-        8   50.00  50.00  50.00   0.00   0.00      1 main
-        8   50.00 100.00  50.00   0.00   0.00      2 foo
-        0    0.00 100.00    nan    nan    nan      3 bar
-        0    0.00 100.00    nan    nan    nan      4 blah
-</pre><br />
-
-Break-down of portion of time spent by each thread while waiting on a filter method.
-<br/><br/>
-<pre>
-Filter: FirstFilter
-Total waiting cycles: 16 (100.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         8                    50.00                      50.00               main
-         8                    50.00                      50.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: SecondFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<br/><br/>
-<pre>
-Filter: RepeatedFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<a name="class"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each class, summed over all the methods
-in the class.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Class</div>
-<div class="link" onClick="javascript:toggle('d0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Z</div>
-<div class="parent" id="d0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d1')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd1">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;25.0 &nbsp;&nbsp;&nbsp;75.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; A</div>
-<div class="parent" id="d1">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d2')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd2">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;25.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; B</div>
-<div class="parent" id="d2">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;m&nbsp;()</div>
-</div>
-<a name="method"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each method, summed over all the classes
-that contain a method with the same name.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Method</div>
-<div class="link" onClick="javascript:toggle('e0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xe0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;16 &nbsp;&nbsp;100.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;6+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; m</div>
-<div class="parent" id="e0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;Z.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;25.0&nbsp;&nbsp;&nbsp;&nbsp;75.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;A.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;25.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;B.m&nbsp;()</div>
-</div>
-
-</body>
-</html>
diff --git a/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadSameFilterSameKeysTrace b/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadSameFilterSameKeysTrace
deleted file mode 100644
index f113fcf..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingDisjointCrossThreadSameFilterSameKeysTrace
+++ /dev/null
Binary files differ
diff --git a/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadDiffFilterDiffKeys b/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadDiffFilterDiffKeys
deleted file mode 100644
index e7456c1..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadDiffFilterDiffKeys
+++ /dev/null
@@ -1,17 +0,0 @@
-#    ____  ____  ____  ________  ____  ____  ____
-# __|A   ||Z   ||B   ||Z       ||D   ||Z   ||E   |__
-#
-0 1 A
-2 1 A
-2 1 Z
-4 1 Z
-4 1 B
-6 1 B
-6 1 Z
-10 1 Z
-10 1 D
-12 1 D
-12 1 Z
-14 1 Z
-14 1 E
-16 1 E
diff --git a/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadDiffFilterDiffKeysExpected b/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadDiffFilterDiffKeysExpected
deleted file mode 100644
index 9349375..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadDiffFilterDiffKeysExpected
+++ /dev/null
@@ -1,232 +0,0 @@
-<html>
-<head>
-<script type="text/javascript" src="(null)sortable.js"></script>
-<script langugage="javascript">
-function toggle(item) {
-    obj=document.getElementById(item);
-    visible=(obj.style.display!="none" && obj.style.display!="");
-    key=document.getElementById("x" + item);
-    if (visible) {
-        obj.style.display="none";
-        key.innerHTML="+";
-    } else {
-        obj.style.display="block";
-        key.innerHTML="-";
-    }
-}
-function onMouseOver(obj) {
-    obj.style.background="lightblue";
-}
-function onMouseOut(obj) {
-    obj.style.background="white";
-}
-</script>
-<style type="text/css">
-div { font-family: courier; font-size: 13 }
-div.parent { margin-left: 15; display: none }
-div.leaf { margin-left: 10 }
-div.header { margin-left: 10 }
-div.link { margin-left: 10; cursor: move }
-span.parent { padding-right: 10; }
-span.leaf { padding-right: 10; }
-a img { border: 0;}
-table.sortable th { border-width: 0px 1px 1px 1px; background-color: #ccc;}
-a { text-decoration: none; }
-a:hover { text-decoration: underline; }
-table.sortable th, table.sortable td { text-align: left;}table.sortable tr.odd td { background-color: #ddd; }
-table.sortable tr.even td { background-color: #fff; }
-</style>
-</head><body>
-
-<a name="contents"></a>
-<h2>Table of Contents</h2>
-<ul>
-  <li><a href="#exclusive">Exclusive profile</a></li>
-  <li><a href="#inclusive">Inclusive profile</a></li>
-  <li><a href="#thread">Thread profile</a></li>
-  <li><a href="#class">Class/method profile</a></li>
-  <li><a href="#method">Method/class profile</a></li>
-</ul>
-
-<a name="exclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-Total cycles: 16
-
-<br><br>
-Exclusive elapsed times for each method, not including time spent in
-children, sorted by exclusive time.
-
-<br><br>
-<pre>
-    Usecs  self %  sum %  Method
-        8   50.00  50.00  <a href="#m1">[1]</a> Z.m ()
-        2   12.50  62.50  <a href="#m2">[2]</a> A.m ()
-        2   12.50  75.00  <a href="#m3">[3]</a> B.m ()
-        2   12.50  87.50  <a href="#m4">[4]</a> D.m ()
-        2   12.50 100.00  <a href="#m5">[5]</a> E.m ()
-</pre>
-<a name="inclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Inclusive elapsed times for each method and its parents and children,
-sorted by inclusive time.
-
-<br><br>
-<pre>
-index  %/total %/self  index     calls         usecs name
-<a name="m0"></a>----------------------------------------------------
-[0]    100.0%                     0+0             16 (toplevel)
-                 0.0%   excl                       0
-                50.0%    <a href="#m1">[1]</a>      3/3              8 Z.m ()
-                12.5%    <a href="#m2">[2]</a>      1/1              2 A.m ()
-                12.5%    <a href="#m3">[3]</a>      1/1              2 B.m ()
-                12.5%    <a href="#m4">[4]</a>      1/1              2 D.m ()
-                12.5%    <a href="#m5">[5]</a>      1/1              2 E.m ()
-<a name="m1"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      3/3              8 (toplevel)
-[1]     50.0%                     3+0              8 Z.m ()
-               100.0%   excl                       8
-<a name="m2"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[2]     12.5%                     1+0              2 A.m ()
-               100.0%   excl                       2
-<a name="m3"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[3]     12.5%                     1+0              2 B.m ()
-               100.0%   excl                       2
-<a name="m4"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[4]     12.5%                     1+0              2 D.m ()
-               100.0%   excl                       2
-<a name="m5"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[5]     12.5%                     1+0              2 E.m ()
-               100.0%   excl                       2
-</pre>
-<a name="thread"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Elapsed times for each thread, sorted by elapsed time.
-Also includes percentage of time spent during the <i>execution</i> of any filters.
-
-<br><br>
-<pre>
-    Usecs   self %  sum %  FirstFilter %  SecondFilter %  RepeatedFilter %  tid   ThreadName
-       16  100.00 100.00  37.50  37.50   0.00      1 main
-        0    0.00 100.00    nan    nan    nan      2 foo
-        0    0.00 100.00    nan    nan    nan      3 bar
-        0    0.00 100.00    nan    nan    nan      4 blah
-</pre><br />
-
-Break-down of portion of time spent by each thread while waiting on a filter method.
-<br/><br/>
-<pre>
-Filter: FirstFilter
-Total waiting cycles: 6 ( 37.50% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         6                   100.00                     100.00               main
-         0                     0.00                       0.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: SecondFilter
-Total waiting cycles: 6 ( 37.50% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         6                   100.00                     100.00               main
-         0                     0.00                       0.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: RepeatedFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<a name="class"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each class, summed over all the methods
-in the class.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Class</div>
-<div class="link" onClick="javascript:toggle('d0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;3+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Z</div>
-<div class="parent" id="d0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;3+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d1')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd1">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;&nbsp;62.5 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; A</div>
-<div class="parent" id="d1">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d2')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd2">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;&nbsp;75.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; B</div>
-<div class="parent" id="d2">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d3')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd3">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;&nbsp;87.5 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; D</div>
-<div class="parent" id="d3">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m4">[4]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d4')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd4">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; E</div>
-<div class="parent" id="d4">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m5">[5]</a>&nbsp;m&nbsp;()</div>
-</div>
-<a name="method"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each method, summed over all the classes
-that contain a method with the same name.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Method</div>
-<div class="link" onClick="javascript:toggle('e0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xe0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;16 &nbsp;&nbsp;100.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;7+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; m</div>
-<div class="parent" id="e0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;3+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;Z.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;&nbsp;62.5&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;A.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;&nbsp;75.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;B.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;&nbsp;87.5&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m4">[4]</a>&nbsp;D.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m5">[5]</a>&nbsp;E.m&nbsp;()</div>
-</div>
-
-</body>
-</html>
diff --git a/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadDiffFilterDiffKeysTrace b/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadDiffFilterDiffKeysTrace
deleted file mode 100644
index 09983ba..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadDiffFilterDiffKeysTrace
+++ /dev/null
Binary files differ
diff --git a/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadDiffFilterSameKeys b/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadDiffFilterSameKeys
deleted file mode 100644
index b51f81e..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadDiffFilterSameKeys
+++ /dev/null
@@ -1,17 +0,0 @@
-#    ____  ____  ____  ________  ____  ____  ____
-# __|R   ||Z   ||S   ||Z       ||R   ||Z   ||S   |__
-#
-0 1 R
-2 1 R
-2 1 Z
-4 1 Z
-4 1 S
-6 1 S
-6 1 Z
-10 1 Z
-10 1 R
-12 1 R
-12 1 Z
-14 1 Z
-14 1 S
-16 1 S
diff --git a/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadDiffFilterSameKeysExpected b/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadDiffFilterSameKeysExpected
deleted file mode 100644
index 41f9625..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadDiffFilterSameKeysExpected
+++ /dev/null
@@ -1,210 +0,0 @@
-<html>
-<head>
-<script type="text/javascript" src="(null)sortable.js"></script>
-<script langugage="javascript">
-function toggle(item) {
-    obj=document.getElementById(item);
-    visible=(obj.style.display!="none" && obj.style.display!="");
-    key=document.getElementById("x" + item);
-    if (visible) {
-        obj.style.display="none";
-        key.innerHTML="+";
-    } else {
-        obj.style.display="block";
-        key.innerHTML="-";
-    }
-}
-function onMouseOver(obj) {
-    obj.style.background="lightblue";
-}
-function onMouseOut(obj) {
-    obj.style.background="white";
-}
-</script>
-<style type="text/css">
-div { font-family: courier; font-size: 13 }
-div.parent { margin-left: 15; display: none }
-div.leaf { margin-left: 10 }
-div.header { margin-left: 10 }
-div.link { margin-left: 10; cursor: move }
-span.parent { padding-right: 10; }
-span.leaf { padding-right: 10; }
-a img { border: 0;}
-table.sortable th { border-width: 0px 1px 1px 1px; background-color: #ccc;}
-a { text-decoration: none; }
-a:hover { text-decoration: underline; }
-table.sortable th, table.sortable td { text-align: left;}table.sortable tr.odd td { background-color: #ddd; }
-table.sortable tr.even td { background-color: #fff; }
-</style>
-</head><body>
-
-<a name="contents"></a>
-<h2>Table of Contents</h2>
-<ul>
-  <li><a href="#exclusive">Exclusive profile</a></li>
-  <li><a href="#inclusive">Inclusive profile</a></li>
-  <li><a href="#thread">Thread profile</a></li>
-  <li><a href="#class">Class/method profile</a></li>
-  <li><a href="#method">Method/class profile</a></li>
-</ul>
-
-<a name="exclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-Total cycles: 16
-
-<br><br>
-Exclusive elapsed times for each method, not including time spent in
-children, sorted by exclusive time.
-
-<br><br>
-<pre>
-    Usecs  self %  sum %  Method
-        8   50.00  50.00  <a href="#m1">[1]</a> Z.m ()
-        4   25.00  75.00  <a href="#m2">[2]</a> R.m ()
-        4   25.00 100.00  <a href="#m3">[3]</a> S.m ()
-</pre>
-<a name="inclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Inclusive elapsed times for each method and its parents and children,
-sorted by inclusive time.
-
-<br><br>
-<pre>
-index  %/total %/self  index     calls         usecs name
-<a name="m0"></a>----------------------------------------------------
-[0]    100.0%                     0+0             16 (toplevel)
-                 0.0%   excl                       0
-                50.0%    <a href="#m1">[1]</a>      3/3              8 Z.m ()
-                25.0%    <a href="#m2">[2]</a>      2/2              4 R.m ()
-                25.0%    <a href="#m3">[3]</a>      2/2              4 S.m ()
-<a name="m1"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      3/3              8 (toplevel)
-[1]     50.0%                     3+0              8 Z.m ()
-               100.0%   excl                       8
-<a name="m2"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              4 (toplevel)
-[2]     25.0%                     2+0              4 R.m ()
-               100.0%   excl                       4
-<a name="m3"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              4 (toplevel)
-[3]     25.0%                     2+0              4 S.m ()
-               100.0%   excl                       4
-</pre>
-<a name="thread"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Elapsed times for each thread, sorted by elapsed time.
-Also includes percentage of time spent during the <i>execution</i> of any filters.
-
-<br><br>
-<pre>
-    Usecs   self %  sum %  FirstFilter %  SecondFilter %  RepeatedFilter %  tid   ThreadName
-       16  100.00 100.00  75.00   0.00  75.00      1 main
-        0    0.00 100.00    nan    nan    nan      2 foo
-        0    0.00 100.00    nan    nan    nan      3 bar
-        0    0.00 100.00    nan    nan    nan      4 blah
-</pre><br />
-
-Break-down of portion of time spent by each thread while waiting on a filter method.
-<br/><br/>
-<pre>
-Filter: FirstFilter
-Total waiting cycles: 12 ( 75.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-        12                   100.00                     100.00               main
-         0                     0.00                       0.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: SecondFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<br/><br/>
-<pre>
-Filter: RepeatedFilter
-Total waiting cycles: 12 ( 75.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-        12                   100.00                     100.00               main
-         0                     0.00                       0.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<a name="class"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each class, summed over all the methods
-in the class.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Class</div>
-<div class="link" onClick="javascript:toggle('d0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;3+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Z</div>
-<div class="parent" id="d0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;3+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d1')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd1">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;25.0 &nbsp;&nbsp;&nbsp;75.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; R</div>
-<div class="parent" id="d1">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d2')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd2">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;25.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; S</div>
-<div class="parent" id="d2">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;m&nbsp;()</div>
-</div>
-<a name="method"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each method, summed over all the classes
-that contain a method with the same name.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Method</div>
-<div class="link" onClick="javascript:toggle('e0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xe0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;16 &nbsp;&nbsp;100.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;7+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; m</div>
-<div class="parent" id="e0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;3+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;Z.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;25.0&nbsp;&nbsp;&nbsp;&nbsp;75.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;R.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;25.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;S.m&nbsp;()</div>
-</div>
-
-</body>
-</html>
diff --git a/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadDiffFilterSameKeysTrace b/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadDiffFilterSameKeysTrace
deleted file mode 100644
index 2cccf07..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadDiffFilterSameKeysTrace
+++ /dev/null
Binary files differ
diff --git a/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadSameFilterDiffKeys b/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadSameFilterDiffKeys
deleted file mode 100644
index d4e41a4..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadSameFilterDiffKeys
+++ /dev/null
@@ -1,16 +0,0 @@
-#                                     ____
-#    ____  ____  ____  ________  ____|Z   |____
-# __|A   ||Z   ||B   ||Z       ||C             |__
-#
-0 1 A
-2 1 A
-2 1 Z
-4 1 Z
-4 1 B
-6 1 B
-6 1 Z
-10 1 Z
-10 1 C
-12 1  Z
-14 1  Z
-16 1 C
diff --git a/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadSameFilterDiffKeysExpected b/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadSameFilterDiffKeysExpected
deleted file mode 100644
index d81cccc..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadSameFilterDiffKeysExpected
+++ /dev/null
@@ -1,216 +0,0 @@
-<html>
-<head>
-<script type="text/javascript" src="(null)sortable.js"></script>
-<script langugage="javascript">
-function toggle(item) {
-    obj=document.getElementById(item);
-    visible=(obj.style.display!="none" && obj.style.display!="");
-    key=document.getElementById("x" + item);
-    if (visible) {
-        obj.style.display="none";
-        key.innerHTML="+";
-    } else {
-        obj.style.display="block";
-        key.innerHTML="-";
-    }
-}
-function onMouseOver(obj) {
-    obj.style.background="lightblue";
-}
-function onMouseOut(obj) {
-    obj.style.background="white";
-}
-</script>
-<style type="text/css">
-div { font-family: courier; font-size: 13 }
-div.parent { margin-left: 15; display: none }
-div.leaf { margin-left: 10 }
-div.header { margin-left: 10 }
-div.link { margin-left: 10; cursor: move }
-span.parent { padding-right: 10; }
-span.leaf { padding-right: 10; }
-a img { border: 0;}
-table.sortable th { border-width: 0px 1px 1px 1px; background-color: #ccc;}
-a { text-decoration: none; }
-a:hover { text-decoration: underline; }
-table.sortable th, table.sortable td { text-align: left;}table.sortable tr.odd td { background-color: #ddd; }
-table.sortable tr.even td { background-color: #fff; }
-</style>
-</head><body>
-
-<a name="contents"></a>
-<h2>Table of Contents</h2>
-<ul>
-  <li><a href="#exclusive">Exclusive profile</a></li>
-  <li><a href="#inclusive">Inclusive profile</a></li>
-  <li><a href="#thread">Thread profile</a></li>
-  <li><a href="#class">Class/method profile</a></li>
-  <li><a href="#method">Method/class profile</a></li>
-</ul>
-
-<a name="exclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-Total cycles: 16
-
-<br><br>
-Exclusive elapsed times for each method, not including time spent in
-children, sorted by exclusive time.
-
-<br><br>
-<pre>
-    Usecs  self %  sum %  Method
-        8   50.00  50.00  <a href="#m1">[1]</a> Z.m ()
-        4   25.00  75.00  <a href="#m2">[2]</a> C.m ()
-        2   12.50  87.50  <a href="#m3">[3]</a> A.m ()
-        2   12.50 100.00  <a href="#m4">[4]</a> B.m ()
-</pre>
-<a name="inclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Inclusive elapsed times for each method and its parents and children,
-sorted by inclusive time.
-
-<br><br>
-<pre>
-index  %/total %/self  index     calls         usecs name
-<a name="m0"></a>----------------------------------------------------
-[0]    100.0%                     0+0             16 (toplevel)
-                 0.0%   excl                       0
-                37.5%    <a href="#m2">[2]</a>      1/1              6 C.m ()
-                37.5%    <a href="#m1">[1]</a>      2/3              6 Z.m ()
-                12.5%    <a href="#m3">[3]</a>      1/1              2 A.m ()
-                12.5%    <a href="#m4">[4]</a>      1/1              2 B.m ()
-<a name="m1"></a>----------------------------------------------------
-                75.0%    <a href="#m0">[0]</a>      2/3              6 (toplevel)
-                25.0%    <a href="#m2">[2]</a>      1/3              2 C.m ()
-[1]     50.0%                     3+0              8 Z.m ()
-               100.0%   excl                       8
-<a name="m2"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              6 (toplevel)
-[2]     37.5%                     1+0              6 C.m ()
-                66.7%   excl                       4
-                33.3%    <a href="#m1">[1]</a>      1/3              2 Z.m ()
-<a name="m3"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[3]     12.5%                     1+0              2 A.m ()
-               100.0%   excl                       2
-<a name="m4"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[4]     12.5%                     1+0              2 B.m ()
-               100.0%   excl                       2
-</pre>
-<a name="thread"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Elapsed times for each thread, sorted by elapsed time.
-Also includes percentage of time spent during the <i>execution</i> of any filters.
-
-<br><br>
-<pre>
-    Usecs   self %  sum %  FirstFilter %  SecondFilter %  RepeatedFilter %  tid   ThreadName
-       16  100.00 100.00  75.00   0.00   0.00      1 main
-        0    0.00 100.00    nan    nan    nan      2 foo
-        0    0.00 100.00    nan    nan    nan      3 bar
-        0    0.00 100.00    nan    nan    nan      4 blah
-</pre><br />
-
-Break-down of portion of time spent by each thread while waiting on a filter method.
-<br/><br/>
-<pre>
-Filter: FirstFilter
-Total waiting cycles: 12 ( 75.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-        12                   100.00                     100.00               main
-         0                     0.00                       0.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: SecondFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<br/><br/>
-<pre>
-Filter: RepeatedFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<a name="class"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each class, summed over all the methods
-in the class.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Class</div>
-<div class="link" onClick="javascript:toggle('d0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;3+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Z</div>
-<div class="parent" id="d0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;3+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d1')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd1">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;25.0 &nbsp;&nbsp;&nbsp;75.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; C</div>
-<div class="parent" id="d1">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;6&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d2')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd2">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;&nbsp;87.5 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; A</div>
-<div class="parent" id="d2">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d3')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd3">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; B</div>
-<div class="parent" id="d3">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m4">[4]</a>&nbsp;m&nbsp;()</div>
-</div>
-<a name="method"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each method, summed over all the classes
-that contain a method with the same name.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Method</div>
-<div class="link" onClick="javascript:toggle('e0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xe0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;16 &nbsp;&nbsp;100.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;6+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; m</div>
-<div class="parent" id="e0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;3+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;Z.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;6&nbsp;&nbsp;&nbsp;&nbsp;25.0&nbsp;&nbsp;&nbsp;&nbsp;75.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;C.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;&nbsp;87.5&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;A.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m4">[4]</a>&nbsp;B.m&nbsp;()</div>
-</div>
-
-</body>
-</html>
diff --git a/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadSameFilterDiffKeysTrace b/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadSameFilterDiffKeysTrace
deleted file mode 100644
index 3f61656..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadSameFilterDiffKeysTrace
+++ /dev/null
Binary files differ
diff --git a/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadSameFilterSameKeys b/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadSameFilterSameKeys
deleted file mode 100644
index 0b3377d..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadSameFilterSameKeys
+++ /dev/null
@@ -1,17 +0,0 @@
-#    ____  ____  ____  ________  ____  ____  ____
-# __|A   ||Z   ||B   ||Z       ||A   ||Z   ||B   |__
-#
-0 1 A
-2 1 A
-2 1 Z
-4 1 Z
-4 1 B
-6 1 B
-6 1 Z
-10 1 Z
-10 1 A
-12 1 A
-12 1 Z
-14 1 Z
-14 1 B
-16 1 B
diff --git a/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadSameFilterSameKeysExpected b/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadSameFilterSameKeysExpected
deleted file mode 100644
index aa476b3..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadSameFilterSameKeysExpected
+++ /dev/null
@@ -1,203 +0,0 @@
-<html>
-<head>
-<script type="text/javascript" src="(null)sortable.js"></script>
-<script langugage="javascript">
-function toggle(item) {
-    obj=document.getElementById(item);
-    visible=(obj.style.display!="none" && obj.style.display!="");
-    key=document.getElementById("x" + item);
-    if (visible) {
-        obj.style.display="none";
-        key.innerHTML="+";
-    } else {
-        obj.style.display="block";
-        key.innerHTML="-";
-    }
-}
-function onMouseOver(obj) {
-    obj.style.background="lightblue";
-}
-function onMouseOut(obj) {
-    obj.style.background="white";
-}
-</script>
-<style type="text/css">
-div { font-family: courier; font-size: 13 }
-div.parent { margin-left: 15; display: none }
-div.leaf { margin-left: 10 }
-div.header { margin-left: 10 }
-div.link { margin-left: 10; cursor: move }
-span.parent { padding-right: 10; }
-span.leaf { padding-right: 10; }
-a img { border: 0;}
-table.sortable th { border-width: 0px 1px 1px 1px; background-color: #ccc;}
-a { text-decoration: none; }
-a:hover { text-decoration: underline; }
-table.sortable th, table.sortable td { text-align: left;}table.sortable tr.odd td { background-color: #ddd; }
-table.sortable tr.even td { background-color: #fff; }
-</style>
-</head><body>
-
-<a name="contents"></a>
-<h2>Table of Contents</h2>
-<ul>
-  <li><a href="#exclusive">Exclusive profile</a></li>
-  <li><a href="#inclusive">Inclusive profile</a></li>
-  <li><a href="#thread">Thread profile</a></li>
-  <li><a href="#class">Class/method profile</a></li>
-  <li><a href="#method">Method/class profile</a></li>
-</ul>
-
-<a name="exclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-Total cycles: 16
-
-<br><br>
-Exclusive elapsed times for each method, not including time spent in
-children, sorted by exclusive time.
-
-<br><br>
-<pre>
-    Usecs  self %  sum %  Method
-        8   50.00  50.00  <a href="#m1">[1]</a> Z.m ()
-        4   25.00  75.00  <a href="#m2">[2]</a> A.m ()
-        4   25.00 100.00  <a href="#m3">[3]</a> B.m ()
-</pre>
-<a name="inclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Inclusive elapsed times for each method and its parents and children,
-sorted by inclusive time.
-
-<br><br>
-<pre>
-index  %/total %/self  index     calls         usecs name
-<a name="m0"></a>----------------------------------------------------
-[0]    100.0%                     0+0             16 (toplevel)
-                 0.0%   excl                       0
-                50.0%    <a href="#m1">[1]</a>      3/3              8 Z.m ()
-                25.0%    <a href="#m2">[2]</a>      2/2              4 A.m ()
-                25.0%    <a href="#m3">[3]</a>      2/2              4 B.m ()
-<a name="m1"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      3/3              8 (toplevel)
-[1]     50.0%                     3+0              8 Z.m ()
-               100.0%   excl                       8
-<a name="m2"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              4 (toplevel)
-[2]     25.0%                     2+0              4 A.m ()
-               100.0%   excl                       4
-<a name="m3"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              4 (toplevel)
-[3]     25.0%                     2+0              4 B.m ()
-               100.0%   excl                       4
-</pre>
-<a name="thread"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Elapsed times for each thread, sorted by elapsed time.
-Also includes percentage of time spent during the <i>execution</i> of any filters.
-
-<br><br>
-<pre>
-    Usecs   self %  sum %  FirstFilter %  SecondFilter %  RepeatedFilter %  tid   ThreadName
-       16  100.00 100.00  75.00   0.00   0.00      1 main
-        0    0.00 100.00    nan    nan    nan      2 foo
-        0    0.00 100.00    nan    nan    nan      3 bar
-        0    0.00 100.00    nan    nan    nan      4 blah
-</pre><br />
-
-Break-down of portion of time spent by each thread while waiting on a filter method.
-<br/><br/>
-<pre>
-Filter: FirstFilter
-Total waiting cycles: 12 ( 75.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-        12                   100.00                     100.00               main
-         0                     0.00                       0.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: SecondFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<br/><br/>
-<pre>
-Filter: RepeatedFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<a name="class"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each class, summed over all the methods
-in the class.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Class</div>
-<div class="link" onClick="javascript:toggle('d0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;3+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Z</div>
-<div class="parent" id="d0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;3+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d1')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd1">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;25.0 &nbsp;&nbsp;&nbsp;75.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; A</div>
-<div class="parent" id="d1">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d2')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd2">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;25.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; B</div>
-<div class="parent" id="d2">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;m&nbsp;()</div>
-</div>
-<a name="method"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each method, summed over all the classes
-that contain a method with the same name.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Method</div>
-<div class="link" onClick="javascript:toggle('e0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xe0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;16 &nbsp;&nbsp;100.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;7+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; m</div>
-<div class="parent" id="e0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;3+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;Z.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;25.0&nbsp;&nbsp;&nbsp;&nbsp;75.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;A.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;25.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;B.m&nbsp;()</div>
-</div>
-
-</body>
-</html>
diff --git a/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadSameFilterSameKeysTrace b/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadSameFilterSameKeysTrace
deleted file mode 100644
index c6ddbe5..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingDisjointSingleThreadSameFilterSameKeysTrace
+++ /dev/null
Binary files differ
diff --git a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadDiffFilterDiffKeys b/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadDiffFilterDiffKeys
deleted file mode 100644
index d87ac81..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadDiffFilterDiffKeys
+++ /dev/null
@@ -1,19 +0,0 @@
-#    ____                       ____  ________
-# __|A   |_____________________|B   ||Z       |__
-#
-#         ____  ________  ____
-# _______|D   ||Z       ||E   |______________
-#
-#
-0 1 A
-2 1 A
-0 2 D
-2 2 D
-2 2 Z
-6 2 Z
-6 2 E
-8 2 E
-2 1 B
-4 1 B
-4 1 Z
-8 1 Z
diff --git a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadDiffFilterDiffKeysExpected b/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadDiffFilterDiffKeysExpected
deleted file mode 100644
index a97f25c..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadDiffFilterDiffKeysExpected
+++ /dev/null
@@ -1,232 +0,0 @@
-<html>
-<head>
-<script type="text/javascript" src="(null)sortable.js"></script>
-<script langugage="javascript">
-function toggle(item) {
-    obj=document.getElementById(item);
-    visible=(obj.style.display!="none" && obj.style.display!="");
-    key=document.getElementById("x" + item);
-    if (visible) {
-        obj.style.display="none";
-        key.innerHTML="+";
-    } else {
-        obj.style.display="block";
-        key.innerHTML="-";
-    }
-}
-function onMouseOver(obj) {
-    obj.style.background="lightblue";
-}
-function onMouseOut(obj) {
-    obj.style.background="white";
-}
-</script>
-<style type="text/css">
-div { font-family: courier; font-size: 13 }
-div.parent { margin-left: 15; display: none }
-div.leaf { margin-left: 10 }
-div.header { margin-left: 10 }
-div.link { margin-left: 10; cursor: move }
-span.parent { padding-right: 10; }
-span.leaf { padding-right: 10; }
-a img { border: 0;}
-table.sortable th { border-width: 0px 1px 1px 1px; background-color: #ccc;}
-a { text-decoration: none; }
-a:hover { text-decoration: underline; }
-table.sortable th, table.sortable td { text-align: left;}table.sortable tr.odd td { background-color: #ddd; }
-table.sortable tr.even td { background-color: #fff; }
-</style>
-</head><body>
-
-<a name="contents"></a>
-<h2>Table of Contents</h2>
-<ul>
-  <li><a href="#exclusive">Exclusive profile</a></li>
-  <li><a href="#inclusive">Inclusive profile</a></li>
-  <li><a href="#thread">Thread profile</a></li>
-  <li><a href="#class">Class/method profile</a></li>
-  <li><a href="#method">Method/class profile</a></li>
-</ul>
-
-<a name="exclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-Total cycles: 16
-
-<br><br>
-Exclusive elapsed times for each method, not including time spent in
-children, sorted by exclusive time.
-
-<br><br>
-<pre>
-    Usecs  self %  sum %  Method
-        8   50.00  50.00  <a href="#m1">[1]</a> Z.m ()
-        2   12.50  62.50  <a href="#m2">[2]</a> A.m ()
-        2   12.50  75.00  <a href="#m3">[3]</a> B.m ()
-        2   12.50  87.50  <a href="#m4">[4]</a> D.m ()
-        2   12.50 100.00  <a href="#m5">[5]</a> E.m ()
-</pre>
-<a name="inclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Inclusive elapsed times for each method and its parents and children,
-sorted by inclusive time.
-
-<br><br>
-<pre>
-index  %/total %/self  index     calls         usecs name
-<a name="m0"></a>----------------------------------------------------
-[0]    100.0%                     0+0             16 (toplevel)
-                 0.0%   excl                       0
-                50.0%    <a href="#m1">[1]</a>      2/2              8 Z.m ()
-                12.5%    <a href="#m2">[2]</a>      1/1              2 A.m ()
-                12.5%    <a href="#m3">[3]</a>      1/1              2 B.m ()
-                12.5%    <a href="#m4">[4]</a>      1/1              2 D.m ()
-                12.5%    <a href="#m5">[5]</a>      1/1              2 E.m ()
-<a name="m1"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              8 (toplevel)
-[1]     50.0%                     2+0              8 Z.m ()
-               100.0%   excl                       8
-<a name="m2"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[2]     12.5%                     1+0              2 A.m ()
-               100.0%   excl                       2
-<a name="m3"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[3]     12.5%                     1+0              2 B.m ()
-               100.0%   excl                       2
-<a name="m4"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[4]     12.5%                     1+0              2 D.m ()
-               100.0%   excl                       2
-<a name="m5"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[5]     12.5%                     1+0              2 E.m ()
-               100.0%   excl                       2
-</pre>
-<a name="thread"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Elapsed times for each thread, sorted by elapsed time.
-Also includes percentage of time spent during the <i>execution</i> of any filters.
-
-<br><br>
-<pre>
-    Usecs   self %  sum %  FirstFilter %  SecondFilter %  RepeatedFilter %  tid   ThreadName
-        8   50.00  50.00  50.00   0.00   0.00      1 main
-        8   50.00 100.00   0.00 100.00   0.00      2 foo
-        0    0.00 100.00    nan    nan    nan      3 bar
-        0    0.00 100.00    nan    nan    nan      4 blah
-</pre><br />
-
-Break-down of portion of time spent by each thread while waiting on a filter method.
-<br/><br/>
-<pre>
-Filter: FirstFilter
-Total waiting cycles: 12 ( 75.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-        12                   100.00                      33.33               main
-         0                     0.00                      66.67               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: SecondFilter
-Total waiting cycles: 8 ( 50.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         0                     0.00                       0.00               main
-         8                   100.00                     100.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: RepeatedFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<a name="class"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each class, summed over all the methods
-in the class.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Class</div>
-<div class="link" onClick="javascript:toggle('d0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Z</div>
-<div class="parent" id="d0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d1')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd1">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;&nbsp;62.5 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; A</div>
-<div class="parent" id="d1">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d2')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd2">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;&nbsp;75.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; B</div>
-<div class="parent" id="d2">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d3')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd3">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;&nbsp;87.5 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; D</div>
-<div class="parent" id="d3">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m4">[4]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d4')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd4">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; E</div>
-<div class="parent" id="d4">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m5">[5]</a>&nbsp;m&nbsp;()</div>
-</div>
-<a name="method"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each method, summed over all the classes
-that contain a method with the same name.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Method</div>
-<div class="link" onClick="javascript:toggle('e0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xe0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;16 &nbsp;&nbsp;100.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;6+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; m</div>
-<div class="parent" id="e0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;Z.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;&nbsp;62.5&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;A.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;&nbsp;75.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;B.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;&nbsp;87.5&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m4">[4]</a>&nbsp;D.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m5">[5]</a>&nbsp;E.m&nbsp;()</div>
-</div>
-
-</body>
-</html>
diff --git a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadDiffFilterDiffKeysTrace b/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadDiffFilterDiffKeysTrace
deleted file mode 100644
index 832bbfc..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadDiffFilterDiffKeysTrace
+++ /dev/null
Binary files differ
diff --git a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadDiffFilterSameKeys b/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadDiffFilterSameKeys
deleted file mode 100644
index 82ab142..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadDiffFilterSameKeys
+++ /dev/null
@@ -1,19 +0,0 @@
-#    ____                       ____  ________
-# __|R   |_____________________|S   ||Z       |__
-#
-#         ____  ________  ____
-# _______|R   ||Z       ||S   |______________
-#
-#
-0 1 R
-2 1 R
-0 2 R
-2 2 R
-2 2 Z
-6 2 Z
-6 2 S
-8 2 S
-2 1 S
-4 1 S
-4 1 Z
-8 1 Z
diff --git a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadDiffFilterSameKeysExpected b/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadDiffFilterSameKeysExpected
deleted file mode 100644
index 623478e..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadDiffFilterSameKeysExpected
+++ /dev/null
@@ -1,210 +0,0 @@
-<html>
-<head>
-<script type="text/javascript" src="(null)sortable.js"></script>
-<script langugage="javascript">
-function toggle(item) {
-    obj=document.getElementById(item);
-    visible=(obj.style.display!="none" && obj.style.display!="");
-    key=document.getElementById("x" + item);
-    if (visible) {
-        obj.style.display="none";
-        key.innerHTML="+";
-    } else {
-        obj.style.display="block";
-        key.innerHTML="-";
-    }
-}
-function onMouseOver(obj) {
-    obj.style.background="lightblue";
-}
-function onMouseOut(obj) {
-    obj.style.background="white";
-}
-</script>
-<style type="text/css">
-div { font-family: courier; font-size: 13 }
-div.parent { margin-left: 15; display: none }
-div.leaf { margin-left: 10 }
-div.header { margin-left: 10 }
-div.link { margin-left: 10; cursor: move }
-span.parent { padding-right: 10; }
-span.leaf { padding-right: 10; }
-a img { border: 0;}
-table.sortable th { border-width: 0px 1px 1px 1px; background-color: #ccc;}
-a { text-decoration: none; }
-a:hover { text-decoration: underline; }
-table.sortable th, table.sortable td { text-align: left;}table.sortable tr.odd td { background-color: #ddd; }
-table.sortable tr.even td { background-color: #fff; }
-</style>
-</head><body>
-
-<a name="contents"></a>
-<h2>Table of Contents</h2>
-<ul>
-  <li><a href="#exclusive">Exclusive profile</a></li>
-  <li><a href="#inclusive">Inclusive profile</a></li>
-  <li><a href="#thread">Thread profile</a></li>
-  <li><a href="#class">Class/method profile</a></li>
-  <li><a href="#method">Method/class profile</a></li>
-</ul>
-
-<a name="exclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-Total cycles: 16
-
-<br><br>
-Exclusive elapsed times for each method, not including time spent in
-children, sorted by exclusive time.
-
-<br><br>
-<pre>
-    Usecs  self %  sum %  Method
-        8   50.00  50.00  <a href="#m1">[1]</a> Z.m ()
-        4   25.00  75.00  <a href="#m2">[2]</a> R.m ()
-        4   25.00 100.00  <a href="#m3">[3]</a> S.m ()
-</pre>
-<a name="inclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Inclusive elapsed times for each method and its parents and children,
-sorted by inclusive time.
-
-<br><br>
-<pre>
-index  %/total %/self  index     calls         usecs name
-<a name="m0"></a>----------------------------------------------------
-[0]    100.0%                     0+0             16 (toplevel)
-                 0.0%   excl                       0
-                50.0%    <a href="#m1">[1]</a>      2/2              8 Z.m ()
-                25.0%    <a href="#m2">[2]</a>      2/2              4 R.m ()
-                25.0%    <a href="#m3">[3]</a>      2/2              4 S.m ()
-<a name="m1"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              8 (toplevel)
-[1]     50.0%                     2+0              8 Z.m ()
-               100.0%   excl                       8
-<a name="m2"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              4 (toplevel)
-[2]     25.0%                     2+0              4 R.m ()
-               100.0%   excl                       4
-<a name="m3"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              4 (toplevel)
-[3]     25.0%                     2+0              4 S.m ()
-               100.0%   excl                       4
-</pre>
-<a name="thread"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Elapsed times for each thread, sorted by elapsed time.
-Also includes percentage of time spent during the <i>execution</i> of any filters.
-
-<br><br>
-<pre>
-    Usecs   self %  sum %  FirstFilter %  SecondFilter %  RepeatedFilter %  tid   ThreadName
-        8   50.00  50.00  50.00   0.00  50.00      1 main
-        8   50.00 100.00 100.00   0.00 100.00      2 foo
-        0    0.00 100.00    nan    nan    nan      3 bar
-        0    0.00 100.00    nan    nan    nan      4 blah
-</pre><br />
-
-Break-down of portion of time spent by each thread while waiting on a filter method.
-<br/><br/>
-<pre>
-Filter: FirstFilter
-Total waiting cycles: 12 ( 75.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-        12                   100.00                      33.33               main
-         8                    66.67                      66.67               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: SecondFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<br/><br/>
-<pre>
-Filter: RepeatedFilter
-Total waiting cycles: 12 ( 75.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-        12                   100.00                      33.33               main
-         8                    66.67                      66.67               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<a name="class"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each class, summed over all the methods
-in the class.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Class</div>
-<div class="link" onClick="javascript:toggle('d0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Z</div>
-<div class="parent" id="d0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d1')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd1">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;25.0 &nbsp;&nbsp;&nbsp;75.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; R</div>
-<div class="parent" id="d1">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d2')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd2">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;25.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; S</div>
-<div class="parent" id="d2">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;m&nbsp;()</div>
-</div>
-<a name="method"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each method, summed over all the classes
-that contain a method with the same name.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Method</div>
-<div class="link" onClick="javascript:toggle('e0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xe0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;16 &nbsp;&nbsp;100.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;6+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; m</div>
-<div class="parent" id="e0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;Z.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;25.0&nbsp;&nbsp;&nbsp;&nbsp;75.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;R.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;25.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;S.m&nbsp;()</div>
-</div>
-
-</body>
-</html>
diff --git a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadDiffFilterSameKeysTrace b/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadDiffFilterSameKeysTrace
deleted file mode 100644
index 371f150..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadDiffFilterSameKeysTrace
+++ /dev/null
Binary files differ
diff --git a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadSameFilterDiffKeys b/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadSameFilterDiffKeys
deleted file mode 100644
index 511543f..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadSameFilterDiffKeys
+++ /dev/null
@@ -1,19 +0,0 @@
-#    ____                       ____  ________
-# __|A   |_____________________|B   ||Z       |__
-#
-#         ____  ________  ____
-# _______|R   ||Z       ||S   |______________
-#
-#
-0 1 A
-2 1 A
-0 2 R
-2 2 R
-2 2 Z
-6 2 Z
-6 2 S
-8 2 S
-2 1 B
-4 1 B
-4 1 Z
-8 1 Z
diff --git a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadSameFilterDiffKeysExpected b/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadSameFilterDiffKeysExpected
deleted file mode 100644
index 1193f5f..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadSameFilterDiffKeysExpected
+++ /dev/null
@@ -1,232 +0,0 @@
-<html>
-<head>
-<script type="text/javascript" src="(null)sortable.js"></script>
-<script langugage="javascript">
-function toggle(item) {
-    obj=document.getElementById(item);
-    visible=(obj.style.display!="none" && obj.style.display!="");
-    key=document.getElementById("x" + item);
-    if (visible) {
-        obj.style.display="none";
-        key.innerHTML="+";
-    } else {
-        obj.style.display="block";
-        key.innerHTML="-";
-    }
-}
-function onMouseOver(obj) {
-    obj.style.background="lightblue";
-}
-function onMouseOut(obj) {
-    obj.style.background="white";
-}
-</script>
-<style type="text/css">
-div { font-family: courier; font-size: 13 }
-div.parent { margin-left: 15; display: none }
-div.leaf { margin-left: 10 }
-div.header { margin-left: 10 }
-div.link { margin-left: 10; cursor: move }
-span.parent { padding-right: 10; }
-span.leaf { padding-right: 10; }
-a img { border: 0;}
-table.sortable th { border-width: 0px 1px 1px 1px; background-color: #ccc;}
-a { text-decoration: none; }
-a:hover { text-decoration: underline; }
-table.sortable th, table.sortable td { text-align: left;}table.sortable tr.odd td { background-color: #ddd; }
-table.sortable tr.even td { background-color: #fff; }
-</style>
-</head><body>
-
-<a name="contents"></a>
-<h2>Table of Contents</h2>
-<ul>
-  <li><a href="#exclusive">Exclusive profile</a></li>
-  <li><a href="#inclusive">Inclusive profile</a></li>
-  <li><a href="#thread">Thread profile</a></li>
-  <li><a href="#class">Class/method profile</a></li>
-  <li><a href="#method">Method/class profile</a></li>
-</ul>
-
-<a name="exclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-Total cycles: 16
-
-<br><br>
-Exclusive elapsed times for each method, not including time spent in
-children, sorted by exclusive time.
-
-<br><br>
-<pre>
-    Usecs  self %  sum %  Method
-        8   50.00  50.00  <a href="#m1">[1]</a> Z.m ()
-        2   12.50  62.50  <a href="#m2">[2]</a> A.m ()
-        2   12.50  75.00  <a href="#m3">[3]</a> B.m ()
-        2   12.50  87.50  <a href="#m4">[4]</a> R.m ()
-        2   12.50 100.00  <a href="#m5">[5]</a> S.m ()
-</pre>
-<a name="inclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Inclusive elapsed times for each method and its parents and children,
-sorted by inclusive time.
-
-<br><br>
-<pre>
-index  %/total %/self  index     calls         usecs name
-<a name="m0"></a>----------------------------------------------------
-[0]    100.0%                     0+0             16 (toplevel)
-                 0.0%   excl                       0
-                50.0%    <a href="#m1">[1]</a>      2/2              8 Z.m ()
-                12.5%    <a href="#m2">[2]</a>      1/1              2 A.m ()
-                12.5%    <a href="#m3">[3]</a>      1/1              2 B.m ()
-                12.5%    <a href="#m4">[4]</a>      1/1              2 R.m ()
-                12.5%    <a href="#m5">[5]</a>      1/1              2 S.m ()
-<a name="m1"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              8 (toplevel)
-[1]     50.0%                     2+0              8 Z.m ()
-               100.0%   excl                       8
-<a name="m2"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[2]     12.5%                     1+0              2 A.m ()
-               100.0%   excl                       2
-<a name="m3"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[3]     12.5%                     1+0              2 B.m ()
-               100.0%   excl                       2
-<a name="m4"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[4]     12.5%                     1+0              2 R.m ()
-               100.0%   excl                       2
-<a name="m5"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[5]     12.5%                     1+0              2 S.m ()
-               100.0%   excl                       2
-</pre>
-<a name="thread"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Elapsed times for each thread, sorted by elapsed time.
-Also includes percentage of time spent during the <i>execution</i> of any filters.
-
-<br><br>
-<pre>
-    Usecs   self %  sum %  FirstFilter %  SecondFilter %  RepeatedFilter %  tid   ThreadName
-        8   50.00  50.00  50.00   0.00   0.00      1 main
-        8   50.00 100.00 100.00   0.00 100.00      2 foo
-        0    0.00 100.00    nan    nan    nan      3 bar
-        0    0.00 100.00    nan    nan    nan      4 blah
-</pre><br />
-
-Break-down of portion of time spent by each thread while waiting on a filter method.
-<br/><br/>
-<pre>
-Filter: FirstFilter
-Total waiting cycles: 12 ( 75.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-        12                   100.00                      33.33               main
-         8                    66.67                      66.67               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: SecondFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<br/><br/>
-<pre>
-Filter: RepeatedFilter
-Total waiting cycles: 8 ( 50.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         0                     0.00                       0.00               main
-         8                   100.00                     100.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<a name="class"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each class, summed over all the methods
-in the class.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Class</div>
-<div class="link" onClick="javascript:toggle('d0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Z</div>
-<div class="parent" id="d0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d1')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd1">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;&nbsp;62.5 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; A</div>
-<div class="parent" id="d1">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d2')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd2">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;&nbsp;75.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; B</div>
-<div class="parent" id="d2">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d3')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd3">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;&nbsp;87.5 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; R</div>
-<div class="parent" id="d3">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m4">[4]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d4')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd4">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; S</div>
-<div class="parent" id="d4">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m5">[5]</a>&nbsp;m&nbsp;()</div>
-</div>
-<a name="method"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each method, summed over all the classes
-that contain a method with the same name.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Method</div>
-<div class="link" onClick="javascript:toggle('e0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xe0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;16 &nbsp;&nbsp;100.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;6+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; m</div>
-<div class="parent" id="e0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;Z.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;&nbsp;62.5&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;A.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;&nbsp;75.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;B.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;&nbsp;87.5&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m4">[4]</a>&nbsp;R.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m5">[5]</a>&nbsp;S.m&nbsp;()</div>
-</div>
-
-</body>
-</html>
diff --git a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadSameFilterDiffKeysTrace b/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadSameFilterDiffKeysTrace
deleted file mode 100644
index 9f87efc..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadSameFilterDiffKeysTrace
+++ /dev/null
Binary files differ
diff --git a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadSameFilterSameKeys b/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadSameFilterSameKeys
deleted file mode 100644
index 6714ddd..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadSameFilterSameKeys
+++ /dev/null
@@ -1,19 +0,0 @@
-#    ____                       ____  ________
-# __|A   |_____________________|B   ||Z       |__
-#
-#         ____  ________  ____
-# _______|A   ||Z       ||B   |______________
-#
-#
-0 1 A
-2 1 A
-0 2 A
-2 2 A
-2 2 Z
-6 2 Z
-6 2 B
-8 2 B
-2 1 B
-4 1 B
-4 1 Z
-8 1 Z
diff --git a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadSameFilterSameKeysExpected b/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadSameFilterSameKeysExpected
deleted file mode 100644
index 79c2e63..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadSameFilterSameKeysExpected
+++ /dev/null
@@ -1,203 +0,0 @@
-<html>
-<head>
-<script type="text/javascript" src="(null)sortable.js"></script>
-<script langugage="javascript">
-function toggle(item) {
-    obj=document.getElementById(item);
-    visible=(obj.style.display!="none" && obj.style.display!="");
-    key=document.getElementById("x" + item);
-    if (visible) {
-        obj.style.display="none";
-        key.innerHTML="+";
-    } else {
-        obj.style.display="block";
-        key.innerHTML="-";
-    }
-}
-function onMouseOver(obj) {
-    obj.style.background="lightblue";
-}
-function onMouseOut(obj) {
-    obj.style.background="white";
-}
-</script>
-<style type="text/css">
-div { font-family: courier; font-size: 13 }
-div.parent { margin-left: 15; display: none }
-div.leaf { margin-left: 10 }
-div.header { margin-left: 10 }
-div.link { margin-left: 10; cursor: move }
-span.parent { padding-right: 10; }
-span.leaf { padding-right: 10; }
-a img { border: 0;}
-table.sortable th { border-width: 0px 1px 1px 1px; background-color: #ccc;}
-a { text-decoration: none; }
-a:hover { text-decoration: underline; }
-table.sortable th, table.sortable td { text-align: left;}table.sortable tr.odd td { background-color: #ddd; }
-table.sortable tr.even td { background-color: #fff; }
-</style>
-</head><body>
-
-<a name="contents"></a>
-<h2>Table of Contents</h2>
-<ul>
-  <li><a href="#exclusive">Exclusive profile</a></li>
-  <li><a href="#inclusive">Inclusive profile</a></li>
-  <li><a href="#thread">Thread profile</a></li>
-  <li><a href="#class">Class/method profile</a></li>
-  <li><a href="#method">Method/class profile</a></li>
-</ul>
-
-<a name="exclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-Total cycles: 16
-
-<br><br>
-Exclusive elapsed times for each method, not including time spent in
-children, sorted by exclusive time.
-
-<br><br>
-<pre>
-    Usecs  self %  sum %  Method
-        8   50.00  50.00  <a href="#m1">[1]</a> Z.m ()
-        4   25.00  75.00  <a href="#m2">[2]</a> A.m ()
-        4   25.00 100.00  <a href="#m3">[3]</a> B.m ()
-</pre>
-<a name="inclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Inclusive elapsed times for each method and its parents and children,
-sorted by inclusive time.
-
-<br><br>
-<pre>
-index  %/total %/self  index     calls         usecs name
-<a name="m0"></a>----------------------------------------------------
-[0]    100.0%                     0+0             16 (toplevel)
-                 0.0%   excl                       0
-                50.0%    <a href="#m1">[1]</a>      2/2              8 Z.m ()
-                25.0%    <a href="#m2">[2]</a>      2/2              4 A.m ()
-                25.0%    <a href="#m3">[3]</a>      2/2              4 B.m ()
-<a name="m1"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              8 (toplevel)
-[1]     50.0%                     2+0              8 Z.m ()
-               100.0%   excl                       8
-<a name="m2"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              4 (toplevel)
-[2]     25.0%                     2+0              4 A.m ()
-               100.0%   excl                       4
-<a name="m3"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              4 (toplevel)
-[3]     25.0%                     2+0              4 B.m ()
-               100.0%   excl                       4
-</pre>
-<a name="thread"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Elapsed times for each thread, sorted by elapsed time.
-Also includes percentage of time spent during the <i>execution</i> of any filters.
-
-<br><br>
-<pre>
-    Usecs   self %  sum %  FirstFilter %  SecondFilter %  RepeatedFilter %  tid   ThreadName
-        8   50.00  50.00  50.00   0.00   0.00      1 main
-        8   50.00 100.00 100.00   0.00   0.00      2 foo
-        0    0.00 100.00    nan    nan    nan      3 bar
-        0    0.00 100.00    nan    nan    nan      4 blah
-</pre><br />
-
-Break-down of portion of time spent by each thread while waiting on a filter method.
-<br/><br/>
-<pre>
-Filter: FirstFilter
-Total waiting cycles: 12 ( 75.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-        12                   100.00                      33.33               main
-         8                    66.67                      66.67               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: SecondFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<br/><br/>
-<pre>
-Filter: RepeatedFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<a name="class"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each class, summed over all the methods
-in the class.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Class</div>
-<div class="link" onClick="javascript:toggle('d0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Z</div>
-<div class="parent" id="d0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d1')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd1">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;25.0 &nbsp;&nbsp;&nbsp;75.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; A</div>
-<div class="parent" id="d1">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d2')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd2">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;25.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; B</div>
-<div class="parent" id="d2">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;m&nbsp;()</div>
-</div>
-<a name="method"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each method, summed over all the classes
-that contain a method with the same name.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Method</div>
-<div class="link" onClick="javascript:toggle('e0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xe0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;16 &nbsp;&nbsp;100.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;6+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; m</div>
-<div class="parent" id="e0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;Z.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;25.0&nbsp;&nbsp;&nbsp;&nbsp;75.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;A.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;25.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;B.m&nbsp;()</div>
-</div>
-
-</body>
-</html>
diff --git a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadSameFilterSameKeysTrace b/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadSameFilterSameKeysTrace
deleted file mode 100644
index 74e4c53..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapCrossThreadSameFilterSameKeysTrace
+++ /dev/null
Binary files differ
diff --git a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadDiffFilterDiffKeys b/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadDiffFilterDiffKeys
deleted file mode 100644
index b92471f..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadDiffFilterDiffKeys
+++ /dev/null
@@ -1,13 +0,0 @@
-#    ____  ____  ____  ____  ____
-# __|A   ||D   ||E   ||B   ||Z   |__
-#
-0 1 A
-2 1 A
-2 1 D
-4 1 D
-4 1 E
-6 1 E
-6 1 B
-8 1 B
-8 1 Z
-10 1 Z
diff --git a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadDiffFilterDiffKeysExpected b/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadDiffFilterDiffKeysExpected
deleted file mode 100644
index 3b2ffc8..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadDiffFilterDiffKeysExpected
+++ /dev/null
@@ -1,232 +0,0 @@
-<html>
-<head>
-<script type="text/javascript" src="(null)sortable.js"></script>
-<script langugage="javascript">
-function toggle(item) {
-    obj=document.getElementById(item);
-    visible=(obj.style.display!="none" && obj.style.display!="");
-    key=document.getElementById("x" + item);
-    if (visible) {
-        obj.style.display="none";
-        key.innerHTML="+";
-    } else {
-        obj.style.display="block";
-        key.innerHTML="-";
-    }
-}
-function onMouseOver(obj) {
-    obj.style.background="lightblue";
-}
-function onMouseOut(obj) {
-    obj.style.background="white";
-}
-</script>
-<style type="text/css">
-div { font-family: courier; font-size: 13 }
-div.parent { margin-left: 15; display: none }
-div.leaf { margin-left: 10 }
-div.header { margin-left: 10 }
-div.link { margin-left: 10; cursor: move }
-span.parent { padding-right: 10; }
-span.leaf { padding-right: 10; }
-a img { border: 0;}
-table.sortable th { border-width: 0px 1px 1px 1px; background-color: #ccc;}
-a { text-decoration: none; }
-a:hover { text-decoration: underline; }
-table.sortable th, table.sortable td { text-align: left;}table.sortable tr.odd td { background-color: #ddd; }
-table.sortable tr.even td { background-color: #fff; }
-</style>
-</head><body>
-
-<a name="contents"></a>
-<h2>Table of Contents</h2>
-<ul>
-  <li><a href="#exclusive">Exclusive profile</a></li>
-  <li><a href="#inclusive">Inclusive profile</a></li>
-  <li><a href="#thread">Thread profile</a></li>
-  <li><a href="#class">Class/method profile</a></li>
-  <li><a href="#method">Method/class profile</a></li>
-</ul>
-
-<a name="exclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-Total cycles: 10
-
-<br><br>
-Exclusive elapsed times for each method, not including time spent in
-children, sorted by exclusive time.
-
-<br><br>
-<pre>
-    Usecs  self %  sum %  Method
-        2   20.00  20.00  <a href="#m1">[1]</a> A.m ()
-        2   20.00  40.00  <a href="#m2">[2]</a> B.m ()
-        2   20.00  60.00  <a href="#m3">[3]</a> D.m ()
-        2   20.00  80.00  <a href="#m4">[4]</a> E.m ()
-        2   20.00 100.00  <a href="#m5">[5]</a> Z.m ()
-</pre>
-<a name="inclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Inclusive elapsed times for each method and its parents and children,
-sorted by inclusive time.
-
-<br><br>
-<pre>
-index  %/total %/self  index     calls         usecs name
-<a name="m0"></a>----------------------------------------------------
-[0]    100.0%                     0+0             10 (toplevel)
-                 0.0%   excl                       0
-                20.0%    <a href="#m1">[1]</a>      1/1              2 A.m ()
-                20.0%    <a href="#m2">[2]</a>      1/1              2 B.m ()
-                20.0%    <a href="#m3">[3]</a>      1/1              2 D.m ()
-                20.0%    <a href="#m4">[4]</a>      1/1              2 E.m ()
-                20.0%    <a href="#m5">[5]</a>      1/1              2 Z.m ()
-<a name="m1"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[1]     20.0%                     1+0              2 A.m ()
-               100.0%   excl                       2
-<a name="m2"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[2]     20.0%                     1+0              2 B.m ()
-               100.0%   excl                       2
-<a name="m3"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[3]     20.0%                     1+0              2 D.m ()
-               100.0%   excl                       2
-<a name="m4"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[4]     20.0%                     1+0              2 E.m ()
-               100.0%   excl                       2
-<a name="m5"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[5]     20.0%                     1+0              2 Z.m ()
-               100.0%   excl                       2
-</pre>
-<a name="thread"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Elapsed times for each thread, sorted by elapsed time.
-Also includes percentage of time spent during the <i>execution</i> of any filters.
-
-<br><br>
-<pre>
-    Usecs   self %  sum %  FirstFilter %  SecondFilter %  RepeatedFilter %  tid   ThreadName
-       10  100.00 100.00  80.00  40.00   0.00      1 main
-        0    0.00 100.00    nan    nan    nan      2 foo
-        0    0.00 100.00    nan    nan    nan      3 bar
-        0    0.00 100.00    nan    nan    nan      4 blah
-</pre><br />
-
-Break-down of portion of time spent by each thread while waiting on a filter method.
-<br/><br/>
-<pre>
-Filter: FirstFilter
-Total waiting cycles: 8 ( 80.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         8                   100.00                     100.00               main
-         0                     0.00                       0.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: SecondFilter
-Total waiting cycles: 4 ( 40.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         4                   100.00                     100.00               main
-         0                     0.00                       0.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: RepeatedFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<a name="class"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each class, summed over all the methods
-in the class.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Class</div>
-<div class="link" onClick="javascript:toggle('d0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;20.0 &nbsp;&nbsp;&nbsp;20.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; A</div>
-<div class="parent" id="d0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d1')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd1">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;20.0 &nbsp;&nbsp;&nbsp;40.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; B</div>
-<div class="parent" id="d1">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d2')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd2">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;20.0 &nbsp;&nbsp;&nbsp;60.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; D</div>
-<div class="parent" id="d2">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d3')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd3">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;20.0 &nbsp;&nbsp;&nbsp;80.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; E</div>
-<div class="parent" id="d3">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m4">[4]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d4')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd4">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;20.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Z</div>
-<div class="parent" id="d4">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m5">[5]</a>&nbsp;m&nbsp;()</div>
-</div>
-<a name="method"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each method, summed over all the classes
-that contain a method with the same name.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Method</div>
-<div class="link" onClick="javascript:toggle('e0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xe0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;10 &nbsp;&nbsp;100.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;5+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; m</div>
-<div class="parent" id="e0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;20.0&nbsp;&nbsp;&nbsp;&nbsp;20.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;A.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;20.0&nbsp;&nbsp;&nbsp;&nbsp;40.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;B.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;20.0&nbsp;&nbsp;&nbsp;&nbsp;60.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;D.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;20.0&nbsp;&nbsp;&nbsp;&nbsp;80.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m4">[4]</a>&nbsp;E.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;20.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m5">[5]</a>&nbsp;Z.m&nbsp;()</div>
-</div>
-
-</body>
-</html>
diff --git a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadDiffFilterDiffKeysTrace b/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadDiffFilterDiffKeysTrace
deleted file mode 100644
index c9c086c..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadDiffFilterDiffKeysTrace
+++ /dev/null
Binary files differ
diff --git a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadDiffFilterSameKeys b/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadDiffFilterSameKeys
deleted file mode 100644
index 27b2bf8..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadDiffFilterSameKeys
+++ /dev/null
@@ -1,13 +0,0 @@
-#    ____  ____  ____  ____  ____
-# __|R   ||R   ||S   ||S   ||Z   |__
-#
-0 1 R
-2 1 R
-2 1 R
-4 1 R
-4 1 S
-6 1 S
-6 1 S
-8 1 S
-8 1 Z
-10 1 Z
diff --git a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadDiffFilterSameKeysExpected b/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadDiffFilterSameKeysExpected
deleted file mode 100644
index df55cd4..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadDiffFilterSameKeysExpected
+++ /dev/null
@@ -1,210 +0,0 @@
-<html>
-<head>
-<script type="text/javascript" src="(null)sortable.js"></script>
-<script langugage="javascript">
-function toggle(item) {
-    obj=document.getElementById(item);
-    visible=(obj.style.display!="none" && obj.style.display!="");
-    key=document.getElementById("x" + item);
-    if (visible) {
-        obj.style.display="none";
-        key.innerHTML="+";
-    } else {
-        obj.style.display="block";
-        key.innerHTML="-";
-    }
-}
-function onMouseOver(obj) {
-    obj.style.background="lightblue";
-}
-function onMouseOut(obj) {
-    obj.style.background="white";
-}
-</script>
-<style type="text/css">
-div { font-family: courier; font-size: 13 }
-div.parent { margin-left: 15; display: none }
-div.leaf { margin-left: 10 }
-div.header { margin-left: 10 }
-div.link { margin-left: 10; cursor: move }
-span.parent { padding-right: 10; }
-span.leaf { padding-right: 10; }
-a img { border: 0;}
-table.sortable th { border-width: 0px 1px 1px 1px; background-color: #ccc;}
-a { text-decoration: none; }
-a:hover { text-decoration: underline; }
-table.sortable th, table.sortable td { text-align: left;}table.sortable tr.odd td { background-color: #ddd; }
-table.sortable tr.even td { background-color: #fff; }
-</style>
-</head><body>
-
-<a name="contents"></a>
-<h2>Table of Contents</h2>
-<ul>
-  <li><a href="#exclusive">Exclusive profile</a></li>
-  <li><a href="#inclusive">Inclusive profile</a></li>
-  <li><a href="#thread">Thread profile</a></li>
-  <li><a href="#class">Class/method profile</a></li>
-  <li><a href="#method">Method/class profile</a></li>
-</ul>
-
-<a name="exclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-Total cycles: 10
-
-<br><br>
-Exclusive elapsed times for each method, not including time spent in
-children, sorted by exclusive time.
-
-<br><br>
-<pre>
-    Usecs  self %  sum %  Method
-        4   40.00  40.00  <a href="#m1">[1]</a> R.m ()
-        4   40.00  80.00  <a href="#m2">[2]</a> S.m ()
-        2   20.00 100.00  <a href="#m3">[3]</a> Z.m ()
-</pre>
-<a name="inclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Inclusive elapsed times for each method and its parents and children,
-sorted by inclusive time.
-
-<br><br>
-<pre>
-index  %/total %/self  index     calls         usecs name
-<a name="m0"></a>----------------------------------------------------
-[0]    100.0%                     0+0             10 (toplevel)
-                 0.0%   excl                       0
-                40.0%    <a href="#m1">[1]</a>      2/2              4 R.m ()
-                40.0%    <a href="#m2">[2]</a>      2/2              4 S.m ()
-                20.0%    <a href="#m3">[3]</a>      1/1              2 Z.m ()
-<a name="m1"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              4 (toplevel)
-[1]     40.0%                     2+0              4 R.m ()
-               100.0%   excl                       4
-<a name="m2"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              4 (toplevel)
-[2]     40.0%                     2+0              4 S.m ()
-               100.0%   excl                       4
-<a name="m3"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[3]     20.0%                     1+0              2 Z.m ()
-               100.0%   excl                       2
-</pre>
-<a name="thread"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Elapsed times for each thread, sorted by elapsed time.
-Also includes percentage of time spent during the <i>execution</i> of any filters.
-
-<br><br>
-<pre>
-    Usecs   self %  sum %  FirstFilter %  SecondFilter %  RepeatedFilter %  tid   ThreadName
-       10  100.00 100.00  80.00   0.00  80.00      1 main
-        0    0.00 100.00    nan    nan    nan      2 foo
-        0    0.00 100.00    nan    nan    nan      3 bar
-        0    0.00 100.00    nan    nan    nan      4 blah
-</pre><br />
-
-Break-down of portion of time spent by each thread while waiting on a filter method.
-<br/><br/>
-<pre>
-Filter: FirstFilter
-Total waiting cycles: 8 ( 80.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         8                   100.00                     100.00               main
-         0                     0.00                       0.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: SecondFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<br/><br/>
-<pre>
-Filter: RepeatedFilter
-Total waiting cycles: 8 ( 80.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         8                   100.00                     100.00               main
-         0                     0.00                       0.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<a name="class"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each class, summed over all the methods
-in the class.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Class</div>
-<div class="link" onClick="javascript:toggle('d0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;40.0 &nbsp;&nbsp;&nbsp;40.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; R</div>
-<div class="parent" id="d0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d1')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd1">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;40.0 &nbsp;&nbsp;&nbsp;80.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; S</div>
-<div class="parent" id="d1">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d2')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd2">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;20.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Z</div>
-<div class="parent" id="d2">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;m&nbsp;()</div>
-</div>
-<a name="method"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each method, summed over all the classes
-that contain a method with the same name.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Method</div>
-<div class="link" onClick="javascript:toggle('e0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xe0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;10 &nbsp;&nbsp;100.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;5+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; m</div>
-<div class="parent" id="e0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;40.0&nbsp;&nbsp;&nbsp;&nbsp;40.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;R.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;40.0&nbsp;&nbsp;&nbsp;&nbsp;80.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;S.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;20.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;Z.m&nbsp;()</div>
-</div>
-
-</body>
-</html>
diff --git a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadDiffFilterSameKeysTrace b/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadDiffFilterSameKeysTrace
deleted file mode 100644
index 0afca4d..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadDiffFilterSameKeysTrace
+++ /dev/null
Binary files differ
diff --git a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadSameFilterDiffKeys b/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadSameFilterDiffKeys
deleted file mode 100644
index a494716..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadSameFilterDiffKeys
+++ /dev/null
@@ -1,11 +0,0 @@
-#    ____  ____  ____  ____
-# __|A   ||C   ||B   ||Z   |__
-#
-0 1 A
-2 1 A
-2 1 C
-4 1 C
-4 1 B
-6 1 B
-6 1 Z
-8 1 Z
diff --git a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadSameFilterDiffKeysExpected b/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadSameFilterDiffKeysExpected
deleted file mode 100644
index 720d05a..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadSameFilterDiffKeysExpected
+++ /dev/null
@@ -1,214 +0,0 @@
-<html>
-<head>
-<script type="text/javascript" src="(null)sortable.js"></script>
-<script langugage="javascript">
-function toggle(item) {
-    obj=document.getElementById(item);
-    visible=(obj.style.display!="none" && obj.style.display!="");
-    key=document.getElementById("x" + item);
-    if (visible) {
-        obj.style.display="none";
-        key.innerHTML="+";
-    } else {
-        obj.style.display="block";
-        key.innerHTML="-";
-    }
-}
-function onMouseOver(obj) {
-    obj.style.background="lightblue";
-}
-function onMouseOut(obj) {
-    obj.style.background="white";
-}
-</script>
-<style type="text/css">
-div { font-family: courier; font-size: 13 }
-div.parent { margin-left: 15; display: none }
-div.leaf { margin-left: 10 }
-div.header { margin-left: 10 }
-div.link { margin-left: 10; cursor: move }
-span.parent { padding-right: 10; }
-span.leaf { padding-right: 10; }
-a img { border: 0;}
-table.sortable th { border-width: 0px 1px 1px 1px; background-color: #ccc;}
-a { text-decoration: none; }
-a:hover { text-decoration: underline; }
-table.sortable th, table.sortable td { text-align: left;}table.sortable tr.odd td { background-color: #ddd; }
-table.sortable tr.even td { background-color: #fff; }
-</style>
-</head><body>
-
-<a name="contents"></a>
-<h2>Table of Contents</h2>
-<ul>
-  <li><a href="#exclusive">Exclusive profile</a></li>
-  <li><a href="#inclusive">Inclusive profile</a></li>
-  <li><a href="#thread">Thread profile</a></li>
-  <li><a href="#class">Class/method profile</a></li>
-  <li><a href="#method">Method/class profile</a></li>
-</ul>
-
-<a name="exclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-Total cycles: 8
-
-<br><br>
-Exclusive elapsed times for each method, not including time spent in
-children, sorted by exclusive time.
-
-<br><br>
-<pre>
-    Usecs  self %  sum %  Method
-        2   25.00  25.00  <a href="#m1">[1]</a> A.m ()
-        2   25.00  50.00  <a href="#m2">[2]</a> B.m ()
-        2   25.00  75.00  <a href="#m3">[3]</a> C.m ()
-        2   25.00 100.00  <a href="#m4">[4]</a> Z.m ()
-</pre>
-<a name="inclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Inclusive elapsed times for each method and its parents and children,
-sorted by inclusive time.
-
-<br><br>
-<pre>
-index  %/total %/self  index     calls         usecs name
-<a name="m0"></a>----------------------------------------------------
-[0]    100.0%                     0+0              8 (toplevel)
-                 0.0%   excl                       0
-                25.0%    <a href="#m1">[1]</a>      1/1              2 A.m ()
-                25.0%    <a href="#m2">[2]</a>      1/1              2 B.m ()
-                25.0%    <a href="#m3">[3]</a>      1/1              2 C.m ()
-                25.0%    <a href="#m4">[4]</a>      1/1              2 Z.m ()
-<a name="m1"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[1]     25.0%                     1+0              2 A.m ()
-               100.0%   excl                       2
-<a name="m2"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[2]     25.0%                     1+0              2 B.m ()
-               100.0%   excl                       2
-<a name="m3"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[3]     25.0%                     1+0              2 C.m ()
-               100.0%   excl                       2
-<a name="m4"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[4]     25.0%                     1+0              2 Z.m ()
-               100.0%   excl                       2
-</pre>
-<a name="thread"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Elapsed times for each thread, sorted by elapsed time.
-Also includes percentage of time spent during the <i>execution</i> of any filters.
-
-<br><br>
-<pre>
-    Usecs   self %  sum %  FirstFilter %  SecondFilter %  RepeatedFilter %  tid   ThreadName
-        8  100.00 100.00  75.00   0.00   0.00      1 main
-        0    0.00 100.00    nan    nan    nan      2 foo
-        0    0.00 100.00    nan    nan    nan      3 bar
-        0    0.00 100.00    nan    nan    nan      4 blah
-</pre><br />
-
-Break-down of portion of time spent by each thread while waiting on a filter method.
-<br/><br/>
-<pre>
-Filter: FirstFilter
-Total waiting cycles: 6 ( 75.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         6                   100.00                     100.00               main
-         0                     0.00                       0.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: SecondFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<br/><br/>
-<pre>
-Filter: RepeatedFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<a name="class"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each class, summed over all the methods
-in the class.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Class</div>
-<div class="link" onClick="javascript:toggle('d0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;25.0 &nbsp;&nbsp;&nbsp;25.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; A</div>
-<div class="parent" id="d0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d1')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd1">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;25.0 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; B</div>
-<div class="parent" id="d1">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d2')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd2">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;25.0 &nbsp;&nbsp;&nbsp;75.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; C</div>
-<div class="parent" id="d2">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d3')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd3">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;25.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Z</div>
-<div class="parent" id="d3">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m4">[4]</a>&nbsp;m&nbsp;()</div>
-</div>
-<a name="method"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each method, summed over all the classes
-that contain a method with the same name.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Method</div>
-<div class="link" onClick="javascript:toggle('e0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xe0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8 &nbsp;&nbsp;100.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; m</div>
-<div class="parent" id="e0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;25.0&nbsp;&nbsp;&nbsp;&nbsp;25.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;A.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;25.0&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;B.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;25.0&nbsp;&nbsp;&nbsp;&nbsp;75.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;C.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;25.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m4">[4]</a>&nbsp;Z.m&nbsp;()</div>
-</div>
-
-</body>
-</html>
diff --git a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadSameFilterDiffKeysTrace b/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadSameFilterDiffKeysTrace
deleted file mode 100644
index c5f9a3e..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadSameFilterDiffKeysTrace
+++ /dev/null
Binary files differ
diff --git a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadSameFilterSameKeys b/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadSameFilterSameKeys
deleted file mode 100644
index bd645af..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadSameFilterSameKeys
+++ /dev/null
@@ -1,13 +0,0 @@
-#    ____  ____  ____  ____  ____
-# __|A   ||A   ||B   ||B   ||Z   |__
-#
-0 1 A
-2 1 A
-2 1 A
-4 1 A
-4 1 B
-6 1 B
-6 1 B
-8 1 B
-8 1 Z
-10 1 Z
diff --git a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadSameFilterSameKeysExpected b/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadSameFilterSameKeysExpected
deleted file mode 100644
index 0e8f300..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadSameFilterSameKeysExpected
+++ /dev/null
@@ -1,203 +0,0 @@
-<html>
-<head>
-<script type="text/javascript" src="(null)sortable.js"></script>
-<script langugage="javascript">
-function toggle(item) {
-    obj=document.getElementById(item);
-    visible=(obj.style.display!="none" && obj.style.display!="");
-    key=document.getElementById("x" + item);
-    if (visible) {
-        obj.style.display="none";
-        key.innerHTML="+";
-    } else {
-        obj.style.display="block";
-        key.innerHTML="-";
-    }
-}
-function onMouseOver(obj) {
-    obj.style.background="lightblue";
-}
-function onMouseOut(obj) {
-    obj.style.background="white";
-}
-</script>
-<style type="text/css">
-div { font-family: courier; font-size: 13 }
-div.parent { margin-left: 15; display: none }
-div.leaf { margin-left: 10 }
-div.header { margin-left: 10 }
-div.link { margin-left: 10; cursor: move }
-span.parent { padding-right: 10; }
-span.leaf { padding-right: 10; }
-a img { border: 0;}
-table.sortable th { border-width: 0px 1px 1px 1px; background-color: #ccc;}
-a { text-decoration: none; }
-a:hover { text-decoration: underline; }
-table.sortable th, table.sortable td { text-align: left;}table.sortable tr.odd td { background-color: #ddd; }
-table.sortable tr.even td { background-color: #fff; }
-</style>
-</head><body>
-
-<a name="contents"></a>
-<h2>Table of Contents</h2>
-<ul>
-  <li><a href="#exclusive">Exclusive profile</a></li>
-  <li><a href="#inclusive">Inclusive profile</a></li>
-  <li><a href="#thread">Thread profile</a></li>
-  <li><a href="#class">Class/method profile</a></li>
-  <li><a href="#method">Method/class profile</a></li>
-</ul>
-
-<a name="exclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-Total cycles: 10
-
-<br><br>
-Exclusive elapsed times for each method, not including time spent in
-children, sorted by exclusive time.
-
-<br><br>
-<pre>
-    Usecs  self %  sum %  Method
-        4   40.00  40.00  <a href="#m1">[1]</a> A.m ()
-        4   40.00  80.00  <a href="#m2">[2]</a> B.m ()
-        2   20.00 100.00  <a href="#m3">[3]</a> Z.m ()
-</pre>
-<a name="inclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Inclusive elapsed times for each method and its parents and children,
-sorted by inclusive time.
-
-<br><br>
-<pre>
-index  %/total %/self  index     calls         usecs name
-<a name="m0"></a>----------------------------------------------------
-[0]    100.0%                     0+0             10 (toplevel)
-                 0.0%   excl                       0
-                40.0%    <a href="#m1">[1]</a>      2/2              4 A.m ()
-                40.0%    <a href="#m2">[2]</a>      2/2              4 B.m ()
-                20.0%    <a href="#m3">[3]</a>      1/1              2 Z.m ()
-<a name="m1"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              4 (toplevel)
-[1]     40.0%                     2+0              4 A.m ()
-               100.0%   excl                       4
-<a name="m2"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              4 (toplevel)
-[2]     40.0%                     2+0              4 B.m ()
-               100.0%   excl                       4
-<a name="m3"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[3]     20.0%                     1+0              2 Z.m ()
-               100.0%   excl                       2
-</pre>
-<a name="thread"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Elapsed times for each thread, sorted by elapsed time.
-Also includes percentage of time spent during the <i>execution</i> of any filters.
-
-<br><br>
-<pre>
-    Usecs   self %  sum %  FirstFilter %  SecondFilter %  RepeatedFilter %  tid   ThreadName
-       10  100.00 100.00  80.00   0.00   0.00      1 main
-        0    0.00 100.00    nan    nan    nan      2 foo
-        0    0.00 100.00    nan    nan    nan      3 bar
-        0    0.00 100.00    nan    nan    nan      4 blah
-</pre><br />
-
-Break-down of portion of time spent by each thread while waiting on a filter method.
-<br/><br/>
-<pre>
-Filter: FirstFilter
-Total waiting cycles: 8 ( 80.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         8                   100.00                     100.00               main
-         0                     0.00                       0.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: SecondFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<br/><br/>
-<pre>
-Filter: RepeatedFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<a name="class"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each class, summed over all the methods
-in the class.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Class</div>
-<div class="link" onClick="javascript:toggle('d0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;40.0 &nbsp;&nbsp;&nbsp;40.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; A</div>
-<div class="parent" id="d0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d1')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd1">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;40.0 &nbsp;&nbsp;&nbsp;80.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; B</div>
-<div class="parent" id="d1">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d2')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd2">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;20.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Z</div>
-<div class="parent" id="d2">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;m&nbsp;()</div>
-</div>
-<a name="method"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each method, summed over all the classes
-that contain a method with the same name.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Method</div>
-<div class="link" onClick="javascript:toggle('e0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xe0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;10 &nbsp;&nbsp;100.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;5+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; m</div>
-<div class="parent" id="e0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;40.0&nbsp;&nbsp;&nbsp;&nbsp;40.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;A.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;40.0&nbsp;&nbsp;&nbsp;&nbsp;80.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;B.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;20.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;Z.m&nbsp;()</div>
-</div>
-
-</body>
-</html>
diff --git a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadSameFilterSameKeysTrace b/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadSameFilterSameKeysTrace
deleted file mode 100644
index 65e381a..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingNestedOverlapSingleThreadSameFilterSameKeysTrace
+++ /dev/null
Binary files differ
diff --git a/tools/dmtracedump/tests/filters/testWaitingPairCrossThread b/tools/dmtracedump/tests/filters/testWaitingPairCrossThread
deleted file mode 100644
index 6c93bc6..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPairCrossThread
+++ /dev/null
@@ -1,14 +0,0 @@
-#    ____        ____  ____
-# __|A   |______|B   ||Z   |__
-#
-#          _____
-# ________|Z    |_________________
-#
-0 1 A
-2 1 A
-0 2 Z
-2 2 Z
-2 1 B
-4 1 B
-4 1 Z
-6 1 Z
diff --git a/tools/dmtracedump/tests/filters/testWaitingPairCrossThreadExpected b/tools/dmtracedump/tests/filters/testWaitingPairCrossThreadExpected
deleted file mode 100644
index ed45fff..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPairCrossThreadExpected
+++ /dev/null
@@ -1,203 +0,0 @@
-<html>
-<head>
-<script type="text/javascript" src="(null)sortable.js"></script>
-<script langugage="javascript">
-function toggle(item) {
-    obj=document.getElementById(item);
-    visible=(obj.style.display!="none" && obj.style.display!="");
-    key=document.getElementById("x" + item);
-    if (visible) {
-        obj.style.display="none";
-        key.innerHTML="+";
-    } else {
-        obj.style.display="block";
-        key.innerHTML="-";
-    }
-}
-function onMouseOver(obj) {
-    obj.style.background="lightblue";
-}
-function onMouseOut(obj) {
-    obj.style.background="white";
-}
-</script>
-<style type="text/css">
-div { font-family: courier; font-size: 13 }
-div.parent { margin-left: 15; display: none }
-div.leaf { margin-left: 10 }
-div.header { margin-left: 10 }
-div.link { margin-left: 10; cursor: move }
-span.parent { padding-right: 10; }
-span.leaf { padding-right: 10; }
-a img { border: 0;}
-table.sortable th { border-width: 0px 1px 1px 1px; background-color: #ccc;}
-a { text-decoration: none; }
-a:hover { text-decoration: underline; }
-table.sortable th, table.sortable td { text-align: left;}table.sortable tr.odd td { background-color: #ddd; }
-table.sortable tr.even td { background-color: #fff; }
-</style>
-</head><body>
-
-<a name="contents"></a>
-<h2>Table of Contents</h2>
-<ul>
-  <li><a href="#exclusive">Exclusive profile</a></li>
-  <li><a href="#inclusive">Inclusive profile</a></li>
-  <li><a href="#thread">Thread profile</a></li>
-  <li><a href="#class">Class/method profile</a></li>
-  <li><a href="#method">Method/class profile</a></li>
-</ul>
-
-<a name="exclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-Total cycles: 8
-
-<br><br>
-Exclusive elapsed times for each method, not including time spent in
-children, sorted by exclusive time.
-
-<br><br>
-<pre>
-    Usecs  self %  sum %  Method
-        4   50.00  50.00  <a href="#m1">[1]</a> Z.m ()
-        2   25.00  75.00  <a href="#m2">[2]</a> A.m ()
-        2   25.00 100.00  <a href="#m3">[3]</a> B.m ()
-</pre>
-<a name="inclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Inclusive elapsed times for each method and its parents and children,
-sorted by inclusive time.
-
-<br><br>
-<pre>
-index  %/total %/self  index     calls         usecs name
-<a name="m0"></a>----------------------------------------------------
-[0]    100.0%                     0+0              8 (toplevel)
-                 0.0%   excl                       0
-                50.0%    <a href="#m1">[1]</a>      2/2              4 Z.m ()
-                25.0%    <a href="#m2">[2]</a>      1/1              2 A.m ()
-                25.0%    <a href="#m3">[3]</a>      1/1              2 B.m ()
-<a name="m1"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              4 (toplevel)
-[1]     50.0%                     2+0              4 Z.m ()
-               100.0%   excl                       4
-<a name="m2"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[2]     25.0%                     1+0              2 A.m ()
-               100.0%   excl                       2
-<a name="m3"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[3]     25.0%                     1+0              2 B.m ()
-               100.0%   excl                       2
-</pre>
-<a name="thread"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Elapsed times for each thread, sorted by elapsed time.
-Also includes percentage of time spent during the <i>execution</i> of any filters.
-
-<br><br>
-<pre>
-    Usecs   self %  sum %  FirstFilter %  SecondFilter %  RepeatedFilter %  tid   ThreadName
-        6   75.00  75.00  66.67   0.00   0.00      1 main
-        2   25.00 100.00   0.00   0.00   0.00      2 foo
-        0    0.00 100.00    nan    nan    nan      3 bar
-        0    0.00 100.00    nan    nan    nan      4 blah
-</pre><br />
-
-Break-down of portion of time spent by each thread while waiting on a filter method.
-<br/><br/>
-<pre>
-Filter: FirstFilter
-Total waiting cycles: 6 ( 75.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         6                   100.00                      66.67               main
-         0                     0.00                      33.33               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: SecondFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<br/><br/>
-<pre>
-Filter: RepeatedFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<a name="class"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each class, summed over all the methods
-in the class.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Class</div>
-<div class="link" onClick="javascript:toggle('d0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Z</div>
-<div class="parent" id="d0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d1')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd1">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;25.0 &nbsp;&nbsp;&nbsp;75.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; A</div>
-<div class="parent" id="d1">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d2')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd2">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;25.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; B</div>
-<div class="parent" id="d2">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;m&nbsp;()</div>
-</div>
-<a name="method"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each method, summed over all the classes
-that contain a method with the same name.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Method</div>
-<div class="link" onClick="javascript:toggle('e0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xe0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8 &nbsp;&nbsp;100.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; m</div>
-<div class="parent" id="e0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;Z.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;25.0&nbsp;&nbsp;&nbsp;&nbsp;75.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;A.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;25.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;B.m&nbsp;()</div>
-</div>
-
-</body>
-</html>
diff --git a/tools/dmtracedump/tests/filters/testWaitingPairCrossThreadTrace b/tools/dmtracedump/tests/filters/testWaitingPairCrossThreadTrace
deleted file mode 100644
index 4e53dfd..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPairCrossThreadTrace
+++ /dev/null
Binary files differ
diff --git a/tools/dmtracedump/tests/filters/testWaitingPairSingleThread b/tools/dmtracedump/tests/filters/testWaitingPairSingleThread
deleted file mode 100644
index 45375ca..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPairSingleThread
+++ /dev/null
@@ -1,11 +0,0 @@
-#    ____  ____  ____  ____
-# __|A   ||Z   ||B   ||Z   |__
-#
-0 1 A
-2 1 A
-2 1 Z
-4 1 Z
-4 1 B
-6 1 B
-6 1 Z
-8 1 Z
diff --git a/tools/dmtracedump/tests/filters/testWaitingPairSingleThreadExpected b/tools/dmtracedump/tests/filters/testWaitingPairSingleThreadExpected
deleted file mode 100644
index b3e2b3f..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPairSingleThreadExpected
+++ /dev/null
@@ -1,203 +0,0 @@
-<html>
-<head>
-<script type="text/javascript" src="(null)sortable.js"></script>
-<script langugage="javascript">
-function toggle(item) {
-    obj=document.getElementById(item);
-    visible=(obj.style.display!="none" && obj.style.display!="");
-    key=document.getElementById("x" + item);
-    if (visible) {
-        obj.style.display="none";
-        key.innerHTML="+";
-    } else {
-        obj.style.display="block";
-        key.innerHTML="-";
-    }
-}
-function onMouseOver(obj) {
-    obj.style.background="lightblue";
-}
-function onMouseOut(obj) {
-    obj.style.background="white";
-}
-</script>
-<style type="text/css">
-div { font-family: courier; font-size: 13 }
-div.parent { margin-left: 15; display: none }
-div.leaf { margin-left: 10 }
-div.header { margin-left: 10 }
-div.link { margin-left: 10; cursor: move }
-span.parent { padding-right: 10; }
-span.leaf { padding-right: 10; }
-a img { border: 0;}
-table.sortable th { border-width: 0px 1px 1px 1px; background-color: #ccc;}
-a { text-decoration: none; }
-a:hover { text-decoration: underline; }
-table.sortable th, table.sortable td { text-align: left;}table.sortable tr.odd td { background-color: #ddd; }
-table.sortable tr.even td { background-color: #fff; }
-</style>
-</head><body>
-
-<a name="contents"></a>
-<h2>Table of Contents</h2>
-<ul>
-  <li><a href="#exclusive">Exclusive profile</a></li>
-  <li><a href="#inclusive">Inclusive profile</a></li>
-  <li><a href="#thread">Thread profile</a></li>
-  <li><a href="#class">Class/method profile</a></li>
-  <li><a href="#method">Method/class profile</a></li>
-</ul>
-
-<a name="exclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-Total cycles: 8
-
-<br><br>
-Exclusive elapsed times for each method, not including time spent in
-children, sorted by exclusive time.
-
-<br><br>
-<pre>
-    Usecs  self %  sum %  Method
-        4   50.00  50.00  <a href="#m1">[1]</a> Z.m ()
-        2   25.00  75.00  <a href="#m2">[2]</a> A.m ()
-        2   25.00 100.00  <a href="#m3">[3]</a> B.m ()
-</pre>
-<a name="inclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Inclusive elapsed times for each method and its parents and children,
-sorted by inclusive time.
-
-<br><br>
-<pre>
-index  %/total %/self  index     calls         usecs name
-<a name="m0"></a>----------------------------------------------------
-[0]    100.0%                     0+0              8 (toplevel)
-                 0.0%   excl                       0
-                50.0%    <a href="#m1">[1]</a>      2/2              4 Z.m ()
-                25.0%    <a href="#m2">[2]</a>      1/1              2 A.m ()
-                25.0%    <a href="#m3">[3]</a>      1/1              2 B.m ()
-<a name="m1"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              4 (toplevel)
-[1]     50.0%                     2+0              4 Z.m ()
-               100.0%   excl                       4
-<a name="m2"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[2]     25.0%                     1+0              2 A.m ()
-               100.0%   excl                       2
-<a name="m3"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[3]     25.0%                     1+0              2 B.m ()
-               100.0%   excl                       2
-</pre>
-<a name="thread"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Elapsed times for each thread, sorted by elapsed time.
-Also includes percentage of time spent during the <i>execution</i> of any filters.
-
-<br><br>
-<pre>
-    Usecs   self %  sum %  FirstFilter %  SecondFilter %  RepeatedFilter %  tid   ThreadName
-        8  100.00 100.00  75.00   0.00   0.00      1 main
-        0    0.00 100.00    nan    nan    nan      2 foo
-        0    0.00 100.00    nan    nan    nan      3 bar
-        0    0.00 100.00    nan    nan    nan      4 blah
-</pre><br />
-
-Break-down of portion of time spent by each thread while waiting on a filter method.
-<br/><br/>
-<pre>
-Filter: FirstFilter
-Total waiting cycles: 6 ( 75.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         6                   100.00                     100.00               main
-         0                     0.00                       0.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: SecondFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<br/><br/>
-<pre>
-Filter: RepeatedFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<a name="class"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each class, summed over all the methods
-in the class.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Class</div>
-<div class="link" onClick="javascript:toggle('d0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Z</div>
-<div class="parent" id="d0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d1')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd1">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;25.0 &nbsp;&nbsp;&nbsp;75.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; A</div>
-<div class="parent" id="d1">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d2')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd2">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;25.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; B</div>
-<div class="parent" id="d2">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;m&nbsp;()</div>
-</div>
-<a name="method"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each method, summed over all the classes
-that contain a method with the same name.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Method</div>
-<div class="link" onClick="javascript:toggle('e0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xe0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8 &nbsp;&nbsp;100.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; m</div>
-<div class="parent" id="e0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;Z.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;25.0&nbsp;&nbsp;&nbsp;&nbsp;75.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;A.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;25.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;B.m&nbsp;()</div>
-</div>
-
-</body>
-</html>
diff --git a/tools/dmtracedump/tests/filters/testWaitingPairSingleThreadTrace b/tools/dmtracedump/tests/filters/testWaitingPairSingleThreadTrace
deleted file mode 100644
index 3f29843..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPairSingleThreadTrace
+++ /dev/null
Binary files differ
diff --git a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadDiffFilterDiffKeys b/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadDiffFilterDiffKeys
deleted file mode 100644
index 05995f3..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadDiffFilterDiffKeys
+++ /dev/null
@@ -1,23 +0,0 @@
-#    ____             ____  ____      ____
-# __|A   |___________|B   ||Z   |____|Z   |_______
-#
-#         ____  ____             ____      ____
-# _______|Z   ||D   |___________|E   |____|Z   |__
-#
-#
-0 1 A
-2 1 A
-0 2 Z
-2 2 Z
-2 2 D
-4 2 D
-2 1 B
-4 1 B
-4 1 Z
-6 1 Z
-4 2 E
-6 2 E
-6 1 Z
-8 1 Z
-6 2 Z
-8 2 Z
diff --git a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadDiffFilterDiffKeysExpected b/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadDiffFilterDiffKeysExpected
deleted file mode 100644
index ba83cee..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadDiffFilterDiffKeysExpected
+++ /dev/null
@@ -1,232 +0,0 @@
-<html>
-<head>
-<script type="text/javascript" src="(null)sortable.js"></script>
-<script langugage="javascript">
-function toggle(item) {
-    obj=document.getElementById(item);
-    visible=(obj.style.display!="none" && obj.style.display!="");
-    key=document.getElementById("x" + item);
-    if (visible) {
-        obj.style.display="none";
-        key.innerHTML="+";
-    } else {
-        obj.style.display="block";
-        key.innerHTML="-";
-    }
-}
-function onMouseOver(obj) {
-    obj.style.background="lightblue";
-}
-function onMouseOut(obj) {
-    obj.style.background="white";
-}
-</script>
-<style type="text/css">
-div { font-family: courier; font-size: 13 }
-div.parent { margin-left: 15; display: none }
-div.leaf { margin-left: 10 }
-div.header { margin-left: 10 }
-div.link { margin-left: 10; cursor: move }
-span.parent { padding-right: 10; }
-span.leaf { padding-right: 10; }
-a img { border: 0;}
-table.sortable th { border-width: 0px 1px 1px 1px; background-color: #ccc;}
-a { text-decoration: none; }
-a:hover { text-decoration: underline; }
-table.sortable th, table.sortable td { text-align: left;}table.sortable tr.odd td { background-color: #ddd; }
-table.sortable tr.even td { background-color: #fff; }
-</style>
-</head><body>
-
-<a name="contents"></a>
-<h2>Table of Contents</h2>
-<ul>
-  <li><a href="#exclusive">Exclusive profile</a></li>
-  <li><a href="#inclusive">Inclusive profile</a></li>
-  <li><a href="#thread">Thread profile</a></li>
-  <li><a href="#class">Class/method profile</a></li>
-  <li><a href="#method">Method/class profile</a></li>
-</ul>
-
-<a name="exclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-Total cycles: 16
-
-<br><br>
-Exclusive elapsed times for each method, not including time spent in
-children, sorted by exclusive time.
-
-<br><br>
-<pre>
-    Usecs  self %  sum %  Method
-        8   50.00  50.00  <a href="#m1">[1]</a> Z.m ()
-        2   12.50  62.50  <a href="#m2">[2]</a> A.m ()
-        2   12.50  75.00  <a href="#m3">[3]</a> B.m ()
-        2   12.50  87.50  <a href="#m4">[4]</a> D.m ()
-        2   12.50 100.00  <a href="#m5">[5]</a> E.m ()
-</pre>
-<a name="inclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Inclusive elapsed times for each method and its parents and children,
-sorted by inclusive time.
-
-<br><br>
-<pre>
-index  %/total %/self  index     calls         usecs name
-<a name="m0"></a>----------------------------------------------------
-[0]    100.0%                     0+0             16 (toplevel)
-                 0.0%   excl                       0
-                50.0%    <a href="#m1">[1]</a>      4/4              8 Z.m ()
-                12.5%    <a href="#m2">[2]</a>      1/1              2 A.m ()
-                12.5%    <a href="#m3">[3]</a>      1/1              2 B.m ()
-                12.5%    <a href="#m4">[4]</a>      1/1              2 D.m ()
-                12.5%    <a href="#m5">[5]</a>      1/1              2 E.m ()
-<a name="m1"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      4/4              8 (toplevel)
-[1]     50.0%                     4+0              8 Z.m ()
-               100.0%   excl                       8
-<a name="m2"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[2]     12.5%                     1+0              2 A.m ()
-               100.0%   excl                       2
-<a name="m3"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[3]     12.5%                     1+0              2 B.m ()
-               100.0%   excl                       2
-<a name="m4"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[4]     12.5%                     1+0              2 D.m ()
-               100.0%   excl                       2
-<a name="m5"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[5]     12.5%                     1+0              2 E.m ()
-               100.0%   excl                       2
-</pre>
-<a name="thread"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Elapsed times for each thread, sorted by elapsed time.
-Also includes percentage of time spent during the <i>execution</i> of any filters.
-
-<br><br>
-<pre>
-    Usecs   self %  sum %  FirstFilter %  SecondFilter %  RepeatedFilter %  tid   ThreadName
-        8   50.00  50.00  50.00   0.00   0.00      1 main
-        8   50.00 100.00   0.00  50.00   0.00      2 foo
-        0    0.00 100.00    nan    nan    nan      3 bar
-        0    0.00 100.00    nan    nan    nan      4 blah
-</pre><br />
-
-Break-down of portion of time spent by each thread while waiting on a filter method.
-<br/><br/>
-<pre>
-Filter: FirstFilter
-Total waiting cycles: 8 ( 50.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         8                   100.00                      50.00               main
-         0                     0.00                      50.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: SecondFilter
-Total waiting cycles: 8 ( 50.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         0                     0.00                      50.00               main
-         8                   100.00                      50.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: RepeatedFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<a name="class"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each class, summed over all the methods
-in the class.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Class</div>
-<div class="link" onClick="javascript:toggle('d0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Z</div>
-<div class="parent" id="d0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d1')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd1">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;&nbsp;62.5 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; A</div>
-<div class="parent" id="d1">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d2')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd2">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;&nbsp;75.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; B</div>
-<div class="parent" id="d2">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d3')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd3">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;&nbsp;87.5 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; D</div>
-<div class="parent" id="d3">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m4">[4]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d4')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd4">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; E</div>
-<div class="parent" id="d4">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m5">[5]</a>&nbsp;m&nbsp;()</div>
-</div>
-<a name="method"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each method, summed over all the classes
-that contain a method with the same name.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Method</div>
-<div class="link" onClick="javascript:toggle('e0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xe0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;16 &nbsp;&nbsp;100.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; m</div>
-<div class="parent" id="e0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;Z.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;&nbsp;62.5&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;A.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;&nbsp;75.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;B.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;&nbsp;87.5&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m4">[4]</a>&nbsp;D.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m5">[5]</a>&nbsp;E.m&nbsp;()</div>
-</div>
-
-</body>
-</html>
diff --git a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadDiffFilterDiffKeysTrace b/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadDiffFilterDiffKeysTrace
deleted file mode 100644
index 30fbe38..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadDiffFilterDiffKeysTrace
+++ /dev/null
Binary files differ
diff --git a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadDiffFilterSameKeys b/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadDiffFilterSameKeys
deleted file mode 100644
index f874464..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadDiffFilterSameKeys
+++ /dev/null
@@ -1,23 +0,0 @@
-#    ____             ____  ____      ____
-# __|R   |___________|S   ||Z   |____|Z   |_______
-#
-#         ____  ____             ____      ____
-# _______|Z   ||R   |___________|S   |____|Z   |__
-#
-#
-0 1 R
-2 1 R
-0 2 Z
-2 2 Z
-2 2 R
-4 2 R
-2 1 S
-4 1 S
-4 1 Z
-6 1 Z
-4 2 S
-6 2 S
-6 1 Z
-8 1 Z
-6 2 Z
-8 2 Z
diff --git a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadDiffFilterSameKeysExpected b/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadDiffFilterSameKeysExpected
deleted file mode 100644
index 93c4a05..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadDiffFilterSameKeysExpected
+++ /dev/null
@@ -1,210 +0,0 @@
-<html>
-<head>
-<script type="text/javascript" src="(null)sortable.js"></script>
-<script langugage="javascript">
-function toggle(item) {
-    obj=document.getElementById(item);
-    visible=(obj.style.display!="none" && obj.style.display!="");
-    key=document.getElementById("x" + item);
-    if (visible) {
-        obj.style.display="none";
-        key.innerHTML="+";
-    } else {
-        obj.style.display="block";
-        key.innerHTML="-";
-    }
-}
-function onMouseOver(obj) {
-    obj.style.background="lightblue";
-}
-function onMouseOut(obj) {
-    obj.style.background="white";
-}
-</script>
-<style type="text/css">
-div { font-family: courier; font-size: 13 }
-div.parent { margin-left: 15; display: none }
-div.leaf { margin-left: 10 }
-div.header { margin-left: 10 }
-div.link { margin-left: 10; cursor: move }
-span.parent { padding-right: 10; }
-span.leaf { padding-right: 10; }
-a img { border: 0;}
-table.sortable th { border-width: 0px 1px 1px 1px; background-color: #ccc;}
-a { text-decoration: none; }
-a:hover { text-decoration: underline; }
-table.sortable th, table.sortable td { text-align: left;}table.sortable tr.odd td { background-color: #ddd; }
-table.sortable tr.even td { background-color: #fff; }
-</style>
-</head><body>
-
-<a name="contents"></a>
-<h2>Table of Contents</h2>
-<ul>
-  <li><a href="#exclusive">Exclusive profile</a></li>
-  <li><a href="#inclusive">Inclusive profile</a></li>
-  <li><a href="#thread">Thread profile</a></li>
-  <li><a href="#class">Class/method profile</a></li>
-  <li><a href="#method">Method/class profile</a></li>
-</ul>
-
-<a name="exclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-Total cycles: 16
-
-<br><br>
-Exclusive elapsed times for each method, not including time spent in
-children, sorted by exclusive time.
-
-<br><br>
-<pre>
-    Usecs  self %  sum %  Method
-        8   50.00  50.00  <a href="#m1">[1]</a> Z.m ()
-        4   25.00  75.00  <a href="#m2">[2]</a> R.m ()
-        4   25.00 100.00  <a href="#m3">[3]</a> S.m ()
-</pre>
-<a name="inclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Inclusive elapsed times for each method and its parents and children,
-sorted by inclusive time.
-
-<br><br>
-<pre>
-index  %/total %/self  index     calls         usecs name
-<a name="m0"></a>----------------------------------------------------
-[0]    100.0%                     0+0             16 (toplevel)
-                 0.0%   excl                       0
-                50.0%    <a href="#m1">[1]</a>      4/4              8 Z.m ()
-                25.0%    <a href="#m2">[2]</a>      2/2              4 R.m ()
-                25.0%    <a href="#m3">[3]</a>      2/2              4 S.m ()
-<a name="m1"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      4/4              8 (toplevel)
-[1]     50.0%                     4+0              8 Z.m ()
-               100.0%   excl                       8
-<a name="m2"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              4 (toplevel)
-[2]     25.0%                     2+0              4 R.m ()
-               100.0%   excl                       4
-<a name="m3"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              4 (toplevel)
-[3]     25.0%                     2+0              4 S.m ()
-               100.0%   excl                       4
-</pre>
-<a name="thread"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Elapsed times for each thread, sorted by elapsed time.
-Also includes percentage of time spent during the <i>execution</i> of any filters.
-
-<br><br>
-<pre>
-    Usecs   self %  sum %  FirstFilter %  SecondFilter %  RepeatedFilter %  tid   ThreadName
-        8   50.00  50.00  50.00   0.00  50.00      1 main
-        8   50.00 100.00  50.00   0.00  50.00      2 foo
-        0    0.00 100.00    nan    nan    nan      3 bar
-        0    0.00 100.00    nan    nan    nan      4 blah
-</pre><br />
-
-Break-down of portion of time spent by each thread while waiting on a filter method.
-<br/><br/>
-<pre>
-Filter: FirstFilter
-Total waiting cycles: 12 ( 75.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         8                    66.67                      50.00               main
-         8                    66.67                      50.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: SecondFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<br/><br/>
-<pre>
-Filter: RepeatedFilter
-Total waiting cycles: 12 ( 75.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         8                    66.67                      50.00               main
-         8                    66.67                      50.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<a name="class"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each class, summed over all the methods
-in the class.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Class</div>
-<div class="link" onClick="javascript:toggle('d0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Z</div>
-<div class="parent" id="d0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d1')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd1">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;25.0 &nbsp;&nbsp;&nbsp;75.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; R</div>
-<div class="parent" id="d1">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d2')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd2">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;25.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; S</div>
-<div class="parent" id="d2">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;m&nbsp;()</div>
-</div>
-<a name="method"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each method, summed over all the classes
-that contain a method with the same name.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Method</div>
-<div class="link" onClick="javascript:toggle('e0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xe0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;16 &nbsp;&nbsp;100.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; m</div>
-<div class="parent" id="e0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;Z.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;25.0&nbsp;&nbsp;&nbsp;&nbsp;75.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;R.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;25.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;S.m&nbsp;()</div>
-</div>
-
-</body>
-</html>
diff --git a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadDiffFilterSameKeysTrace b/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadDiffFilterSameKeysTrace
deleted file mode 100644
index 6dc1826..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadDiffFilterSameKeysTrace
+++ /dev/null
Binary files differ
diff --git a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadSameFilterDiffKeys b/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadSameFilterDiffKeys
deleted file mode 100644
index bdc4373..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadSameFilterDiffKeys
+++ /dev/null
@@ -1,23 +0,0 @@
-#    ____             ____  ____      ____
-# __|A   |___________|B   ||Z   |____|Z   |_______
-#
-#         ____  ____             ____      ____
-# _______|Z   ||R   |___________|S   |____|Z   |__
-#
-#
-0 1 A
-2 1 A
-0 2 Z
-2 2 Z
-2 2 R
-4 2 R
-2 1 B
-4 1 B
-4 1 Z
-6 1 Z
-4 2 S
-6 2 S
-6 1 Z
-8 1 Z
-6 2 Z
-8 2 Z
diff --git a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadSameFilterDiffKeysExpected b/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadSameFilterDiffKeysExpected
deleted file mode 100644
index b154ed1..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadSameFilterDiffKeysExpected
+++ /dev/null
@@ -1,232 +0,0 @@
-<html>
-<head>
-<script type="text/javascript" src="(null)sortable.js"></script>
-<script langugage="javascript">
-function toggle(item) {
-    obj=document.getElementById(item);
-    visible=(obj.style.display!="none" && obj.style.display!="");
-    key=document.getElementById("x" + item);
-    if (visible) {
-        obj.style.display="none";
-        key.innerHTML="+";
-    } else {
-        obj.style.display="block";
-        key.innerHTML="-";
-    }
-}
-function onMouseOver(obj) {
-    obj.style.background="lightblue";
-}
-function onMouseOut(obj) {
-    obj.style.background="white";
-}
-</script>
-<style type="text/css">
-div { font-family: courier; font-size: 13 }
-div.parent { margin-left: 15; display: none }
-div.leaf { margin-left: 10 }
-div.header { margin-left: 10 }
-div.link { margin-left: 10; cursor: move }
-span.parent { padding-right: 10; }
-span.leaf { padding-right: 10; }
-a img { border: 0;}
-table.sortable th { border-width: 0px 1px 1px 1px; background-color: #ccc;}
-a { text-decoration: none; }
-a:hover { text-decoration: underline; }
-table.sortable th, table.sortable td { text-align: left;}table.sortable tr.odd td { background-color: #ddd; }
-table.sortable tr.even td { background-color: #fff; }
-</style>
-</head><body>
-
-<a name="contents"></a>
-<h2>Table of Contents</h2>
-<ul>
-  <li><a href="#exclusive">Exclusive profile</a></li>
-  <li><a href="#inclusive">Inclusive profile</a></li>
-  <li><a href="#thread">Thread profile</a></li>
-  <li><a href="#class">Class/method profile</a></li>
-  <li><a href="#method">Method/class profile</a></li>
-</ul>
-
-<a name="exclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-Total cycles: 16
-
-<br><br>
-Exclusive elapsed times for each method, not including time spent in
-children, sorted by exclusive time.
-
-<br><br>
-<pre>
-    Usecs  self %  sum %  Method
-        8   50.00  50.00  <a href="#m1">[1]</a> Z.m ()
-        2   12.50  62.50  <a href="#m2">[2]</a> A.m ()
-        2   12.50  75.00  <a href="#m3">[3]</a> B.m ()
-        2   12.50  87.50  <a href="#m4">[4]</a> R.m ()
-        2   12.50 100.00  <a href="#m5">[5]</a> S.m ()
-</pre>
-<a name="inclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Inclusive elapsed times for each method and its parents and children,
-sorted by inclusive time.
-
-<br><br>
-<pre>
-index  %/total %/self  index     calls         usecs name
-<a name="m0"></a>----------------------------------------------------
-[0]    100.0%                     0+0             16 (toplevel)
-                 0.0%   excl                       0
-                50.0%    <a href="#m1">[1]</a>      4/4              8 Z.m ()
-                12.5%    <a href="#m2">[2]</a>      1/1              2 A.m ()
-                12.5%    <a href="#m3">[3]</a>      1/1              2 B.m ()
-                12.5%    <a href="#m4">[4]</a>      1/1              2 R.m ()
-                12.5%    <a href="#m5">[5]</a>      1/1              2 S.m ()
-<a name="m1"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      4/4              8 (toplevel)
-[1]     50.0%                     4+0              8 Z.m ()
-               100.0%   excl                       8
-<a name="m2"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[2]     12.5%                     1+0              2 A.m ()
-               100.0%   excl                       2
-<a name="m3"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[3]     12.5%                     1+0              2 B.m ()
-               100.0%   excl                       2
-<a name="m4"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[4]     12.5%                     1+0              2 R.m ()
-               100.0%   excl                       2
-<a name="m5"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[5]     12.5%                     1+0              2 S.m ()
-               100.0%   excl                       2
-</pre>
-<a name="thread"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Elapsed times for each thread, sorted by elapsed time.
-Also includes percentage of time spent during the <i>execution</i> of any filters.
-
-<br><br>
-<pre>
-    Usecs   self %  sum %  FirstFilter %  SecondFilter %  RepeatedFilter %  tid   ThreadName
-        8   50.00  50.00  50.00   0.00   0.00      1 main
-        8   50.00 100.00  50.00   0.00  50.00      2 foo
-        0    0.00 100.00    nan    nan    nan      3 bar
-        0    0.00 100.00    nan    nan    nan      4 blah
-</pre><br />
-
-Break-down of portion of time spent by each thread while waiting on a filter method.
-<br/><br/>
-<pre>
-Filter: FirstFilter
-Total waiting cycles: 12 ( 75.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         8                    66.67                      50.00               main
-         8                    66.67                      50.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: SecondFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<br/><br/>
-<pre>
-Filter: RepeatedFilter
-Total waiting cycles: 8 ( 50.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         0                     0.00                      50.00               main
-         8                   100.00                      50.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<a name="class"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each class, summed over all the methods
-in the class.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Class</div>
-<div class="link" onClick="javascript:toggle('d0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Z</div>
-<div class="parent" id="d0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d1')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd1">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;&nbsp;62.5 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; A</div>
-<div class="parent" id="d1">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d2')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd2">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;&nbsp;75.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; B</div>
-<div class="parent" id="d2">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d3')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd3">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;&nbsp;87.5 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; R</div>
-<div class="parent" id="d3">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m4">[4]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d4')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd4">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;12.5 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; S</div>
-<div class="parent" id="d4">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m5">[5]</a>&nbsp;m&nbsp;()</div>
-</div>
-<a name="method"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each method, summed over all the classes
-that contain a method with the same name.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Method</div>
-<div class="link" onClick="javascript:toggle('e0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xe0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;16 &nbsp;&nbsp;100.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; m</div>
-<div class="parent" id="e0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;Z.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;&nbsp;62.5&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;A.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;&nbsp;75.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;B.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;&nbsp;87.5&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m4">[4]</a>&nbsp;R.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;12.5&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m5">[5]</a>&nbsp;S.m&nbsp;()</div>
-</div>
-
-</body>
-</html>
diff --git a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadSameFilterDiffKeysTrace b/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadSameFilterDiffKeysTrace
deleted file mode 100644
index efb0b1b..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadSameFilterDiffKeysTrace
+++ /dev/null
Binary files differ
diff --git a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadSameFilterSameKeys b/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadSameFilterSameKeys
deleted file mode 100644
index 552ae40..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadSameFilterSameKeys
+++ /dev/null
@@ -1,23 +0,0 @@
-#    ____             ____  ____      ____
-# __|A   |___________|B   ||Z   |____|Z   |_______
-#
-#         ____  ____             ____      ____
-# _______|Z   ||A   |___________|B   |____|Z   |__
-#
-#
-0 1 A
-2 1 A
-0 2 Z
-2 2 Z
-2 2 A
-4 2 A
-2 1 B
-4 1 B
-4 1 Z
-6 1 Z
-4 2 B
-6 2 B
-6 1 Z
-8 1 Z
-6 2 Z
-8 2 Z
diff --git a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadSameFilterSameKeysExpected b/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadSameFilterSameKeysExpected
deleted file mode 100644
index 82b0356..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadSameFilterSameKeysExpected
+++ /dev/null
@@ -1,203 +0,0 @@
-<html>
-<head>
-<script type="text/javascript" src="(null)sortable.js"></script>
-<script langugage="javascript">
-function toggle(item) {
-    obj=document.getElementById(item);
-    visible=(obj.style.display!="none" && obj.style.display!="");
-    key=document.getElementById("x" + item);
-    if (visible) {
-        obj.style.display="none";
-        key.innerHTML="+";
-    } else {
-        obj.style.display="block";
-        key.innerHTML="-";
-    }
-}
-function onMouseOver(obj) {
-    obj.style.background="lightblue";
-}
-function onMouseOut(obj) {
-    obj.style.background="white";
-}
-</script>
-<style type="text/css">
-div { font-family: courier; font-size: 13 }
-div.parent { margin-left: 15; display: none }
-div.leaf { margin-left: 10 }
-div.header { margin-left: 10 }
-div.link { margin-left: 10; cursor: move }
-span.parent { padding-right: 10; }
-span.leaf { padding-right: 10; }
-a img { border: 0;}
-table.sortable th { border-width: 0px 1px 1px 1px; background-color: #ccc;}
-a { text-decoration: none; }
-a:hover { text-decoration: underline; }
-table.sortable th, table.sortable td { text-align: left;}table.sortable tr.odd td { background-color: #ddd; }
-table.sortable tr.even td { background-color: #fff; }
-</style>
-</head><body>
-
-<a name="contents"></a>
-<h2>Table of Contents</h2>
-<ul>
-  <li><a href="#exclusive">Exclusive profile</a></li>
-  <li><a href="#inclusive">Inclusive profile</a></li>
-  <li><a href="#thread">Thread profile</a></li>
-  <li><a href="#class">Class/method profile</a></li>
-  <li><a href="#method">Method/class profile</a></li>
-</ul>
-
-<a name="exclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-Total cycles: 16
-
-<br><br>
-Exclusive elapsed times for each method, not including time spent in
-children, sorted by exclusive time.
-
-<br><br>
-<pre>
-    Usecs  self %  sum %  Method
-        8   50.00  50.00  <a href="#m1">[1]</a> Z.m ()
-        4   25.00  75.00  <a href="#m2">[2]</a> A.m ()
-        4   25.00 100.00  <a href="#m3">[3]</a> B.m ()
-</pre>
-<a name="inclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Inclusive elapsed times for each method and its parents and children,
-sorted by inclusive time.
-
-<br><br>
-<pre>
-index  %/total %/self  index     calls         usecs name
-<a name="m0"></a>----------------------------------------------------
-[0]    100.0%                     0+0             16 (toplevel)
-                 0.0%   excl                       0
-                50.0%    <a href="#m1">[1]</a>      4/4              8 Z.m ()
-                25.0%    <a href="#m2">[2]</a>      2/2              4 A.m ()
-                25.0%    <a href="#m3">[3]</a>      2/2              4 B.m ()
-<a name="m1"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      4/4              8 (toplevel)
-[1]     50.0%                     4+0              8 Z.m ()
-               100.0%   excl                       8
-<a name="m2"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              4 (toplevel)
-[2]     25.0%                     2+0              4 A.m ()
-               100.0%   excl                       4
-<a name="m3"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              4 (toplevel)
-[3]     25.0%                     2+0              4 B.m ()
-               100.0%   excl                       4
-</pre>
-<a name="thread"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Elapsed times for each thread, sorted by elapsed time.
-Also includes percentage of time spent during the <i>execution</i> of any filters.
-
-<br><br>
-<pre>
-    Usecs   self %  sum %  FirstFilter %  SecondFilter %  RepeatedFilter %  tid   ThreadName
-        8   50.00  50.00  50.00   0.00   0.00      1 main
-        8   50.00 100.00  50.00   0.00   0.00      2 foo
-        0    0.00 100.00    nan    nan    nan      3 bar
-        0    0.00 100.00    nan    nan    nan      4 blah
-</pre><br />
-
-Break-down of portion of time spent by each thread while waiting on a filter method.
-<br/><br/>
-<pre>
-Filter: FirstFilter
-Total waiting cycles: 12 ( 75.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         8                    66.67                      50.00               main
-         8                    66.67                      50.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: SecondFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<br/><br/>
-<pre>
-Filter: RepeatedFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<a name="class"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each class, summed over all the methods
-in the class.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Class</div>
-<div class="link" onClick="javascript:toggle('d0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Z</div>
-<div class="parent" id="d0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d1')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd1">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;25.0 &nbsp;&nbsp;&nbsp;75.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; A</div>
-<div class="parent" id="d1">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d2')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd2">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;25.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; B</div>
-<div class="parent" id="d2">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;m&nbsp;()</div>
-</div>
-<a name="method"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each method, summed over all the classes
-that contain a method with the same name.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Method</div>
-<div class="link" onClick="javascript:toggle('e0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xe0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;16 &nbsp;&nbsp;100.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; m</div>
-<div class="parent" id="e0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;Z.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;25.0&nbsp;&nbsp;&nbsp;&nbsp;75.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;A.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;25.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;B.m&nbsp;()</div>
-</div>
-
-</body>
-</html>
diff --git a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadSameFilterSameKeysTrace b/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadSameFilterSameKeysTrace
deleted file mode 100644
index 497e925..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapCrossThreadSameFilterSameKeysTrace
+++ /dev/null
Binary files differ
diff --git a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadDiffFilterDiffKeys b/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadDiffFilterDiffKeys
deleted file mode 100644
index edf03c5..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadDiffFilterDiffKeys
+++ /dev/null
@@ -1,13 +0,0 @@
-#    ____  ____  ____  ____  ____
-# __|A   ||D   ||B   ||E   ||Z   |__
-#
-0 1 A
-2 1 A
-2 1 D
-4 1 D
-4 1 B
-6 1 B
-6 1 E
-8 1 E
-8 1 Z
-10 1 Z
diff --git a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadDiffFilterDiffKeysExpected b/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadDiffFilterDiffKeysExpected
deleted file mode 100644
index 2d59720..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadDiffFilterDiffKeysExpected
+++ /dev/null
@@ -1,232 +0,0 @@
-<html>
-<head>
-<script type="text/javascript" src="(null)sortable.js"></script>
-<script langugage="javascript">
-function toggle(item) {
-    obj=document.getElementById(item);
-    visible=(obj.style.display!="none" && obj.style.display!="");
-    key=document.getElementById("x" + item);
-    if (visible) {
-        obj.style.display="none";
-        key.innerHTML="+";
-    } else {
-        obj.style.display="block";
-        key.innerHTML="-";
-    }
-}
-function onMouseOver(obj) {
-    obj.style.background="lightblue";
-}
-function onMouseOut(obj) {
-    obj.style.background="white";
-}
-</script>
-<style type="text/css">
-div { font-family: courier; font-size: 13 }
-div.parent { margin-left: 15; display: none }
-div.leaf { margin-left: 10 }
-div.header { margin-left: 10 }
-div.link { margin-left: 10; cursor: move }
-span.parent { padding-right: 10; }
-span.leaf { padding-right: 10; }
-a img { border: 0;}
-table.sortable th { border-width: 0px 1px 1px 1px; background-color: #ccc;}
-a { text-decoration: none; }
-a:hover { text-decoration: underline; }
-table.sortable th, table.sortable td { text-align: left;}table.sortable tr.odd td { background-color: #ddd; }
-table.sortable tr.even td { background-color: #fff; }
-</style>
-</head><body>
-
-<a name="contents"></a>
-<h2>Table of Contents</h2>
-<ul>
-  <li><a href="#exclusive">Exclusive profile</a></li>
-  <li><a href="#inclusive">Inclusive profile</a></li>
-  <li><a href="#thread">Thread profile</a></li>
-  <li><a href="#class">Class/method profile</a></li>
-  <li><a href="#method">Method/class profile</a></li>
-</ul>
-
-<a name="exclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-Total cycles: 10
-
-<br><br>
-Exclusive elapsed times for each method, not including time spent in
-children, sorted by exclusive time.
-
-<br><br>
-<pre>
-    Usecs  self %  sum %  Method
-        2   20.00  20.00  <a href="#m1">[1]</a> A.m ()
-        2   20.00  40.00  <a href="#m2">[2]</a> B.m ()
-        2   20.00  60.00  <a href="#m3">[3]</a> D.m ()
-        2   20.00  80.00  <a href="#m4">[4]</a> E.m ()
-        2   20.00 100.00  <a href="#m5">[5]</a> Z.m ()
-</pre>
-<a name="inclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Inclusive elapsed times for each method and its parents and children,
-sorted by inclusive time.
-
-<br><br>
-<pre>
-index  %/total %/self  index     calls         usecs name
-<a name="m0"></a>----------------------------------------------------
-[0]    100.0%                     0+0             10 (toplevel)
-                 0.0%   excl                       0
-                20.0%    <a href="#m1">[1]</a>      1/1              2 A.m ()
-                20.0%    <a href="#m2">[2]</a>      1/1              2 B.m ()
-                20.0%    <a href="#m3">[3]</a>      1/1              2 D.m ()
-                20.0%    <a href="#m4">[4]</a>      1/1              2 E.m ()
-                20.0%    <a href="#m5">[5]</a>      1/1              2 Z.m ()
-<a name="m1"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[1]     20.0%                     1+0              2 A.m ()
-               100.0%   excl                       2
-<a name="m2"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[2]     20.0%                     1+0              2 B.m ()
-               100.0%   excl                       2
-<a name="m3"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[3]     20.0%                     1+0              2 D.m ()
-               100.0%   excl                       2
-<a name="m4"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[4]     20.0%                     1+0              2 E.m ()
-               100.0%   excl                       2
-<a name="m5"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[5]     20.0%                     1+0              2 Z.m ()
-               100.0%   excl                       2
-</pre>
-<a name="thread"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Elapsed times for each thread, sorted by elapsed time.
-Also includes percentage of time spent during the <i>execution</i> of any filters.
-
-<br><br>
-<pre>
-    Usecs   self %  sum %  FirstFilter %  SecondFilter %  RepeatedFilter %  tid   ThreadName
-       10  100.00 100.00  60.00  60.00   0.00      1 main
-        0    0.00 100.00    nan    nan    nan      2 foo
-        0    0.00 100.00    nan    nan    nan      3 bar
-        0    0.00 100.00    nan    nan    nan      4 blah
-</pre><br />
-
-Break-down of portion of time spent by each thread while waiting on a filter method.
-<br/><br/>
-<pre>
-Filter: FirstFilter
-Total waiting cycles: 6 ( 60.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         6                   100.00                     100.00               main
-         0                     0.00                       0.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: SecondFilter
-Total waiting cycles: 6 ( 60.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         6                   100.00                     100.00               main
-         0                     0.00                       0.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: RepeatedFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<a name="class"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each class, summed over all the methods
-in the class.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Class</div>
-<div class="link" onClick="javascript:toggle('d0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;20.0 &nbsp;&nbsp;&nbsp;20.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; A</div>
-<div class="parent" id="d0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d1')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd1">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;20.0 &nbsp;&nbsp;&nbsp;40.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; B</div>
-<div class="parent" id="d1">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d2')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd2">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;20.0 &nbsp;&nbsp;&nbsp;60.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; D</div>
-<div class="parent" id="d2">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d3')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd3">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;20.0 &nbsp;&nbsp;&nbsp;80.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; E</div>
-<div class="parent" id="d3">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m4">[4]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d4')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd4">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;20.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Z</div>
-<div class="parent" id="d4">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m5">[5]</a>&nbsp;m&nbsp;()</div>
-</div>
-<a name="method"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each method, summed over all the classes
-that contain a method with the same name.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Method</div>
-<div class="link" onClick="javascript:toggle('e0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xe0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;10 &nbsp;&nbsp;100.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;5+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; m</div>
-<div class="parent" id="e0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;20.0&nbsp;&nbsp;&nbsp;&nbsp;20.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;A.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;20.0&nbsp;&nbsp;&nbsp;&nbsp;40.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;B.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;20.0&nbsp;&nbsp;&nbsp;&nbsp;60.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;D.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;20.0&nbsp;&nbsp;&nbsp;&nbsp;80.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m4">[4]</a>&nbsp;E.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;20.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m5">[5]</a>&nbsp;Z.m&nbsp;()</div>
-</div>
-
-</body>
-</html>
diff --git a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadDiffFilterDiffKeysTrace b/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadDiffFilterDiffKeysTrace
deleted file mode 100644
index b9afef4..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadDiffFilterDiffKeysTrace
+++ /dev/null
Binary files differ
diff --git a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadDiffFilterSameKeys b/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadDiffFilterSameKeys
deleted file mode 100644
index 27b2bf8..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadDiffFilterSameKeys
+++ /dev/null
@@ -1,13 +0,0 @@
-#    ____  ____  ____  ____  ____
-# __|R   ||R   ||S   ||S   ||Z   |__
-#
-0 1 R
-2 1 R
-2 1 R
-4 1 R
-4 1 S
-6 1 S
-6 1 S
-8 1 S
-8 1 Z
-10 1 Z
diff --git a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadDiffFilterSameKeysExpected b/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadDiffFilterSameKeysExpected
deleted file mode 100644
index df55cd4..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadDiffFilterSameKeysExpected
+++ /dev/null
@@ -1,210 +0,0 @@
-<html>
-<head>
-<script type="text/javascript" src="(null)sortable.js"></script>
-<script langugage="javascript">
-function toggle(item) {
-    obj=document.getElementById(item);
-    visible=(obj.style.display!="none" && obj.style.display!="");
-    key=document.getElementById("x" + item);
-    if (visible) {
-        obj.style.display="none";
-        key.innerHTML="+";
-    } else {
-        obj.style.display="block";
-        key.innerHTML="-";
-    }
-}
-function onMouseOver(obj) {
-    obj.style.background="lightblue";
-}
-function onMouseOut(obj) {
-    obj.style.background="white";
-}
-</script>
-<style type="text/css">
-div { font-family: courier; font-size: 13 }
-div.parent { margin-left: 15; display: none }
-div.leaf { margin-left: 10 }
-div.header { margin-left: 10 }
-div.link { margin-left: 10; cursor: move }
-span.parent { padding-right: 10; }
-span.leaf { padding-right: 10; }
-a img { border: 0;}
-table.sortable th { border-width: 0px 1px 1px 1px; background-color: #ccc;}
-a { text-decoration: none; }
-a:hover { text-decoration: underline; }
-table.sortable th, table.sortable td { text-align: left;}table.sortable tr.odd td { background-color: #ddd; }
-table.sortable tr.even td { background-color: #fff; }
-</style>
-</head><body>
-
-<a name="contents"></a>
-<h2>Table of Contents</h2>
-<ul>
-  <li><a href="#exclusive">Exclusive profile</a></li>
-  <li><a href="#inclusive">Inclusive profile</a></li>
-  <li><a href="#thread">Thread profile</a></li>
-  <li><a href="#class">Class/method profile</a></li>
-  <li><a href="#method">Method/class profile</a></li>
-</ul>
-
-<a name="exclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-Total cycles: 10
-
-<br><br>
-Exclusive elapsed times for each method, not including time spent in
-children, sorted by exclusive time.
-
-<br><br>
-<pre>
-    Usecs  self %  sum %  Method
-        4   40.00  40.00  <a href="#m1">[1]</a> R.m ()
-        4   40.00  80.00  <a href="#m2">[2]</a> S.m ()
-        2   20.00 100.00  <a href="#m3">[3]</a> Z.m ()
-</pre>
-<a name="inclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Inclusive elapsed times for each method and its parents and children,
-sorted by inclusive time.
-
-<br><br>
-<pre>
-index  %/total %/self  index     calls         usecs name
-<a name="m0"></a>----------------------------------------------------
-[0]    100.0%                     0+0             10 (toplevel)
-                 0.0%   excl                       0
-                40.0%    <a href="#m1">[1]</a>      2/2              4 R.m ()
-                40.0%    <a href="#m2">[2]</a>      2/2              4 S.m ()
-                20.0%    <a href="#m3">[3]</a>      1/1              2 Z.m ()
-<a name="m1"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              4 (toplevel)
-[1]     40.0%                     2+0              4 R.m ()
-               100.0%   excl                       4
-<a name="m2"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              4 (toplevel)
-[2]     40.0%                     2+0              4 S.m ()
-               100.0%   excl                       4
-<a name="m3"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[3]     20.0%                     1+0              2 Z.m ()
-               100.0%   excl                       2
-</pre>
-<a name="thread"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Elapsed times for each thread, sorted by elapsed time.
-Also includes percentage of time spent during the <i>execution</i> of any filters.
-
-<br><br>
-<pre>
-    Usecs   self %  sum %  FirstFilter %  SecondFilter %  RepeatedFilter %  tid   ThreadName
-       10  100.00 100.00  80.00   0.00  80.00      1 main
-        0    0.00 100.00    nan    nan    nan      2 foo
-        0    0.00 100.00    nan    nan    nan      3 bar
-        0    0.00 100.00    nan    nan    nan      4 blah
-</pre><br />
-
-Break-down of portion of time spent by each thread while waiting on a filter method.
-<br/><br/>
-<pre>
-Filter: FirstFilter
-Total waiting cycles: 8 ( 80.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         8                   100.00                     100.00               main
-         0                     0.00                       0.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: SecondFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<br/><br/>
-<pre>
-Filter: RepeatedFilter
-Total waiting cycles: 8 ( 80.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         8                   100.00                     100.00               main
-         0                     0.00                       0.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<a name="class"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each class, summed over all the methods
-in the class.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Class</div>
-<div class="link" onClick="javascript:toggle('d0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;40.0 &nbsp;&nbsp;&nbsp;40.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; R</div>
-<div class="parent" id="d0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d1')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd1">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;40.0 &nbsp;&nbsp;&nbsp;80.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; S</div>
-<div class="parent" id="d1">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d2')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd2">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;20.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Z</div>
-<div class="parent" id="d2">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;m&nbsp;()</div>
-</div>
-<a name="method"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each method, summed over all the classes
-that contain a method with the same name.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Method</div>
-<div class="link" onClick="javascript:toggle('e0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xe0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;10 &nbsp;&nbsp;100.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;5+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; m</div>
-<div class="parent" id="e0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;40.0&nbsp;&nbsp;&nbsp;&nbsp;40.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;R.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;40.0&nbsp;&nbsp;&nbsp;&nbsp;80.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;S.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;20.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;Z.m&nbsp;()</div>
-</div>
-
-</body>
-</html>
diff --git a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadDiffFilterSameKeysTrace b/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadDiffFilterSameKeysTrace
deleted file mode 100644
index a52929a..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadDiffFilterSameKeysTrace
+++ /dev/null
Binary files differ
diff --git a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadSameFilterDiffKeys b/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadSameFilterDiffKeys
deleted file mode 100644
index c53c90d..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadSameFilterDiffKeys
+++ /dev/null
@@ -1,12 +0,0 @@
-#               ____
-#    ____  ____|B   |____  ____
-# __|A   ||C             ||Z   |__
-#
-0 1 A
-2 1 A
-2 1 C
-4 1  B
-6 1  B
-8 1 C
-8 1 Z
-10 1 Z
diff --git a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadSameFilterDiffKeysExpected b/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadSameFilterDiffKeysExpected
deleted file mode 100644
index 18ce892..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadSameFilterDiffKeysExpected
+++ /dev/null
@@ -1,214 +0,0 @@
-<html>
-<head>
-<script type="text/javascript" src="(null)sortable.js"></script>
-<script langugage="javascript">
-function toggle(item) {
-    obj=document.getElementById(item);
-    visible=(obj.style.display!="none" && obj.style.display!="");
-    key=document.getElementById("x" + item);
-    if (visible) {
-        obj.style.display="none";
-        key.innerHTML="+";
-    } else {
-        obj.style.display="block";
-        key.innerHTML="-";
-    }
-}
-function onMouseOver(obj) {
-    obj.style.background="lightblue";
-}
-function onMouseOut(obj) {
-    obj.style.background="white";
-}
-</script>
-<style type="text/css">
-div { font-family: courier; font-size: 13 }
-div.parent { margin-left: 15; display: none }
-div.leaf { margin-left: 10 }
-div.header { margin-left: 10 }
-div.link { margin-left: 10; cursor: move }
-span.parent { padding-right: 10; }
-span.leaf { padding-right: 10; }
-a img { border: 0;}
-table.sortable th { border-width: 0px 1px 1px 1px; background-color: #ccc;}
-a { text-decoration: none; }
-a:hover { text-decoration: underline; }
-table.sortable th, table.sortable td { text-align: left;}table.sortable tr.odd td { background-color: #ddd; }
-table.sortable tr.even td { background-color: #fff; }
-</style>
-</head><body>
-
-<a name="contents"></a>
-<h2>Table of Contents</h2>
-<ul>
-  <li><a href="#exclusive">Exclusive profile</a></li>
-  <li><a href="#inclusive">Inclusive profile</a></li>
-  <li><a href="#thread">Thread profile</a></li>
-  <li><a href="#class">Class/method profile</a></li>
-  <li><a href="#method">Method/class profile</a></li>
-</ul>
-
-<a name="exclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-Total cycles: 10
-
-<br><br>
-Exclusive elapsed times for each method, not including time spent in
-children, sorted by exclusive time.
-
-<br><br>
-<pre>
-    Usecs  self %  sum %  Method
-        4   40.00  40.00  <a href="#m1">[1]</a> C.m ()
-        2   20.00  60.00  <a href="#m2">[2]</a> A.m ()
-        2   20.00  80.00  <a href="#m3">[3]</a> B.m ()
-        2   20.00 100.00  <a href="#m4">[4]</a> Z.m ()
-</pre>
-<a name="inclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Inclusive elapsed times for each method and its parents and children,
-sorted by inclusive time.
-
-<br><br>
-<pre>
-index  %/total %/self  index     calls         usecs name
-<a name="m0"></a>----------------------------------------------------
-[0]    100.0%                     0+0             10 (toplevel)
-                 0.0%   excl                       0
-                60.0%    <a href="#m1">[1]</a>      1/1              6 C.m ()
-                20.0%    <a href="#m2">[2]</a>      1/1              2 A.m ()
-                20.0%    <a href="#m4">[4]</a>      1/1              2 Z.m ()
-<a name="m1"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              6 (toplevel)
-[1]     60.0%                     1+0              6 C.m ()
-                66.7%   excl                       4
-                33.3%    <a href="#m3">[3]</a>      1/1              2 B.m ()
-<a name="m2"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[2]     20.0%                     1+0              2 A.m ()
-               100.0%   excl                       2
-<a name="m3"></a>----------------------------------------------------
-               100.0%    <a href="#m1">[1]</a>      1/1              2 C.m ()
-[3]     20.0%                     1+0              2 B.m ()
-               100.0%   excl                       2
-<a name="m4"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[4]     20.0%                     1+0              2 Z.m ()
-               100.0%   excl                       2
-</pre>
-<a name="thread"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Elapsed times for each thread, sorted by elapsed time.
-Also includes percentage of time spent during the <i>execution</i> of any filters.
-
-<br><br>
-<pre>
-    Usecs   self %  sum %  FirstFilter %  SecondFilter %  RepeatedFilter %  tid   ThreadName
-       10  100.00 100.00  80.00   0.00   0.00      1 main
-        0    0.00 100.00    nan    nan    nan      2 foo
-        0    0.00 100.00    nan    nan    nan      3 bar
-        0    0.00 100.00    nan    nan    nan      4 blah
-</pre><br />
-
-Break-down of portion of time spent by each thread while waiting on a filter method.
-<br/><br/>
-<pre>
-Filter: FirstFilter
-Total waiting cycles: 8 ( 80.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         8                   100.00                     100.00               main
-         0                     0.00                       0.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: SecondFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<br/><br/>
-<pre>
-Filter: RepeatedFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<a name="class"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each class, summed over all the methods
-in the class.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Class</div>
-<div class="link" onClick="javascript:toggle('d0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;40.0 &nbsp;&nbsp;&nbsp;40.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; C</div>
-<div class="parent" id="d0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;6&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d1')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd1">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;20.0 &nbsp;&nbsp;&nbsp;60.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; A</div>
-<div class="parent" id="d1">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d2')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd2">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;20.0 &nbsp;&nbsp;&nbsp;80.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; B</div>
-<div class="parent" id="d2">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d3')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd3">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;20.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Z</div>
-<div class="parent" id="d3">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m4">[4]</a>&nbsp;m&nbsp;()</div>
-</div>
-<a name="method"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each method, summed over all the classes
-that contain a method with the same name.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Method</div>
-<div class="link" onClick="javascript:toggle('e0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xe0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;10 &nbsp;&nbsp;100.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; m</div>
-<div class="parent" id="e0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;6&nbsp;&nbsp;&nbsp;&nbsp;40.0&nbsp;&nbsp;&nbsp;&nbsp;40.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;C.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;20.0&nbsp;&nbsp;&nbsp;&nbsp;60.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;A.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;20.0&nbsp;&nbsp;&nbsp;&nbsp;80.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;B.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;20.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m4">[4]</a>&nbsp;Z.m&nbsp;()</div>
-</div>
-
-</body>
-</html>
diff --git a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadSameFilterDiffKeysTrace b/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadSameFilterDiffKeysTrace
deleted file mode 100644
index 23f4187..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadSameFilterDiffKeysTrace
+++ /dev/null
Binary files differ
diff --git a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadSameFilterSameKeys b/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadSameFilterSameKeys
deleted file mode 100644
index bd645af..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadSameFilterSameKeys
+++ /dev/null
@@ -1,13 +0,0 @@
-#    ____  ____  ____  ____  ____
-# __|A   ||A   ||B   ||B   ||Z   |__
-#
-0 1 A
-2 1 A
-2 1 A
-4 1 A
-4 1 B
-6 1 B
-6 1 B
-8 1 B
-8 1 Z
-10 1 Z
diff --git a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadSameFilterSameKeysExpected b/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadSameFilterSameKeysExpected
deleted file mode 100644
index 0e8f300..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadSameFilterSameKeysExpected
+++ /dev/null
@@ -1,203 +0,0 @@
-<html>
-<head>
-<script type="text/javascript" src="(null)sortable.js"></script>
-<script langugage="javascript">
-function toggle(item) {
-    obj=document.getElementById(item);
-    visible=(obj.style.display!="none" && obj.style.display!="");
-    key=document.getElementById("x" + item);
-    if (visible) {
-        obj.style.display="none";
-        key.innerHTML="+";
-    } else {
-        obj.style.display="block";
-        key.innerHTML="-";
-    }
-}
-function onMouseOver(obj) {
-    obj.style.background="lightblue";
-}
-function onMouseOut(obj) {
-    obj.style.background="white";
-}
-</script>
-<style type="text/css">
-div { font-family: courier; font-size: 13 }
-div.parent { margin-left: 15; display: none }
-div.leaf { margin-left: 10 }
-div.header { margin-left: 10 }
-div.link { margin-left: 10; cursor: move }
-span.parent { padding-right: 10; }
-span.leaf { padding-right: 10; }
-a img { border: 0;}
-table.sortable th { border-width: 0px 1px 1px 1px; background-color: #ccc;}
-a { text-decoration: none; }
-a:hover { text-decoration: underline; }
-table.sortable th, table.sortable td { text-align: left;}table.sortable tr.odd td { background-color: #ddd; }
-table.sortable tr.even td { background-color: #fff; }
-</style>
-</head><body>
-
-<a name="contents"></a>
-<h2>Table of Contents</h2>
-<ul>
-  <li><a href="#exclusive">Exclusive profile</a></li>
-  <li><a href="#inclusive">Inclusive profile</a></li>
-  <li><a href="#thread">Thread profile</a></li>
-  <li><a href="#class">Class/method profile</a></li>
-  <li><a href="#method">Method/class profile</a></li>
-</ul>
-
-<a name="exclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-Total cycles: 10
-
-<br><br>
-Exclusive elapsed times for each method, not including time spent in
-children, sorted by exclusive time.
-
-<br><br>
-<pre>
-    Usecs  self %  sum %  Method
-        4   40.00  40.00  <a href="#m1">[1]</a> A.m ()
-        4   40.00  80.00  <a href="#m2">[2]</a> B.m ()
-        2   20.00 100.00  <a href="#m3">[3]</a> Z.m ()
-</pre>
-<a name="inclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Inclusive elapsed times for each method and its parents and children,
-sorted by inclusive time.
-
-<br><br>
-<pre>
-index  %/total %/self  index     calls         usecs name
-<a name="m0"></a>----------------------------------------------------
-[0]    100.0%                     0+0             10 (toplevel)
-                 0.0%   excl                       0
-                40.0%    <a href="#m1">[1]</a>      2/2              4 A.m ()
-                40.0%    <a href="#m2">[2]</a>      2/2              4 B.m ()
-                20.0%    <a href="#m3">[3]</a>      1/1              2 Z.m ()
-<a name="m1"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              4 (toplevel)
-[1]     40.0%                     2+0              4 A.m ()
-               100.0%   excl                       4
-<a name="m2"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              4 (toplevel)
-[2]     40.0%                     2+0              4 B.m ()
-               100.0%   excl                       4
-<a name="m3"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              2 (toplevel)
-[3]     20.0%                     1+0              2 Z.m ()
-               100.0%   excl                       2
-</pre>
-<a name="thread"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Elapsed times for each thread, sorted by elapsed time.
-Also includes percentage of time spent during the <i>execution</i> of any filters.
-
-<br><br>
-<pre>
-    Usecs   self %  sum %  FirstFilter %  SecondFilter %  RepeatedFilter %  tid   ThreadName
-       10  100.00 100.00  80.00   0.00   0.00      1 main
-        0    0.00 100.00    nan    nan    nan      2 foo
-        0    0.00 100.00    nan    nan    nan      3 bar
-        0    0.00 100.00    nan    nan    nan      4 blah
-</pre><br />
-
-Break-down of portion of time spent by each thread while waiting on a filter method.
-<br/><br/>
-<pre>
-Filter: FirstFilter
-Total waiting cycles: 8 ( 80.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         8                   100.00                     100.00               main
-         0                     0.00                       0.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: SecondFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<br/><br/>
-<pre>
-Filter: RepeatedFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<a name="class"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each class, summed over all the methods
-in the class.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Class</div>
-<div class="link" onClick="javascript:toggle('d0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;40.0 &nbsp;&nbsp;&nbsp;40.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; A</div>
-<div class="parent" id="d0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d1')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd1">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;40.0 &nbsp;&nbsp;&nbsp;80.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; B</div>
-<div class="parent" id="d1">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d2')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd2">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2 &nbsp;&nbsp;&nbsp;20.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Z</div>
-<div class="parent" id="d2">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;m&nbsp;()</div>
-</div>
-<a name="method"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each method, summed over all the classes
-that contain a method with the same name.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Method</div>
-<div class="link" onClick="javascript:toggle('e0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xe0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;10 &nbsp;&nbsp;100.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;5+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; m</div>
-<div class="parent" id="e0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;40.0&nbsp;&nbsp;&nbsp;&nbsp;40.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;A.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;40.0&nbsp;&nbsp;&nbsp;&nbsp;80.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;B.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;20.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m3">[3]</a>&nbsp;Z.m&nbsp;()</div>
-</div>
-
-</body>
-</html>
diff --git a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadSameFilterSameKeysTrace b/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadSameFilterSameKeysTrace
deleted file mode 100644
index 01e95cd..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingPartialOverlapSingleThreadSameFilterSameKeysTrace
+++ /dev/null
Binary files differ
diff --git a/tools/dmtracedump/tests/filters/testWaitingSoloCrossThread b/tools/dmtracedump/tests/filters/testWaitingSoloCrossThread
deleted file mode 100644
index c9dbd1a..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingSoloCrossThread
+++ /dev/null
@@ -1,12 +0,0 @@
-#    ____       ____  ____
-# __|C              ||Z   |__
-#
-#          ____
-# ________|Z   |_____________
-#
-0 1 C
-0 2 Z
-2 2 Z
-4 1 C
-4 1 Z
-6 1 Z
diff --git a/tools/dmtracedump/tests/filters/testWaitingSoloCrossThreadExpected b/tools/dmtracedump/tests/filters/testWaitingSoloCrossThreadExpected
deleted file mode 100644
index 409b17e..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingSoloCrossThreadExpected
+++ /dev/null
@@ -1,192 +0,0 @@
-<html>
-<head>
-<script type="text/javascript" src="(null)sortable.js"></script>
-<script langugage="javascript">
-function toggle(item) {
-    obj=document.getElementById(item);
-    visible=(obj.style.display!="none" && obj.style.display!="");
-    key=document.getElementById("x" + item);
-    if (visible) {
-        obj.style.display="none";
-        key.innerHTML="+";
-    } else {
-        obj.style.display="block";
-        key.innerHTML="-";
-    }
-}
-function onMouseOver(obj) {
-    obj.style.background="lightblue";
-}
-function onMouseOut(obj) {
-    obj.style.background="white";
-}
-</script>
-<style type="text/css">
-div { font-family: courier; font-size: 13 }
-div.parent { margin-left: 15; display: none }
-div.leaf { margin-left: 10 }
-div.header { margin-left: 10 }
-div.link { margin-left: 10; cursor: move }
-span.parent { padding-right: 10; }
-span.leaf { padding-right: 10; }
-a img { border: 0;}
-table.sortable th { border-width: 0px 1px 1px 1px; background-color: #ccc;}
-a { text-decoration: none; }
-a:hover { text-decoration: underline; }
-table.sortable th, table.sortable td { text-align: left;}table.sortable tr.odd td { background-color: #ddd; }
-table.sortable tr.even td { background-color: #fff; }
-</style>
-</head><body>
-
-<a name="contents"></a>
-<h2>Table of Contents</h2>
-<ul>
-  <li><a href="#exclusive">Exclusive profile</a></li>
-  <li><a href="#inclusive">Inclusive profile</a></li>
-  <li><a href="#thread">Thread profile</a></li>
-  <li><a href="#class">Class/method profile</a></li>
-  <li><a href="#method">Method/class profile</a></li>
-</ul>
-
-<a name="exclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-Total cycles: 8
-
-<br><br>
-Exclusive elapsed times for each method, not including time spent in
-children, sorted by exclusive time.
-
-<br><br>
-<pre>
-    Usecs  self %  sum %  Method
-        4   50.00  50.00  <a href="#m1">[1]</a> C.m ()
-        4   50.00 100.00  <a href="#m2">[2]</a> Z.m ()
-</pre>
-<a name="inclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Inclusive elapsed times for each method and its parents and children,
-sorted by inclusive time.
-
-<br><br>
-<pre>
-index  %/total %/self  index     calls         usecs name
-<a name="m0"></a>----------------------------------------------------
-[0]    100.0%                     0+0              8 (toplevel)
-                 0.0%   excl                       0
-                50.0%    <a href="#m1">[1]</a>      1/1              4 C.m ()
-                50.0%    <a href="#m2">[2]</a>      2/2              4 Z.m ()
-<a name="m1"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              4 (toplevel)
-[1]     50.0%                     1+0              4 C.m ()
-               100.0%   excl                       4
-<a name="m2"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      2/2              4 (toplevel)
-[2]     50.0%                     2+0              4 Z.m ()
-               100.0%   excl                       4
-</pre>
-<a name="thread"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Elapsed times for each thread, sorted by elapsed time.
-Also includes percentage of time spent during the <i>execution</i> of any filters.
-
-<br><br>
-<pre>
-    Usecs   self %  sum %  FirstFilter %  SecondFilter %  RepeatedFilter %  tid   ThreadName
-        6   75.00  75.00  66.67   0.00   0.00      1 main
-        2   25.00 100.00   0.00   0.00   0.00      2 foo
-        0    0.00 100.00    nan    nan    nan      3 bar
-        0    0.00 100.00    nan    nan    nan      4 blah
-</pre><br />
-
-Break-down of portion of time spent by each thread while waiting on a filter method.
-<br/><br/>
-<pre>
-Filter: FirstFilter
-Total waiting cycles: 6 ( 75.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         6                   100.00                      66.67               main
-         0                     0.00                      33.33               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: SecondFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<br/><br/>
-<pre>
-Filter: RepeatedFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<a name="class"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each class, summed over all the methods
-in the class.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Class</div>
-<div class="link" onClick="javascript:toggle('d0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; C</div>
-<div class="parent" id="d0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d1')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd1">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Z</div>
-<div class="parent" id="d1">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;m&nbsp;()</div>
-</div>
-<a name="method"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each method, summed over all the classes
-that contain a method with the same name.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Method</div>
-<div class="link" onClick="javascript:toggle('e0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xe0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8 &nbsp;&nbsp;100.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;3+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; m</div>
-<div class="parent" id="e0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;C.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;Z.m&nbsp;()</div>
-</div>
-
-</body>
-</html>
diff --git a/tools/dmtracedump/tests/filters/testWaitingSoloCrossThreadTrace b/tools/dmtracedump/tests/filters/testWaitingSoloCrossThreadTrace
deleted file mode 100644
index e73f040..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingSoloCrossThreadTrace
+++ /dev/null
Binary files differ
diff --git a/tools/dmtracedump/tests/filters/testWaitingSoloSingleThread b/tools/dmtracedump/tests/filters/testWaitingSoloSingleThread
deleted file mode 100644
index 3f0753e..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingSoloSingleThread
+++ /dev/null
@@ -1,10 +0,0 @@
-#         _____
-#    ____|Z    |____  ____
-# __|C              ||Z   |__
-#
-0 1 C
-2 1  Z
-4 1  Z
-6 1 C
-6 1 Z
-8 1 Z
diff --git a/tools/dmtracedump/tests/filters/testWaitingSoloSingleThreadExpected b/tools/dmtracedump/tests/filters/testWaitingSoloSingleThreadExpected
deleted file mode 100644
index 8d22b63..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingSoloSingleThreadExpected
+++ /dev/null
@@ -1,194 +0,0 @@
-<html>
-<head>
-<script type="text/javascript" src="(null)sortable.js"></script>
-<script langugage="javascript">
-function toggle(item) {
-    obj=document.getElementById(item);
-    visible=(obj.style.display!="none" && obj.style.display!="");
-    key=document.getElementById("x" + item);
-    if (visible) {
-        obj.style.display="none";
-        key.innerHTML="+";
-    } else {
-        obj.style.display="block";
-        key.innerHTML="-";
-    }
-}
-function onMouseOver(obj) {
-    obj.style.background="lightblue";
-}
-function onMouseOut(obj) {
-    obj.style.background="white";
-}
-</script>
-<style type="text/css">
-div { font-family: courier; font-size: 13 }
-div.parent { margin-left: 15; display: none }
-div.leaf { margin-left: 10 }
-div.header { margin-left: 10 }
-div.link { margin-left: 10; cursor: move }
-span.parent { padding-right: 10; }
-span.leaf { padding-right: 10; }
-a img { border: 0;}
-table.sortable th { border-width: 0px 1px 1px 1px; background-color: #ccc;}
-a { text-decoration: none; }
-a:hover { text-decoration: underline; }
-table.sortable th, table.sortable td { text-align: left;}table.sortable tr.odd td { background-color: #ddd; }
-table.sortable tr.even td { background-color: #fff; }
-</style>
-</head><body>
-
-<a name="contents"></a>
-<h2>Table of Contents</h2>
-<ul>
-  <li><a href="#exclusive">Exclusive profile</a></li>
-  <li><a href="#inclusive">Inclusive profile</a></li>
-  <li><a href="#thread">Thread profile</a></li>
-  <li><a href="#class">Class/method profile</a></li>
-  <li><a href="#method">Method/class profile</a></li>
-</ul>
-
-<a name="exclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-Total cycles: 8
-
-<br><br>
-Exclusive elapsed times for each method, not including time spent in
-children, sorted by exclusive time.
-
-<br><br>
-<pre>
-    Usecs  self %  sum %  Method
-        4   50.00  50.00  <a href="#m1">[1]</a> C.m ()
-        4   50.00 100.00  <a href="#m2">[2]</a> Z.m ()
-</pre>
-<a name="inclusive"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Inclusive elapsed times for each method and its parents and children,
-sorted by inclusive time.
-
-<br><br>
-<pre>
-index  %/total %/self  index     calls         usecs name
-<a name="m0"></a>----------------------------------------------------
-[0]    100.0%                     0+0              8 (toplevel)
-                 0.0%   excl                       0
-                75.0%    <a href="#m1">[1]</a>      1/1              6 C.m ()
-                25.0%    <a href="#m2">[2]</a>      1/2              2 Z.m ()
-<a name="m1"></a>----------------------------------------------------
-               100.0%    <a href="#m0">[0]</a>      1/1              6 (toplevel)
-[1]     75.0%                     1+0              6 C.m ()
-                66.7%   excl                       4
-                33.3%    <a href="#m2">[2]</a>      1/2              2 Z.m ()
-<a name="m2"></a>----------------------------------------------------
-                50.0%    <a href="#m0">[0]</a>      1/2              2 (toplevel)
-                50.0%    <a href="#m1">[1]</a>      1/2              2 C.m ()
-[2]     50.0%                     2+0              4 Z.m ()
-               100.0%   excl                       4
-</pre>
-<a name="thread"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Elapsed times for each thread, sorted by elapsed time.
-Also includes percentage of time spent during the <i>execution</i> of any filters.
-
-<br><br>
-<pre>
-    Usecs   self %  sum %  FirstFilter %  SecondFilter %  RepeatedFilter %  tid   ThreadName
-        8  100.00 100.00  75.00   0.00   0.00      1 main
-        0    0.00 100.00    nan    nan    nan      2 foo
-        0    0.00 100.00    nan    nan    nan      3 bar
-        0    0.00 100.00    nan    nan    nan      4 blah
-</pre><br />
-
-Break-down of portion of time spent by each thread while waiting on a filter method.
-<br/><br/>
-<pre>
-Filter: FirstFilter
-Total waiting cycles: 6 ( 75.00% of total)
-Details: 
-
- Waiting cycles    % of total waiting time   execution time while waiting    thread name
-         6                   100.00                     100.00               main
-         0                     0.00                       0.00               foo
-         0                     0.00                       0.00               bar
-         0                     0.00                       0.00               blah
-</pre>
-<br/><br/>
-<pre>
-Filter: SecondFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<br/><br/>
-<pre>
-Filter: RepeatedFilter
-Total waiting cycles: 0 (  0.00% of total)
-</pre>
-<a name="class"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each class, summed over all the methods
-in the class.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Class</div>
-<div class="link" onClick="javascript:toggle('d0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; C</div>
-<div class="parent" id="d0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;6&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;m&nbsp;()</div>
-</div>
-<div class="link" onClick="javascript:toggle('d1')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xd1">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4 &nbsp;&nbsp;&nbsp;50.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Z</div>
-<div class="parent" id="d1">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;m&nbsp;()</div>
-</div>
-<a name="method"></a>
-<hr>
-<a href="#contents">[Top]</a>
-<a href="#exclusive">[Exclusive]</a>
-<a href="#inclusive">[Inclusive]</a>
-<a href="#thread">[Thread]</a>
-<a href="#class">[Class]</a>
-<a href="#method">[Method]</a>
-<br><br>
-
-Exclusive elapsed time for each method, summed over all the classes
-that contain a method with the same name.
-
-<br><br>
-<div class="header"><span class="parent">&nbsp;</span>&nbsp;&nbsp;&nbsp;Cycles %/total Cumul.% &nbsp;Calls+Recur&nbsp; Method</div>
-<div class="link" onClick="javascript:toggle('e0')" onMouseOver="javascript:onMouseOver(this)" onMouseOut="javascript:onMouseOut(this)"><span class="parent" id="xe0">+</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;8 &nbsp;&nbsp;100.0 &nbsp;&nbsp;100.0 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;3+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; m</div>
-<div class="parent" id="e0">
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;6&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;1+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m1">[1]</a>&nbsp;C.m&nbsp;()</div>
-<div class="leaf"><span class="leaf">&nbsp;</span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;4&nbsp;&nbsp;&nbsp;&nbsp;50.0&nbsp;&nbsp;&nbsp;100.0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2+0&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href="#m2">[2]</a>&nbsp;Z.m&nbsp;()</div>
-</div>
-
-</body>
-</html>
diff --git a/tools/dmtracedump/tests/filters/testWaitingSoloSingleThreadTrace b/tools/dmtracedump/tests/filters/testWaitingSoloSingleThreadTrace
deleted file mode 100644
index 3a43c46..0000000
--- a/tools/dmtracedump/tests/filters/testWaitingSoloSingleThreadTrace
+++ /dev/null
Binary files differ
diff --git a/tools/gclog.py b/tools/gclog.py
deleted file mode 100755
index d008f6a..0000000
--- a/tools/gclog.py
+++ /dev/null
@@ -1,234 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright (C) 2009 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Parse event log output, looking for GC events.  Format them for human
-# consumption.
-#
-# ALL OUTPUT VALUES ARE APPROXIMATE.  The event log data format uses a
-# 12-bit floating-point representation, which means there aren't enough
-# bits to accurately represent anything but small integers.  Larger
-# values will be rounded off.
-#
-# The data is generated by dalvik/vm/alloc/HeapDebug.c.
-#
-
-import getopt
-import sys
-import os
-import re
-import time
-
-DEBUG = False       # DEBUG is a global variable
-
-
-def unfloat12(f12):
-    """Unpack a float12 value"""
-    if f12 < 0:
-        raise DataParseError, "bad float12 value %s" % f12
-    return (f12 & 0x1ff) << ((f12 >> 9) * 4)
-
-
-def parseGlobalInfo(value):
-    """Parse event0 (global info)"""
-    value = int(value)
-
-    # Global information:
-    #
-    # [63   ] Must be zero
-    # [62-24] ASCII process identifier
-    # [23-12] GC time in ms
-    # [11- 0] Bytes freed
-    id = (value >> 24) & 0xffffffffff
-    gctime = unfloat12((value >> 12) & 0xfff)
-    bytes_freed = unfloat12(value & 0xfff)
-
-    idstr = "%c%c%c%c%c" % ( \
-            (id >> 32) & 0xff, \
-            (id >> 24) & 0xff, \
-            (id >> 16) & 0xff, \
-            (id >> 8) & 0xff, \
-            id & 0xff )
-
-    return ( idstr, gctime, bytes_freed )
-
-
-def parseAggHeapStats(value):
-    """Parse event1 (aggregated heap stats)"""
-    value = int(value)
-
-    # Aggregated heap stats:
-    #
-    # [63-62] 10
-    # [61-60] Reserved; must be zero
-    # [59-48] Objects freed
-    # [47-36] Actual size (current footprint)
-    # [35-24] Allowed size (current hard max)
-    # [23-12] Objects allocated
-    # [11- 0] Bytes allocated
-    freed = unfloat12((value >> 48) & 0xfff)
-    footprint = unfloat12((value >> 36) & 0xfff)
-    allowed = unfloat12((value >> 24) & 0xfff)
-    objs = unfloat12((value >> 12) & 0xfff)
-    bytes = unfloat12(value & 0xfff)
-
-    return ( freed, footprint, allowed, objs, bytes )
-
-
-def parseZygoteStats(value):
-    """Parse event2 (zygote heap stats)"""
-    value = int(value)
-
-    # Zygote heap stats (except for the soft limit, which belongs to the
-    # active heap):
-    #
-    # [63-62] 11
-    # [61-60] Reserved; must be zero
-    # [59-48] Soft Limit (for the active heap)
-    # [47-36] Actual size (current footprint)
-    # [35-24] Allowed size (current hard max)
-    # [23-12] Objects allocated
-    # [11- 0] Bytes allocated
-    soft_limit = unfloat12((value >> 48) & 0xfff)
-    actual = unfloat12((value >> 36) & 0xfff)
-    allowed = unfloat12((value >> 24) & 0xfff)
-    objs = unfloat12((value >> 12) & 0xfff)
-    bytes = unfloat12(value & 0xfff)
-
-    return ( soft_limit, actual, allowed, objs, bytes )
-
-
-def parseExternalStats(value):
-    """Parse event3 (external allocation stats)"""
-    value = int(value)
-
-    # Report the current external allocation stats and the native heap
-    # summary.
-    #
-    # [63-48] Reserved; must be zero (TODO: put new data in these slots)
-    # [47-36] dlmalloc_footprint
-    # [35-24] mallinfo: total allocated space
-    # [23-12] External byte limit
-    # [11- 0] External bytes allocated
-    footprint = unfloat12((value >> 36) & 0xfff)    # currently disabled
-    total = unfloat12((value >> 24) & 0xfff)        # currently disabled
-    limit = unfloat12((value >> 12) & 0xfff)
-    bytes = unfloat12(value & 0xfff)
-
-    return ( footprint, total, limit, bytes )
-
-
-def handleGcInfo(procFilter, timestamp, pid, values):
-    """Handle a single dvm_gc_info event"""
-
-    pid = int(pid)
-
-    global_info = parseGlobalInfo(values[0])
-
-    if len(procFilter) > 0:
-        if global_info[0] != procFilter:
-            return
-
-    heap_stats = parseAggHeapStats(values[1])
-    zygote = parseZygoteStats(values[2])
-    external = parseExternalStats(values[3])
-
-    print "%s %s(%d) softlim=%dKB, extlim=%dKB, extalloc=%dKB" % \
-            (timestamp, global_info[0], pid, zygote[0]/1024, external[2]/1024, external[3]/1024)
-
-    if DEBUG:
-        # print "RAW: %s %s (%s,%s,%s,%s)" % \
-        #        (timestamp, pid, values[0], values[1], values[2], values[3])
-
-        print "+ id=\"%s\" time=%d freed=%d" % (global_info[0], global_info[1], global_info[2])
-        print "+  freed=%d foot=%d allow=%d objs=%d bytes=%d" % \
-                (heap_stats[0], heap_stats[1], heap_stats[2], heap_stats[3], heap_stats[4])
-        print "+  soft=%d act=%d allow=%d objs=%d bytes=%d" % \
-                (zygote[0], zygote[1], zygote[2], zygote[3], zygote[4])
-        print "+  foot=%d total=%d limit=%d alloc=%d" % \
-                (external[0], external[1], external[2], external[3])
-
-    print "  freed %d objects / %d bytes in %dms" % \
-            (heap_stats[0], global_info[2], global_info[1])
-
-
-def filterInput(logPipe, processFilter):
-    """Loop until EOF, pulling out GC events"""
-
-    # 04-29 20:31:00.334 I/dvm_gc_info(   69): [8320808730292729543,-8916699241518090181,-4006371297196337158,8165229]
-    gc_info_re = re.compile(r"""
-        (\d+-\d+\ \d+:\d+:\d+)\.\d+     # extract the date (#1), ignoring ms
-        .*                              # filler, usually " I/"
-        dvm_gc_info                     # only interested in GC info lines
-        \(\s*(\d+)\)                    # extract the pid (#2)
-        :\ \[                           # filler
-        ([0-9-]+),([0-9-]+),([0-9-]+),([0-9-]+) # four values, may be negative
-        \].*                            # junk to end of line
-        """, re.VERBOSE)
-
-    while True:
-        line = logPipe.readline()
-        if not line:
-            print "EOF hit"
-            return
-
-        match = gc_info_re.match(line)
-        if not match:
-            #print "no match on %s" % line.strip()
-            continue
-        else:
-            handleGcInfo(processFilter, match.group(1), match.group(2), ( match.group(3), \
-                    match.group(4), match.group(5), match.group(6) ) )
-
-def PrintUsage():
-  print "usage: %s [-p procFilter] [-d]" % sys.argv[0]
-
-
-def start():
-    """Entry point"""
-
-    global DEBUG
-
-    procFilter = ""
-
-    opts, args = getopt.getopt(sys.argv[1:], "hdp:")
-
-    for opt, val in opts:
-        if opt == "-h":
-            PrintUsage()
-            sys.exit(2)
-        elif opt == "-p":
-            procFilter = val
-        elif opt == "-d":
-            DEBUG = True
-
-    print "procfilter = %s" % procFilter
-    print "DEBUG = %s" % DEBUG
-
-    # launch a logcat and read from it
-    command = 'adb logcat -v time -b events'
-    logPipe = os.popen(command)
-
-
-    try:
-        filterInput(logPipe, procFilter)
-    except KeyboardInterrupt, err:
-        print "Stopping on keyboard interrupt."
-
-    logPipe.close()
-
-
-start()
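
For reference, the 12-bit packed encoding described in the comments of the script above (a 9-bit mantissa shifted by a 3-bit exponent counted in nibbles) decodes the same way in C. This is only an illustrative sketch mirroring the deleted unfloat12() helper; it is not code from the tree:

    #include <stdio.h>

    /* Decode an event-log "float12" value: the low 9 bits are the mantissa,
     * the high 3 bits shift it left in 4-bit steps (values are approximate). */
    static unsigned int unfloat12(unsigned int f12)
    {
        return (f12 & 0x1ff) << ((f12 >> 9) * 4);
    }

    int main(void)
    {
        printf("%u\n", unfloat12(0x7ff));   /* 511 << 12 == 2093056 */
        return 0;
    }
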
diff --git a/vm/AllocTracker.c b/vm/AllocTracker.c
index d371fd1..8e2c325 100644
--- a/vm/AllocTracker.c
+++ b/vm/AllocTracker.c
@@ -155,7 +155,7 @@
         const StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
         const Method* method = saveArea->method;
 
-        if (!dvmIsBreakFrame(fp)) {
+        if (!dvmIsBreakFrame((u4*) fp)) {
             pRec->stackElem[stackDepth].method = method;
             if (dvmIsNativeMethod(method)) {
                 pRec->stackElem[stackDepth].pc = 0;
diff --git a/vm/Android.mk b/vm/Android.mk
index aa04655..1ef5a97 100644
--- a/vm/Android.mk
+++ b/vm/Android.mk
@@ -86,13 +86,6 @@
     LOCAL_MODULE := libdvm_interp
     include $(BUILD_SHARED_LIBRARY)
 
-    # Derivation #4
-    WITH_JIT := true
-    include $(LOCAL_PATH)/ReconfigureDvm.mk
-
-    LOCAL_CFLAGS += $(target_smp_flag) -DWITH_INLINE_PROFILING
-    LOCAL_MODULE := libdvm_traceview
-    include $(BUILD_SHARED_LIBRARY)
 endif
 
 #
diff --git a/vm/BitVector.c b/vm/BitVector.c
new file mode 100644
index 0000000..b3d36ca
--- /dev/null
+++ b/vm/BitVector.c
@@ -0,0 +1,332 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Implementation of an expandable bit vector.
+ */
+#include "Dalvik.h"
+
+#include <stdlib.h>
+
+#define kBitVectorGrowth    4   /* increase by 4 u4s when limit hit */
+
+
+/*
+ * Allocate a bit vector with enough space to hold at least the specified
+ * number of bits.
+ */
+BitVector* dvmAllocBitVector(unsigned int startBits, bool expandable)
+{
+    BitVector* bv;
+    unsigned int count;
+
+    assert(sizeof(bv->storage[0]) == 4);        /* assuming 32-bit units */
+
+    bv = (BitVector*) malloc(sizeof(BitVector));
+
+    count = (startBits + 31) >> 5;
+
+    bv->storageSize = count;
+    bv->expandable = expandable;
+    bv->storage = (u4*) malloc(count * sizeof(u4));
+    memset(bv->storage, 0x00, count * sizeof(u4));
+    return bv;
+}
+
+/*
+ * Free a BitVector.
+ */
+void dvmFreeBitVector(BitVector* pBits)
+{
+    if (pBits == NULL)
+        return;
+
+    free(pBits->storage);
+    free(pBits);
+}
+
+/*
+ * "Allocate" the first-available bit in the bitmap.
+ *
+ * This is not synchronized.  The caller is expected to hold some sort of
+ * lock that prevents multiple threads from executing simultaneously in
+ * dvmAllocBit/dvmFreeBit.
+ */
+int dvmAllocBit(BitVector* pBits)
+{
+    unsigned int word, bit;
+
+retry:
+    for (word = 0; word < pBits->storageSize; word++) {
+        if (pBits->storage[word] != 0xffffffff) {
+            /*
+             * There are unallocated bits in this word.  Return the first.
+             */
+            bit = ffs(~(pBits->storage[word])) -1;
+            assert(bit < 32);
+            pBits->storage[word] |= 1 << bit;
+            return (word << 5) | bit;
+        }
+    }
+
+    /*
+     * Ran out of space, allocate more if we're allowed to.
+     */
+    if (!pBits->expandable)
+        return -1;
+
+    pBits->storage = (u4*)realloc(pBits->storage,
+                    (pBits->storageSize + kBitVectorGrowth) * sizeof(u4));
+    memset(&pBits->storage[pBits->storageSize], 0x00,
+        kBitVectorGrowth * sizeof(u4));
+    pBits->storageSize += kBitVectorGrowth;
+    goto retry;
+}
+
+/*
+ * Mark the specified bit as "set".
+ */
+void dvmSetBit(BitVector* pBits, unsigned int num)
+{
+    if (num >= pBits->storageSize * sizeof(u4) * 8) {
+        if (!pBits->expandable) {
+            LOGE("Attempt to set bit outside valid range (%d, limit is %d)\n",
+                num, pBits->storageSize * sizeof(u4) * 8);
+            dvmAbort();
+        }
+
+        /* Round up to word boundaries for "num+1" bits */
+        unsigned int newSize = (num + 1 + 31) >> 5;
+        assert(newSize > pBits->storageSize);
+        pBits->storage = (u4*)realloc(pBits->storage, newSize * sizeof(u4));
+        if (pBits->storage == NULL) {
+            LOGE("BitVector expansion to %d failed\n", newSize * sizeof(u4));
+            dvmAbort();
+        }
+        memset(&pBits->storage[pBits->storageSize], 0x00,
+            (newSize - pBits->storageSize) * sizeof(u4));
+        pBits->storageSize = newSize;
+    }
+
+    pBits->storage[num >> 5] |= 1 << (num & 0x1f);
+}
+
+/*
+ * Mark the specified bit as "clear".
+ */
+void dvmClearBit(BitVector* pBits, unsigned int num)
+{
+    assert(num < pBits->storageSize * sizeof(u4) * 8);
+
+    pBits->storage[num >> 5] &= ~(1 << (num & 0x1f));
+}
+
+/*
+ * Mark all bits as "clear".
+ */
+void dvmClearAllBits(BitVector* pBits)
+{
+    unsigned int count = pBits->storageSize;
+    memset(pBits->storage, 0, count * sizeof(u4));
+}
+
+/*
+ * Mark the specified number of bits as "set". Unlike ClearAll, we cannot
+ * blindly set every bit: the unused bits beyond numBits would end up set to
+ * one and would confuse the iterator.
+ */
+void dvmSetInitialBits(BitVector* pBits, unsigned int numBits)
+{
+    unsigned int idx;
+    assert(((numBits + 31) >> 5) <= pBits->storageSize);
+    for (idx = 0; idx < (numBits >> 5); idx++) {
+        pBits->storage[idx] = -1;
+    }
+    unsigned int remNumBits = numBits & 0x1f;
+    if (remNumBits) {
+        pBits->storage[idx] = (1 << remNumBits) - 1;
+    }
+}
+
+/*
+ * Determine whether or not the specified bit is set.
+ */
+bool dvmIsBitSet(const BitVector* pBits, unsigned int num)
+{
+    assert(num < pBits->storageSize * sizeof(u4) * 8);
+
+    unsigned int val = pBits->storage[num >> 5] & (1 << (num & 0x1f));
+    return (val != 0);
+}
+
+/*
+ * Count the number of bits that are set.
+ */
+int dvmCountSetBits(const BitVector* pBits)
+{
+    unsigned int word;
+    unsigned int count = 0;
+
+    for (word = 0; word < pBits->storageSize; word++) {
+        u4 val = pBits->storage[word];
+
+        if (val != 0) {
+            if (val == 0xffffffff) {
+                count += 32;
+            } else {
+                /* count the number of '1' bits */
+                while (val != 0) {
+                    val &= val - 1;
+                    count++;
+                }
+            }
+        }
+    }
+
+    return count;
+}
+
+/*
+ * If the vector sizes don't match, log an error and abort.
+ */
+static void checkSizes(const BitVector* bv1, const BitVector* bv2)
+{
+    if (bv1->storageSize != bv2->storageSize) {
+        LOGE("Mismatched vector sizes (%d, %d)\n",
+            bv1->storageSize, bv2->storageSize);
+        dvmAbort();
+    }
+}
+
+/*
+ * Copy one whole vector into another. Only do this when both vectors have
+ * the same size.
+ */
+void dvmCopyBitVector(BitVector *dest, const BitVector *src)
+{
+    /* if dest is expandable and < src, we could expand dest to match */
+    checkSizes(dest, src);
+
+    memcpy(dest->storage, src->storage, sizeof(u4) * dest->storageSize);
+}
+
+/*
+ * Intersect two bit vectors and store the result to the dest vector.
+ */
+bool dvmIntersectBitVectors(BitVector *dest, const BitVector *src1,
+                            const BitVector *src2)
+{
+    if (dest->storageSize != src1->storageSize ||
+        dest->storageSize != src2->storageSize ||
+        dest->expandable != src1->expandable ||
+        dest->expandable != src2->expandable)
+        return false;
+
+    unsigned int idx;
+    for (idx = 0; idx < dest->storageSize; idx++) {
+        dest->storage[idx] = src1->storage[idx] & src2->storage[idx];
+    }
+    return true;
+}
+
+/*
+ * Unify two bit vectors and store the result to the dest vector.
+ */
+bool dvmUnifyBitVectors(BitVector *dest, const BitVector *src1,
+                        const BitVector *src2)
+{
+    if (dest->storageSize != src1->storageSize ||
+        dest->storageSize != src2->storageSize ||
+        dest->expandable != src1->expandable ||
+        dest->expandable != src2->expandable)
+        return false;
+
+    unsigned int idx;
+    for (idx = 0; idx < dest->storageSize; idx++) {
+        dest->storage[idx] = src1->storage[idx] | src2->storage[idx];
+    }
+    return true;
+}
+
+/*
+ * Compare two bit vectors and return true if any difference is seen.
+ */
+bool dvmCompareBitVectors(const BitVector *src1, const BitVector *src2)
+{
+    if (src1->storageSize != src2->storageSize ||
+        src1->expandable != src2->expandable)
+        return true;
+
+    unsigned int idx;
+    for (idx = 0; idx < src1->storageSize; idx++) {
+        if (src1->storage[idx] != src2->storage[idx]) return true;
+    }
+    return false;
+}
+
+/* Initialize the iterator structure */
+void dvmBitVectorIteratorInit(BitVector* pBits, BitVectorIterator* iterator)
+{
+    iterator->pBits = pBits;
+    iterator->bitSize = pBits->storageSize * sizeof(u4) * 8;
+    iterator->idx = 0;
+}
+
+/* Return the next position set to 1. -1 means end-of-vector reached */
+int dvmBitVectorIteratorNext(BitVectorIterator* iterator)
+{
+    const BitVector* pBits = iterator->pBits;
+    u4 bitIndex = iterator->idx;
+
+    assert(iterator->bitSize == pBits->storageSize * sizeof(u4) * 8);
+    if (bitIndex >= iterator->bitSize) return -1;
+
+    for (; bitIndex < iterator->bitSize; bitIndex++) {
+        unsigned int wordIndex = bitIndex >> 5;
+        unsigned int mask = 1 << (bitIndex & 0x1f);
+        if (pBits->storage[wordIndex] & mask) {
+            iterator->idx = bitIndex+1;
+            return bitIndex;
+        }
+    }
+    /* No more set bits */
+    return -1;
+}
+
+
+/*
+ * Merge the contents of "src" into "dst", checking to see if this causes
+ * any changes to occur.  This is a logical OR.
+ *
+ * Returns "true" if the contents of the destination vector were modified.
+ */
+bool dvmCheckMergeBitVectors(BitVector* dst, const BitVector* src)
+{
+    bool changed = false;
+
+    checkSizes(dst, src);
+
+    unsigned int idx;
+    for (idx = 0; idx < dst->storageSize; idx++) {
+        u4 merged = src->storage[idx] | dst->storage[idx];
+        if (dst->storage[idx] != merged) {
+            dst->storage[idx] = merged;
+            changed = true;
+        }
+    }
+
+    return changed;
+}
diff --git a/vm/BitVector.h b/vm/BitVector.h
new file mode 100644
index 0000000..d1a0ca3
--- /dev/null
+++ b/vm/BitVector.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Expandable bit vector, used for tracking resources.
+ */
+#ifndef _DALVIK_BITVECTOR
+#define _DALVIK_BITVECTOR
+
+/*
+ * Expanding bitmap, used for tracking resources.  Bits are numbered starting
+ * from zero.
+ *
+ * All operations on a BitVector are unsynchronized.
+ */
+typedef struct BitVector {
+    bool    expandable;     /* expand bitmap if we run out? */
+    u4      storageSize;    /* current size, in 32-bit words */
+    u4*     storage;
+} BitVector;
+
+/* Handy iterator to walk through the bit positions set to 1 */
+typedef struct BitVectorIterator {
+    BitVector *pBits;
+    u4 idx;
+    u4 bitSize;
+} BitVectorIterator;
+
+/* allocate a bit vector with enough space to hold "startBits" bits */
+BitVector* dvmAllocBitVector(unsigned int startBits, bool expandable);
+void dvmFreeBitVector(BitVector* pBits);
+
+/*
+ * dvmAllocBit always allocates the first possible bit.  If we run out of
+ * space in the bitmap, and it's not marked expandable, dvmAllocBit
+ * returns -1.
+ *
+ * dvmSetBit sets the specified bit, expanding the vector if necessary
+ * (and possible).  Attempting to set a bit past the limit of a non-expandable
+ * bit vector will cause a fatal error.
+ *
+ * dvmSetInitialBits sets all bits in [0..numBits-1]. Won't expand the vector.
+ *
+ * dvmIsBitSet returns "true" if the bit is set.
+ */
+int dvmAllocBit(BitVector* pBits);
+void dvmSetBit(BitVector* pBits, unsigned int num);
+void dvmClearBit(BitVector* pBits, unsigned int num);
+void dvmClearAllBits(BitVector* pBits);
+void dvmSetInitialBits(BitVector* pBits, unsigned int numBits);
+bool dvmIsBitSet(const BitVector* pBits, unsigned int num);
+
+/* count the number of bits that have been set */
+int dvmCountSetBits(const BitVector* pBits);
+
+/* copy one vector to another of equal size */
+void dvmCopyBitVector(BitVector *dest, const BitVector *src);
+
+/*
+ * Intersect two bit vectors and store the result to the dest vector.
+ */
+bool dvmIntersectBitVectors(BitVector *dest, const BitVector *src1,
+                            const BitVector *src2);
+
+/*
+ * Unify two bit vectors and store the result to the dest vector.
+ */
+bool dvmUnifyBitVectors(BitVector *dest, const BitVector *src1,
+                        const BitVector *src2);
+
+/*
+ * Merge the contents of "src" into "dst", checking to see if this causes
+ * any changes to occur.
+ *
+ * Returns "true" if the contents of the destination vector were modified.
+ */
+bool dvmCheckMergeBitVectors(BitVector* dst, const BitVector* src);
+
+/*
+ * Compare two bit vectors and return true if any difference is seen.
+ */
+bool dvmCompareBitVectors(const BitVector *src1, const BitVector *src2);
+
+/* Initialize the iterator structure */
+void dvmBitVectorIteratorInit(BitVector* pBits, BitVectorIterator* iterator);
+
+/* Return the next position set to 1. -1 means end-of-vector reached */
+int dvmBitVectorIteratorNext(BitVectorIterator* iterator);
+
+#endif /*_DALVIK_BITVECTOR*/
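
A minimal usage sketch of the new API, assuming the caller already includes Dalvik.h; the wrapper function itself is hypothetical and not part of this change:

    static void exampleBitVectorUse(void)
    {
        BitVector* bv = dvmAllocBitVector(64, true);   /* expandable */
        BitVectorIterator iter;
        int bit;

        dvmSetBit(bv, 3);
        dvmSetBit(bv, 40);
        assert(dvmIsBitSet(bv, 3));
        assert(dvmCountSetBits(bv) == 2);

        /* walk the set bits: yields 3, then 40, then -1 */
        dvmBitVectorIteratorInit(bv, &iter);
        while ((bit = dvmBitVectorIteratorNext(&iter)) != -1)
            LOGV("bit %d set\n", bit);

        dvmFreeBitVector(bv);
    }
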
diff --git a/vm/CheckJni.c b/vm/CheckJni.c
index fa65b50..922646e 100644
--- a/vm/CheckJni.c
+++ b/vm/CheckJni.c
@@ -255,10 +255,6 @@
     checkVirtualMethod(_env, _obj, _methid, __FUNCTION__)
 #define CHECK_STATIC_METHOD(_env, _clazz, _methid)                          \
     checkStaticMethod(_env, _clazz, _methid, __FUNCTION__)
-#define CHECK_METHOD_ARGS_A(_env, _methid, _args)                           \
-    checkMethodArgsA(_env, _methid, _args, __FUNCTION__)
-#define CHECK_METHOD_ARGS_V(_env, _methid, _args)                           \
-    checkMethodArgsV(_env, _methid, _args, __FUNCTION__)
 
 /*
  * Prints trace messages when a native method calls a JNI function such as
@@ -350,8 +346,7 @@
         printWarn = true;
 
         /* this is a bad idea -- need to throw as we exit, or abort func */
-        //dvmThrowException("Ljava/lang/RuntimeException;",
-        //    "invalid use of JNI env ptr");
+        //dvmThrowRuntimeException("invalid use of JNI env ptr");
     } else if (((JNIEnvExt*) env)->self != dvmThreadSelf()) {
         /* correct JNIEnv*; make sure the "self" pointer is correct */
         LOGE("JNI ERROR: env->self != thread-self (%p vs. %p)",
@@ -479,14 +474,6 @@
     if (jobj == NULL)
         return;
 
-    if (dvmIsWeakGlobalRef(jobj)) {
-        /*
-         * Normalize and continue.  This will tell us if the PhantomReference
-         * object is valid.
-         */
-        jobj = dvmNormalizeWeakGlobalRef((jweak) jobj);
-    }
-
     if (dvmGetJNIRefType(env, jobj) == JNIInvalidRefType) {
         LOGW("JNI WARNING: %p is not a valid JNI reference", jobj);
         printWarn = true;
@@ -692,26 +679,12 @@
  */
 static void checkClassName(JNIEnv* env, const char* className, const char* func)
 {
-    const char* cp;
-
-    /* quick check for illegal chars */
-    cp = className;
-    while (*cp != '\0') {
-        if (*cp == '.')     /* catch "java.lang.String" */
-            goto fail;
-        cp++;
+    if (!dexIsValidClassName(className, false)) {
+        LOGW("JNI WARNING: illegal class name '%s' (%s)", className, func);
+        LOGW("             (should be formed like 'dalvik/system/DexFile')");
+        LOGW("             or '[Ldalvik/system/DexFile;' or '[[B')");
+        abortMaybe();
     }
-    if (*(cp-1) == ';' && *className == 'L')
-        goto fail;         /* catch "Ljava/lang/String;" */
-
-    // TODO: need a more rigorous check here
-
-    return;
-
-fail:
-    LOGW("JNI WARNING: illegal class name '%s' (%s)", className, func);
-    LOGW("             (should be formed like 'java/lang/String')");
-    abortMaybe();
 }
 
 /*
@@ -920,83 +893,6 @@
     JNI_EXIT();
 }
 
-/*
- * Verify that the reference arguments being passed in are appropriate for
- * this method.
- *
- * At a minimum we want to make sure that the argument is a valid
- * reference.  We can also do a class lookup on the method signature
- * and verify that the object is an instance of the appropriate class,
- * but that's more expensive.
- *
- * The basic tests are redundant when indirect references are enabled,
- * since reference arguments must always be converted explicitly.  An
- * instanceof test would not be redundant, but we're not doing that at
- * this time.
- */
-static void checkMethodArgsV(JNIEnv* env, jmethodID methodID, va_list args,
-    const char* func)
-{
-#ifndef USE_INDIRECT_REF
-    JNI_ENTER();
-
-    const Method* meth = (const Method*) methodID;
-    const char* desc = meth->shorty;
-
-    LOGV("V-checking %s.%s:%s...", meth->clazz->descriptor, meth->name, desc);
-
-    while (*++desc != '\0') {       /* pre-incr to skip return type */
-        switch (*desc) {
-        case 'L':
-            {     /* 'shorty' descr uses L for all refs, incl array */
-                jobject argObj = va_arg(args, jobject);
-                checkObject0(env, argObj, func);
-            }
-            break;
-        case 'D':       /* 8-byte double */
-        case 'J':       /* 8-byte long */
-        case 'F':       /* floats normalized to doubles */
-            (void) va_arg(args, u8);
-            break;
-        default:        /* Z B C S I -- all passed as 32-bit integers */
-            (void) va_arg(args, u4);
-            break;
-        }
-    }
-
-    JNI_EXIT();
-#endif
-}
-
-/*
- * Same purpose as checkMethodArgsV, but with arguments in an array of
- * jvalue structs.
- */
-static void checkMethodArgsA(JNIEnv* env, jmethodID methodID, jvalue* args,
-    const char* func)
-{
-#ifndef USE_INDIRECT_REF
-    JNI_ENTER();
-
-    const Method* meth = (const Method*) methodID;
-    const char* desc = meth->shorty;
-    int idx = 0;
-
-    LOGV("A-checking %s.%s:%s...", meth->clazz->descriptor, meth->name, desc);
-
-    while (*++desc != '\0') {       /* pre-incr to skip return type */
-        if (*desc == 'L') {
-            jobject argObj = args[idx].l;
-            checkObject0(env, argObj, func);
-        }
-
-        idx++;
-    }
-
-    JNI_EXIT();
-#endif
-}
-
 
 /*
  * ===========================================================================
@@ -1056,7 +952,7 @@
     uLong adler = 0;
     if (!modOkay) {
         adler = adler32(0L, Z_NULL, 0);
-        adler = adler32(adler, buf, len);
+        adler = adler32(adler, (const Bytef*)buf, len);
         *(uLong*)newBuf = adler;
     }
 
@@ -1142,7 +1038,7 @@
      */
     if (!modOkay) {
         uLong adler = adler32(0L, Z_NULL, 0);
-        adler = adler32(adler, dataBuf, len);
+        adler = adler32(adler, (const Bytef*)dataBuf, len);
         if (pExtra->adler != adler) {
             LOGE("JNI: buffer modified (0x%08lx vs 0x%08lx) at addr %p",
                 pExtra->adler, adler, dataBuf);
@@ -1467,7 +1363,6 @@
 {
     CHECK_ENTER(env, kFlag_Default | kFlag_ExcepOkay);
     CHECK_OBJECT(env, globalRef);
-#ifdef USE_INDIRECT_REF
     if (globalRef != NULL &&
         dvmGetJNIRefType(env, globalRef) != JNIGlobalRefType)
     {
@@ -1475,7 +1370,6 @@
             globalRef, dvmGetJNIRefType(env, globalRef));
         abortMaybe();
     } else
-#endif
     {
         BASE_ENV(env)->DeleteGlobalRef(env, globalRef);
     }
@@ -1496,7 +1390,6 @@
 {
     CHECK_ENTER(env, kFlag_Default | kFlag_ExcepOkay);
     CHECK_OBJECT(env, localRef);
-#ifdef USE_INDIRECT_REF
     if (localRef != NULL &&
         dvmGetJNIRefType(env, localRef) != JNILocalRefType)
     {
@@ -1504,7 +1397,6 @@
             localRef, dvmGetJNIRefType(env, localRef));
         abortMaybe();
     } else
-#endif
     {
         BASE_ENV(env)->DeleteLocalRef(env, localRef);
     }
@@ -1547,14 +1439,9 @@
     CHECK_ENTER(env, kFlag_Default);
     CHECK_CLASS(env, clazz);
     jobject result;
-    va_list args, tmpArgs;
+    va_list args;
 
     va_start(args, methodID);
-
-    va_copy(tmpArgs, args);
-    CHECK_METHOD_ARGS_V(env, methodID, tmpArgs);
-    va_end(tmpArgs);
-
     result = BASE_ENV(env)->NewObjectV(env, clazz, methodID, args);
     va_end(args);
 
@@ -1568,11 +1455,6 @@
     CHECK_CLASS(env, clazz);
     jobject result;
 
-    va_list tmpArgs;
-    va_copy(tmpArgs, args);
-    CHECK_METHOD_ARGS_V(env, methodID, tmpArgs);
-    va_end(tmpArgs);
-
     result = BASE_ENV(env)->NewObjectV(env, clazz, methodID, args);
     CHECK_EXIT(env);
     return result;
@@ -1584,7 +1466,6 @@
     CHECK_CLASS(env, clazz);
     jobject result;
 
-    CHECK_METHOD_ARGS_A(env, methodID, args);
     result = BASE_ENV(env)->NewObjectA(env, clazz, methodID, args);
     CHECK_EXIT(env);
     return result;
@@ -1762,11 +1643,8 @@
         CHECK_SIG(env, methodID, _retsig, false);                           \
         CHECK_VIRTUAL_METHOD(env, obj, methodID);                           \
         _retdecl;                                                           \
-        va_list args, tmpArgs;                                              \
+        va_list args;                                                       \
         va_start(args, methodID);                                           \
-        va_copy(tmpArgs, args);                                             \
-        CHECK_METHOD_ARGS_V(env, methodID, tmpArgs);                        \
-        va_end(tmpArgs);                                                    \
         _retasgn BASE_ENV(env)->Call##_jname##MethodV(env, obj, methodID,   \
             args);                                                          \
         va_end(args);                                                       \
@@ -1781,10 +1659,6 @@
         CHECK_SIG(env, methodID, _retsig, false);                           \
         CHECK_VIRTUAL_METHOD(env, obj, methodID);                           \
         _retdecl;                                                           \
-        va_list tmpArgs;                                                    \
-        va_copy(tmpArgs, args);                                             \
-        CHECK_METHOD_ARGS_V(env, methodID, tmpArgs);                        \
-        va_end(tmpArgs);                                                    \
         _retasgn BASE_ENV(env)->Call##_jname##MethodV(env, obj, methodID,   \
             args);                                                          \
         CHECK_EXIT(env);                                                    \
@@ -1798,13 +1672,12 @@
         CHECK_SIG(env, methodID, _retsig, false);                           \
         CHECK_VIRTUAL_METHOD(env, obj, methodID);                           \
         _retdecl;                                                           \
-        CHECK_METHOD_ARGS_A(env, methodID, args);                           \
         _retasgn BASE_ENV(env)->Call##_jname##MethodA(env, obj, methodID,   \
             args);                                                          \
         CHECK_EXIT(env);                                                    \
         return _retok;                                                      \
     }
-CALL_VIRTUAL(jobject, Object, Object* result, result=, result, 'L');
+CALL_VIRTUAL(jobject, Object, Object* result, result=(Object*), result, 'L');
 CALL_VIRTUAL(jboolean, Boolean, jboolean result, result=, result, 'Z');
 CALL_VIRTUAL(jbyte, Byte, jbyte result, result=, result, 'B');
 CALL_VIRTUAL(jchar, Char, jchar result, result=, result, 'C');
@@ -1826,11 +1699,8 @@
         CHECK_SIG(env, methodID, _retsig, false);                           \
         CHECK_VIRTUAL_METHOD(env, obj, methodID);                           \
         _retdecl;                                                           \
-        va_list args, tmpArgs;                                              \
+        va_list args;                                                       \
         va_start(args, methodID);                                           \
-        va_copy(tmpArgs, args);                                             \
-        CHECK_METHOD_ARGS_V(env, methodID, tmpArgs);                        \
-        va_end(tmpArgs);                                                    \
         _retasgn BASE_ENV(env)->CallNonvirtual##_jname##MethodV(env, obj,   \
             clazz, methodID, args);                                         \
         va_end(args);                                                       \
@@ -1846,10 +1716,6 @@
         CHECK_SIG(env, methodID, _retsig, false);                           \
         CHECK_VIRTUAL_METHOD(env, obj, methodID);                           \
         _retdecl;                                                           \
-        va_list tmpArgs;                                                    \
-        va_copy(tmpArgs, args);                                             \
-        CHECK_METHOD_ARGS_V(env, methodID, tmpArgs);                        \
-        va_end(tmpArgs);                                                    \
         _retasgn BASE_ENV(env)->CallNonvirtual##_jname##MethodV(env, obj,   \
             clazz, methodID, args);                                         \
         CHECK_EXIT(env);                                                    \
@@ -1864,13 +1730,12 @@
         CHECK_SIG(env, methodID, _retsig, false);                           \
         CHECK_VIRTUAL_METHOD(env, obj, methodID);                           \
         _retdecl;                                                           \
-        CHECK_METHOD_ARGS_A(env, methodID, args);                           \
         _retasgn BASE_ENV(env)->CallNonvirtual##_jname##MethodA(env, obj,   \
             clazz, methodID, args);                                         \
         CHECK_EXIT(env);                                                    \
         return _retok;                                                      \
     }
-CALL_NONVIRTUAL(jobject, Object, Object* result, result=, result, 'L');
+CALL_NONVIRTUAL(jobject, Object, Object* result, result=(Object*), result, 'L');
 CALL_NONVIRTUAL(jboolean, Boolean, jboolean result, result=, result, 'Z');
 CALL_NONVIRTUAL(jbyte, Byte, jbyte result, result=, result, 'B');
 CALL_NONVIRTUAL(jchar, Char, jchar result, result=, result, 'C');
@@ -1891,11 +1756,8 @@
         CHECK_SIG(env, methodID, _retsig, true);                            \
         CHECK_STATIC_METHOD(env, clazz, methodID);                          \
         _retdecl;                                                           \
-        va_list args, tmpArgs;                                              \
+        va_list args;                                                       \
         va_start(args, methodID);                                           \
-        va_copy(tmpArgs, args);                                             \
-        CHECK_METHOD_ARGS_V(env, methodID, tmpArgs);                        \
-        va_end(tmpArgs);                                                    \
         _retasgn BASE_ENV(env)->CallStatic##_jname##MethodV(env, clazz,     \
             methodID, args);                                                \
         va_end(args);                                                       \
@@ -1910,10 +1772,6 @@
         CHECK_SIG(env, methodID, _retsig, true);                            \
         CHECK_STATIC_METHOD(env, clazz, methodID);                          \
         _retdecl;                                                           \
-        va_list tmpArgs;                                                    \
-        va_copy(tmpArgs, args);                                             \
-        CHECK_METHOD_ARGS_V(env, methodID, tmpArgs);                        \
-        va_end(tmpArgs);                                                    \
         _retasgn BASE_ENV(env)->CallStatic##_jname##MethodV(env, clazz,     \
             methodID, args);                                                \
         CHECK_EXIT(env);                                                    \
@@ -1927,13 +1785,12 @@
         CHECK_SIG(env, methodID, _retsig, true);                            \
         CHECK_STATIC_METHOD(env, clazz, methodID);                          \
         _retdecl;                                                           \
-        CHECK_METHOD_ARGS_A(env, methodID, args);                           \
         _retasgn BASE_ENV(env)->CallStatic##_jname##MethodA(env, clazz,     \
             methodID, args);                                                \
         CHECK_EXIT(env);                                                    \
         return _retok;                                                      \
     }
-CALL_STATIC(jobject, Object, Object* result, result=, result, 'L');
+CALL_STATIC(jobject, Object, Object* result, result=(Object*), result, 'L');
 CALL_STATIC(jboolean, Boolean, jboolean result, result=, result, 'Z');
 CALL_STATIC(jbyte, Byte, jbyte result, result=, result, 'B');
 CALL_STATIC(jchar, Char, jchar result, result=, result, 'C');
@@ -1973,7 +1830,7 @@
     result = BASE_ENV(env)->GetStringChars(env, string, isCopy);
     if (((JNIEnvExt*)env)->forceDataCopy && result != NULL) {
         // TODO: fix for indirect
-        int len = dvmStringLen(string) * 2;
+        int len = dvmStringLen((StringObject*) string) * 2;
         result = (const jchar*) createGuardedCopy(result, len, false);
         if (isCopy != NULL)
             *isCopy = JNI_TRUE;
@@ -2029,7 +1886,7 @@
     result = BASE_ENV(env)->GetStringUTFChars(env, string, isCopy);
     if (((JNIEnvExt*)env)->forceDataCopy && result != NULL) {
         // TODO: fix for indirect
-        int len = dvmStringUtf8ByteLen(string) + 1;
+        int len = dvmStringUtf8ByteLen((StringObject*) string) + 1;
         result = (const char*) createGuardedCopy(result, len, false);
         if (isCopy != NULL)
             *isCopy = JNI_TRUE;
@@ -2319,7 +2176,7 @@
     result = BASE_ENV(env)->GetStringCritical(env, string, isCopy);
     if (((JNIEnvExt*)env)->forceDataCopy && result != NULL) {
         // TODO: fix for indirect
-        int len = dvmStringLen(string) * 2;
+        int len = dvmStringLen((StringObject*) string) * 2;
         result = (const jchar*) createGuardedCopy(result, len, false);
         if (isCopy != NULL)
             *isCopy = JNI_TRUE;
diff --git a/vm/Common.h b/vm/Common.h
index 208ed20..7d8424e 100644
--- a/vm/Common.h
+++ b/vm/Common.h
@@ -40,6 +40,13 @@
 #define LIKELY(exp) (__builtin_expect((exp) != 0, true))
 #define UNLIKELY(exp) (__builtin_expect((exp) != 0, false))
 
+#define ALIGN_UP(x, n) (((size_t)(x) + (n) - 1) & ~((n) - 1))
+#define ALIGN_DOWN(x, n) ((size_t)(x) & -(n))
+#define ALIGN_UP_TO_PAGE_SIZE(p) ALIGN_UP(p, SYSTEM_PAGE_SIZE)
+#define ALIGN_DOWN_TO_PAGE_SIZE(p) ALIGN_DOWN(p, SYSTEM_PAGE_SIZE)
+
+#define CLZ(x) __builtin_clz(x)
+
 /*
  * If "very verbose" logging is enabled, make it equivalent to LOGV.
  * Otherwise, make it disappear.
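
A quick worked example of the new alignment helpers; note that the mask arithmetic assumes n is a power of two:

    size_t up   = ALIGN_UP(0x1003, 8);     /* (0x1003 + 7) & ~7  == 0x1008 */
    size_t down = ALIGN_DOWN(0x1003, 8);   /* 0x1003 & -8        == 0x1000 */
    int    lz   = CLZ(1u << 4);            /* 27 leading zeros in a 32-bit word */
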
diff --git a/vm/Dalvik.h b/vm/Dalvik.h
index 83c607c..81dee3c 100644
--- a/vm/Dalvik.h
+++ b/vm/Dalvik.h
@@ -26,9 +26,12 @@
 #include "Inlines.h"
 #include "Misc.h"
 #include "Bits.h"
+#include "BitVector.h"
 #include "libdex/SysUtil.h"
+#include "libdex/DexDebugInfo.h"
 #include "libdex/DexFile.h"
 #include "libdex/DexProto.h"
+#include "libdex/DexUtf.h"
 #include "libdex/ZipArchive.h"
 #include "DvmDex.h"
 #include "RawDexFile.h"
diff --git a/vm/DalvikVersion.h b/vm/DalvikVersion.h
index 8502b34..a1000ab 100644
--- a/vm/DalvikVersion.h
+++ b/vm/DalvikVersion.h
@@ -24,14 +24,14 @@
  * The version we show to tourists.
  */
 #define DALVIK_MAJOR_VERSION    1
-#define DALVIK_MINOR_VERSION    5
-#define DALVIK_BUG_VERSION      1
+#define DALVIK_MINOR_VERSION    6
+#define DALVIK_BUG_VERSION      0
 
 /*
  * VM build number.  This must change whenever something that affects the
  * way classes load changes, e.g. field ordering or vtable layout.  Changing
  * this guarantees that the optimized form of the DEX file is regenerated.
  */
-#define DALVIK_VM_BUILD         24
+#define DALVIK_VM_BUILD         26
 
 #endif /*_DALVIK_VERSION*/
diff --git a/vm/Ddm.c b/vm/Ddm.c
index ee3e9b2..528072e 100644
--- a/vm/Ddm.c
+++ b/vm/Ddm.c
@@ -509,7 +509,7 @@
 
     if (!dvmGenerateTrackedAllocationReport(&data, &len)) {
         /* assume OOM */
-        dvmThrowException("Ljava/lang/OutOfMemoryError;","recent alloc native");
+        dvmThrowOutOfMemoryError("recent alloc native");
         return NULL;
     }
 
diff --git a/vm/Debugger.c b/vm/Debugger.c
index d5215ca..87ea008 100644
--- a/vm/Debugger.c
+++ b/vm/Debugger.c
@@ -355,9 +355,9 @@
 {
     return (FrameId)(u4) frame;
 }
-static void* frameIdToFrame(FrameId id)
+static u4* frameIdToFrame(FrameId id)
 {
-    return (void*)(u4) id;
+    return (u4*)(u4) id;
 }
 
 
@@ -392,15 +392,12 @@
  */
 void dvmDbgActive(void)
 {
-    if (gDvm.debuggerActive)
+    if (DEBUGGER_ACTIVE)
         return;
 
     LOGI("Debugger is active\n");
     dvmInitBreakpoints();
-    gDvm.debuggerActive = true;
-#if defined(WITH_JIT)
-    dvmCompilerStateRefresh();
-#endif
+    dvmUpdateInterpBreak(kSubModeDebuggerActive, true);
 }
 
 /*
@@ -415,7 +412,7 @@
 {
     assert(gDvm.debuggerConnected);
 
-    gDvm.debuggerActive = false;
+    dvmUpdateInterpBreak(kSubModeDebuggerActive, false);
 
     dvmHashTableLock(gDvm.dbgRegistry);
     gDvm.debuggerConnected = false;
@@ -428,9 +425,6 @@
 
     dvmHashTableClear(gDvm.dbgRegistry);
     dvmHashTableUnlock(gDvm.dbgRegistry);
-#if defined(WITH_JIT)
-    dvmCompilerStateRefresh();
-#endif
 }
 
 /*
@@ -440,7 +434,7 @@
  */
 bool dvmDbgIsDebuggerConnected(void)
 {
-    return gDvm.debuggerActive;
+    return DEBUGGER_ACTIVE;
 }
 
 /*
@@ -581,7 +575,8 @@
 
     dvmHashTableLock(gDvm.loadedClasses);
     *pNumClasses = dvmHashTableNumEntries(gDvm.loadedClasses);
-    pRefType = *pClassRefBuf = malloc(sizeof(RefTypeId) * *pNumClasses);
+    pRefType = *pClassRefBuf =
+        (RefTypeId*)malloc(sizeof(RefTypeId) * *pNumClasses);
 
     if (dvmHashForeach(gDvm.loadedClasses, copyRefType, &pRefType) != 0) {
         LOGW("Warning: problem getting class list\n");
@@ -615,7 +610,7 @@
 
     /* over-allocate the return buffer */
     maxClasses = dvmHashTableNumEntries(gDvm.loadedClasses);
-    *pClassRefBuf = malloc(sizeof(RefTypeId) * maxClasses);
+    *pClassRefBuf = (RefTypeId*)malloc(sizeof(RefTypeId) * maxClasses);
 
     /*
      * Run through the list, looking for matches.
@@ -2137,7 +2132,7 @@
 
     framePtr = thread->curFrame;
     while (framePtr != NULL) {
-        if (!dvmIsBreakFrame(framePtr))
+        if (!dvmIsBreakFrame((u4*)framePtr))
             count++;
 
         framePtr = SAVEAREA_FROM_FP(framePtr)->prevFrame;
@@ -2173,7 +2168,7 @@
         const StackSaveArea* saveArea = SAVEAREA_FROM_FP(framePtr);
         const Method* method = saveArea->method;
 
-        if (!dvmIsBreakFrame(framePtr)) {
+        if (!dvmIsBreakFrame((u4*)framePtr)) {
             if (count == num) {
                 *pFrameId = frameToFrameId(framePtr);
                 if (dvmIsInterfaceClass(method->clazz))
@@ -2553,7 +2548,7 @@
     }
 
     /* need this for InstanceOnly filters */
-    Object* thisObj = getThisObject(throwFp);
+    Object* thisObj = getThisObject((u4*)throwFp);
 
     /*
      * Hand the event to the JDWP exception handler.  Note we're using the
@@ -2575,7 +2570,7 @@
  */
 void dvmDbgPostThreadStart(Thread* thread)
 {
-    if (gDvm.debuggerActive) {
+    if (DEBUGGER_ACTIVE) {
         dvmJdwpPostThreadChange(gDvm.jdwpState,
             objectToObjectId(thread->threadObj), true);
     }
@@ -2588,7 +2583,7 @@
  */
 void dvmDbgPostThreadDeath(Thread* thread)
 {
-    if (gDvm.debuggerActive) {
+    if (DEBUGGER_ACTIVE) {
         dvmJdwpPostThreadChange(gDvm.jdwpState,
             objectToObjectId(thread->threadObj), false);
     }
@@ -2826,8 +2821,10 @@
     *pResultTag = targetThread->invokeReq.resultTag;
     if (isTagPrimitive(targetThread->invokeReq.resultTag))
         *pResultValue = targetThread->invokeReq.resultValue.j;
-    else
-        *pResultValue = objectToObjectId(targetThread->invokeReq.resultValue.l);
+    else {
+        Object* tmpObj = (Object*)targetThread->invokeReq.resultValue.l;
+        *pResultValue = objectToObjectId(tmpObj);
+    }
     *pExceptObj = targetThread->invokeReq.exceptObj;
     err = targetThread->invokeReq.err;
 
@@ -2908,7 +2905,7 @@
         pReq->resultValue.j = 0; /*0xadadadad;*/
     } else if (pReq->resultTag == JT_OBJECT) {
         /* if no exception thrown, examine object result more closely */
-        u1 newTag = resultTagFromObject(pReq->resultValue.l);
+        u1 newTag = resultTagFromObject((Object*)pReq->resultValue.l);
         if (newTag != pReq->resultTag) {
             LOGVV("  JDWP promoted result from %d to %d\n",
                 pReq->resultTag, newTag);
@@ -2924,7 +2921,7 @@
          * We can't use the "tracked allocation" mechanism here because
          * the object is going to be handed off to a different thread.
          */
-        (void) objectToObjectId(pReq->resultValue.l);
+        (void) objectToObjectId((Object*)pReq->resultValue.l);
     }
 
     if (oldExcept != NULL) {
@@ -2978,7 +2975,7 @@
     u4 insnsSize = dvmGetMethodInsnsSize(method);
     AddressSetContext context;
 
-    result = calloc(1, sizeof(AddressSet) + (insnsSize/8) + 1);
+    result = (AddressSet*)calloc(1, sizeof(AddressSet) + (insnsSize/8) + 1);
     result->setSize = insnsSize;
 
     memset(&context, 0, sizeof(context));
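
The Debugger.c hunks above are mostly mechanical: explicit casts on malloc()/
calloc() results and on frame pointers, needed once Dvm.mk (below) adds
-Wc++-compat to the build. A minimal sketch of the pattern, using a made-up
helper rather than anything taken from the patch:

    #include <stdlib.h>

    typedef unsigned int u4;   /* assumption: stands in for Dalvik's u4 */

    static u4* allocFrameWords(size_t count)
    {
        /* In plain C the void* from malloc converts implicitly; with
         * -Wc++-compat (or a C++ compiler) the explicit cast is required,
         * which is exactly what the hunks above add at each call site. */
        return (u4*) malloc(count * sizeof(u4));
    }
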
diff --git a/vm/Debugger.h b/vm/Debugger.h
index d722160..a842442 100644
--- a/vm/Debugger.h
+++ b/vm/Debugger.h
@@ -32,6 +32,8 @@
 struct Method;
 struct Thread;
 
+#define DEBUGGER_ACTIVE (gDvm.interpBreak & kSubModeDebuggerActive)
+
 /*
  * Used by StepControl to track a set of addresses associated with
  * a single line.
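
DEBUGGER_ACTIVE now tests a bit in gDvm.interpBreak instead of a dedicated
boolean, and the set/clear side goes through dvmUpdateInterpBreak(), whose
body is not part of this diff. A plausible sketch of the flag arithmetic the
macro implies, with local stand-ins for the real globals:

    /* Sketch only: the real dvmUpdateInterpBreak() lives elsewhere and may
     * also refresh JIT state; this just shows the bit manipulation. */
    static volatile int interpBreakSketch;   /* stands in for gDvm.interpBreak */

    enum { kSubModeDebuggerActiveSketch = 0x08 };

    #define DEBUGGER_ACTIVE_SKETCH \
        (interpBreakSketch & kSubModeDebuggerActiveSketch)

    static void updateInterpBreakSketch(int subMode, int enable)
    {
        if (enable)
            interpBreakSketch |= subMode;    /* e.g. debugger attached */
        else
            interpBreakSketch &= ~subMode;   /* e.g. debugger detached */
    }

    static int debuggerActiveSketch(void)
    {
        return DEBUGGER_ACTIVE_SKETCH != 0;  /* what DEBUGGER_ACTIVE tests */
    }
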
diff --git a/vm/Dvm.mk b/vm/Dvm.mk
index 554ba70..aea0b53 100644
--- a/vm/Dvm.mk
+++ b/vm/Dvm.mk
@@ -25,22 +25,13 @@
 # Compiler defines.
 #
 LOCAL_CFLAGS += -fstrict-aliasing -Wstrict-aliasing=2 -fno-align-jumps
-#LOCAL_CFLAGS += -DUSE_INDIRECT_REF
-LOCAL_CFLAGS += -Wall -Wextra -Wno-unused-parameter
+LOCAL_CFLAGS += -Wall -Wextra -Wno-unused-parameter -Wc++-compat
 LOCAL_CFLAGS += -DARCH_VARIANT=\"$(dvm_arch_variant)\"
 
 #
 # Optional features.  These may impact the size or performance of the VM.
 #
 
-ifeq ($(WITH_DEADLOCK_PREDICTION),true)
-  LOCAL_CFLAGS += -DWITH_DEADLOCK_PREDICTION
-  WITH_MONITOR_TRACKING := true
-endif
-ifeq ($(WITH_MONITOR_TRACKING),true)
-  LOCAL_CFLAGS += -DWITH_MONITOR_TRACKING
-endif
-
 # Make a debugging version when building the simulator (if not told
 # otherwise) and when explicitly asked.
 dvm_make_debug_vm := false
@@ -97,6 +88,7 @@
 	AllocTracker.c \
 	Atomic.c.arm \
 	AtomicCache.c \
+	BitVector.c.arm \
 	CheckJni.c \
 	Ddm.c \
 	Debugger.c \
@@ -105,13 +97,14 @@
 	Hash.c \
 	IndirectRefTable.c.arm \
 	Init.c \
+	InitRefs.c \
 	InlineNative.c.arm \
 	Inlines.c \
 	Intern.c \
 	Jni.c \
 	JarFile.c \
 	LinearAlloc.c \
-	Misc.c.arm \
+	Misc.c \
 	Native.c \
 	PointerSet.c \
 	Profile.c \
@@ -124,7 +117,6 @@
 	TestCompability.c \
 	Thread.c \
 	UtfString.c \
-	alloc/clz.c.arm \
 	alloc/Alloc.c \
 	alloc/CardTable.c \
 	alloc/HeapBitmap.c.arm \
@@ -138,9 +130,11 @@
 	analysis/CodeVerify.c \
 	analysis/DexPrepare.c \
 	analysis/DexVerify.c \
+	analysis/Liveness.c \
 	analysis/Optimize.c \
 	analysis/RegisterMap.c \
 	analysis/VerifySubs.c \
+	analysis/VfyBasicBlock.c \
 	hprof/Hprof.c \
 	hprof/HprofClass.c \
 	hprof/HprofHeap.c \
@@ -166,11 +160,13 @@
 	native/dalvik_system_VMStack.c \
 	native/dalvik_system_Zygote.c \
 	native/java_lang_Class.c \
+	native/java_lang_Double.c \
+	native/java_lang_Float.c \
+	native/java_lang_Math.c \
 	native/java_lang_Object.c \
 	native/java_lang_Runtime.c \
 	native/java_lang_String.c \
 	native/java_lang_System.c \
-	native/java_lang_SystemProperties.c \
 	native/java_lang_Throwable.c \
 	native/java_lang_VMClassLoader.c \
 	native/java_lang_VMThread.c \
@@ -222,18 +218,12 @@
 	compiler/InlineTransformation.c \
 	compiler/IntermediateRep.c \
 	compiler/Dataflow.c \
+	compiler/MethodSSATransformation.c \
 	compiler/Loop.c \
 	compiler/Ralloc.c \
 	interp/Jit.c
 endif
 
-ifeq ($(strip $(WITH_HPROF_STACK)),true)
-  LOCAL_SRC_FILES += \
-	hprof/HprofStack.c \
-	hprof/HprofStackFrame.c
-  LOCAL_CFLAGS += -DWITH_HPROF_STACK=1
-endif # WITH_HPROF_STACK
-
 LOCAL_C_INCLUDES += \
 	$(JNI_H_INCLUDE) \
 	dalvik \
@@ -282,6 +272,7 @@
 ifeq ($(dvm_arch),x86)
   ifeq ($(dvm_os),linux)
     MTERP_ARCH_KNOWN := true
+    LOCAL_CFLAGS += -DDVM_JMP_TABLE_MTERP=1
     LOCAL_SRC_FILES += \
 		arch/$(dvm_arch_variant)/Call386ABI.S \
 		arch/$(dvm_arch_variant)/Hints386ABI.c \
diff --git a/vm/DvmDex.c b/vm/DvmDex.c
index 94422b3..c5790e5 100644
--- a/vm/DvmDex.c
+++ b/vm/DvmDex.c
@@ -121,7 +121,7 @@
         goto bail;
     }
 
-    pDexFile = dexFileParse(memMap.addr, memMap.length, parseFlags);
+    pDexFile = dexFileParse((u1*)memMap.addr, memMap.length, parseFlags);
     if (pDexFile == NULL) {
         LOGE("DEX parse failed\n");
         sysReleaseShmem(&memMap);
@@ -137,6 +137,7 @@
 
     /* tuck this into the DexFile so it gets released later */
     sysCopyMap(&pDvmDex->memMap, &memMap);
+    pDvmDex->isMappedReadOnly = true;
     *ppDvmDex = pDvmDex;
     result = 0;
 
@@ -164,7 +165,7 @@
         parseFlags |= kDexParseVerifyChecksum;
     */
 
-    pDexFile = dexFileParse(addr, len, parseFlags);
+    pDexFile = dexFileParse((u1*)addr, len, parseFlags);
     if (pDexFile == NULL) {
         LOGE("DEX parse failed\n");
         goto bail;
@@ -175,6 +176,7 @@
         goto bail;
     }
 
+    pDvmDex->isMappedReadOnly = false;
     *ppDvmDex = pDvmDex;
     result = 0;
 
diff --git a/vm/DvmDex.h b/vm/DvmDex.h
index f36940b..ad82e54 100644
--- a/vm/DvmDex.h
+++ b/vm/DvmDex.h
@@ -58,6 +58,7 @@
     struct AtomicCache* pInterfaceCache;
 
     /* shared memory region with file contents */
+    bool                isMappedReadOnly;
     MemMapping          memMap;
 
     /* lock ensuring mutual exclusion during updates */
diff --git a/vm/Exception.c b/vm/Exception.c
index b4b7160..bdb3034 100644
--- a/vm/Exception.c
+++ b/vm/Exception.c
@@ -99,170 +99,38 @@
 static bool initException(Object* exception, const char* msg, Object* cause,
     Thread* self);
 
-
-/*
- * Cache pointers to some of the exception classes we use locally.
- *
- * Note this is NOT called during dexopt optimization.  Some of the fields
- * are initialized by the verifier (dvmVerifyCodeFlow).
- */
-bool dvmExceptionStartup(void)
-{
-    gDvm.classJavaLangThrowable =
-        dvmFindSystemClassNoInit("Ljava/lang/Throwable;");
-    gDvm.classJavaLangRuntimeException =
-        dvmFindSystemClassNoInit("Ljava/lang/RuntimeException;");
-    gDvm.classJavaLangStackOverflowError =
-        dvmFindSystemClassNoInit("Ljava/lang/StackOverflowError;");
-    gDvm.classJavaLangError =
-        dvmFindSystemClassNoInit("Ljava/lang/Error;");
-    gDvm.classJavaLangStackTraceElement =
-        dvmFindSystemClassNoInit("Ljava/lang/StackTraceElement;");
-    gDvm.classJavaLangStackTraceElementArray =
-        dvmFindArrayClass("[Ljava/lang/StackTraceElement;", NULL);
-    if (gDvm.classJavaLangThrowable == NULL ||
-        gDvm.classJavaLangStackTraceElement == NULL ||
-        gDvm.classJavaLangStackTraceElementArray == NULL)
-    {
-        LOGE("Could not find one or more essential exception classes\n");
-        return false;
-    }
-
-    /*
-     * Find the constructor.  Note that, unlike other saved method lookups,
-     * we're using a Method* instead of a vtable offset.  This is because
-     * constructors don't have vtable offsets.  (Also, since we're creating
-     * the object in question, it's impossible for anyone to sub-class it.)
-     */
-    Method* meth;
-    meth = dvmFindDirectMethodByDescriptor(gDvm.classJavaLangStackTraceElement,
-        "<init>", "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;I)V");
-    if (meth == NULL) {
-        LOGE("Unable to find constructor for StackTraceElement\n");
-        return false;
-    }
-    gDvm.methJavaLangStackTraceElement_init = meth;
-
-    /* grab an offset for the stackData field */
-    gDvm.offJavaLangThrowable_stackState =
-        dvmFindFieldOffset(gDvm.classJavaLangThrowable,
-            "stackState", "Ljava/lang/Object;");
-    if (gDvm.offJavaLangThrowable_stackState < 0) {
-        LOGE("Unable to find Throwable.stackState\n");
-        return false;
-    }
-
-    /* and one for the cause field, just 'cause */
-    gDvm.offJavaLangThrowable_cause =
-        dvmFindFieldOffset(gDvm.classJavaLangThrowable,
-            "cause", "Ljava/lang/Throwable;");
-    if (gDvm.offJavaLangThrowable_cause < 0) {
-        LOGE("Unable to find Throwable.cause\n");
-        return false;
-    }
-
-    return true;
-}
-
-/*
- * Clean up.
- */
-void dvmExceptionShutdown(void)
-{
-    // nothing to do
-}
-
-
-/*
- * Format the message into a small buffer and pass it along.
- */
-void dvmThrowExceptionFmtV(const char* exceptionDescriptor, const char* fmt,
-    va_list args)
+void dvmThrowExceptionFmtV(ClassObject* exceptionClass,
+    const char* fmt, va_list args)
 {
     char msgBuf[512];
 
     vsnprintf(msgBuf, sizeof(msgBuf), fmt, args);
-    dvmThrowChainedException(exceptionDescriptor, msgBuf, NULL);
+    dvmThrowChainedException(exceptionClass, msgBuf, NULL);
 }
 
-/*
- * Create a Throwable and throw an exception in the current thread (where
- * "throwing" just means "set the thread's exception pointer").
- *
- * "msg" and/or "cause" may be NULL.
- *
- * If we have a bad exception hierarchy -- something in Throwable.<init>
- * is missing -- then every attempt to throw an exception will result
- * in another exception.  Exceptions are generally allowed to "chain"
- * to other exceptions, so it's hard to auto-detect this problem.  It can
- * only happen if the system classes are broken, so it's probably not
- * worth spending cycles to detect it.
- *
- * We do have one case to worry about: if the classpath is completely
- * wrong, we'll go into a death spin during startup because we can't find
- * the initial class and then we can't find NoClassDefFoundError.  We have
- * to handle this case.
- *
- * [Do we want to cache pointers to common exception classes?]
- */
-void dvmThrowChainedException(const char* exceptionDescriptor, const char* msg,
-    Object* cause)
-{
-    ClassObject* excepClass;
-
-    LOGV("THROW '%s' msg='%s' cause=%s\n",
-        exceptionDescriptor, msg,
-        (cause != NULL) ? cause->clazz->descriptor : "(none)");
-
-    if (gDvm.initializing) {
-        if (++gDvm.initExceptionCount >= 2) {
-            LOGE("Too many exceptions during init (failed on '%s' '%s')\n",
-                exceptionDescriptor, msg);
-            dvmAbort();
-        }
-    }
-
-    excepClass = dvmFindSystemClass(exceptionDescriptor);
-    if (excepClass == NULL) {
-        /*
-         * We couldn't find the exception class.  The attempt to find a
-         * nonexistent class should have raised an exception.  If no
-         * exception is currently raised, then we're pretty clearly unable
-         * to throw ANY sort of exception, and we need to pack it in.
-         *
-         * If we were able to throw the "class load failed" exception,
-         * stick with that.  Ideally we'd stuff the original exception
-         * into the "cause" field, but since we can't find it we can't
-         * do that.  The exception class name should be in the "message"
-         * field.
-         */
-        if (!dvmCheckException(dvmThreadSelf())) {
-            LOGE("FATAL: unable to throw exception (failed on '%s' '%s')\n",
-                exceptionDescriptor, msg);
-            dvmAbort();
-        }
-        return;
-    }
-
-    dvmThrowChainedExceptionByClass(excepClass, msg, cause);
-}
-
-/*
- * Start/continue throwing process now that we have a class reference.
- */
-void dvmThrowChainedExceptionByClass(ClassObject* excepClass, const char* msg,
+void dvmThrowChainedException(ClassObject* excepClass, const char* msg,
     Object* cause)
 {
     Thread* self = dvmThreadSelf();
     Object* exception;
 
+    if (excepClass == NULL) {
+        /*
+         * The exception class was passed in as NULL. This might happen
+         * early on in VM initialization. There's nothing better to do
+         * than just log the message as an error and abort.
+         */
+        LOGE("Fatal error: %s\n", msg);
+        dvmAbort();
+    }
+
     /* make sure the exception is initialized */
     if (!dvmIsClassInitialized(excepClass) && !dvmInitClass(excepClass)) {
         LOGE("ERROR: unable to initialize exception class '%s'\n",
             excepClass->descriptor);
         if (strcmp(excepClass->descriptor, "Ljava/lang/InternalError;") == 0)
             dvmAbort();
-        dvmThrowChainedException("Ljava/lang/InternalError;",
+        dvmThrowChainedException(gDvm.exInternalError,
             "failed to init original exception class", cause);
         return;
     }
@@ -316,29 +184,13 @@
     dvmReleaseTrackedAlloc(exception, self);
 }
 
-/*
- * Throw the named exception using the human-readable form of the class
- * descriptor as the exception message, and with the specified cause.
- */
-void dvmThrowChainedExceptionWithClassMessage(const char* exceptionDescriptor,
-    const char* messageDescriptor, Object* cause)
-{
-    char* message = dvmHumanReadableDescriptor(messageDescriptor);
-
-    dvmThrowChainedException(exceptionDescriptor, message, cause);
-    free(message);
-}
-
-/*
- * Like dvmThrowExceptionWithMessageFromDescriptor, but take a
- * class object instead of a name.
- */
-void dvmThrowExceptionByClassWithClassMessage(ClassObject* exceptionClass,
-    const char* messageDescriptor)
+void dvmThrowChainedExceptionWithClassMessage(
+    ClassObject* exceptionClass, const char* messageDescriptor,
+    Object* cause)
 {
     char* message = dvmDescriptorToName(messageDescriptor);
 
-    dvmThrowExceptionByClass(exceptionClass, message);
+    dvmThrowChainedException(exceptionClass, message, cause);
     free(message);
 }
 
@@ -425,7 +277,7 @@
     }
 
     if (cause != NULL) {
-        if (!dvmInstanceof(cause->clazz, gDvm.classJavaLangThrowable)) {
+        if (!dvmInstanceof(cause->clazz, gDvm.exThrowable)) {
             LOGE("Tried to init exception with cause '%s'\n",
                 cause->clazz->descriptor);
             dvmAbort();
@@ -519,7 +371,7 @@
             excepClass->descriptor, msg, initKind);
         assert(strcmp(excepClass->descriptor,
                       "Ljava/lang/RuntimeException;") != 0);
-        dvmThrowChainedException("Ljava/lang/RuntimeException;",
+        dvmThrowChainedException(gDvm.exRuntimeException,
             "re-throw on exception class missing constructor", NULL);
         goto bail;
     }
@@ -599,19 +451,15 @@
 
 
 /*
- * Clear the pending exception and the "initExceptionCount" counter.  This
- * is used by the optimization and verification code, which has to run with
- * "initializing" set to avoid going into a death-spin if the "class not
- * found" exception can't be found.
+ * Clear the pending exception. This is used by the optimization and
+ * verification code, which mostly happens during runs of dexopt.
  *
  * This can also be called when the VM is in a "normal" state, e.g. when
- * verifying classes that couldn't be verified at optimization time.  The
- * reset of initExceptionCount should be harmless in that case.
+ * verifying classes that couldn't be verified at optimization time.
  */
 void dvmClearOptException(Thread* self)
 {
     self->exception = NULL;
-    gDvm.initExceptionCount = 0;
 }
 
 /*
@@ -620,8 +468,8 @@
  */
 bool dvmIsCheckedException(const Object* exception)
 {
-    if (dvmInstanceof(exception->clazz, gDvm.classJavaLangError) ||
-        dvmInstanceof(exception->clazz, gDvm.classJavaLangRuntimeException))
+    if (dvmInstanceof(exception->clazz, gDvm.exError) ||
+        dvmInstanceof(exception->clazz, gDvm.exRuntimeException))
     {
         return false;
     } else {
@@ -690,7 +538,7 @@
  */
 Object* dvmGetExceptionCause(const Object* exception)
 {
-    if (!dvmInstanceof(exception->clazz, gDvm.classJavaLangThrowable)) {
+    if (!dvmInstanceof(exception->clazz, gDvm.exThrowable)) {
         LOGE("Tried to get cause from object of type '%s'\n",
             exception->clazz->descriptor);
         dvmAbort();
@@ -897,7 +745,7 @@
          * if this was a native method.
          */
         assert(saveArea->prevFrame != NULL);
-        if (dvmIsBreakFrame(saveArea->prevFrame)) {
+        if (dvmIsBreakFrame((u4*)saveArea->prevFrame)) {
             if (!scanOnly)
                 break;      // bail with catchAddr == -1
 
@@ -914,7 +762,7 @@
             saveArea = SAVEAREA_FROM_FP(fp);
             fp = saveArea->prevFrame;           // this may be a good one
             while (fp != NULL) {
-                if (!dvmIsBreakFrame(fp)) {
+                if (!dvmIsBreakFrame((u4*)fp)) {
                     saveArea = SAVEAREA_FROM_FP(fp);
                     if (!dvmIsNativeMethod(saveArea->method))
                         break;
@@ -1001,9 +849,9 @@
         const StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
         const Method* method = saveArea->method;
 
-        if (dvmIsBreakFrame(fp))
+        if (dvmIsBreakFrame((u4*)fp))
             break;
-        if (!dvmInstanceof(method->clazz, gDvm.classJavaLangThrowable))
+        if (!dvmInstanceof(method->clazz, gDvm.exThrowable))
             break;
         //LOGD("EXCEP: ignoring %s.%s\n",
         //         method->clazz->descriptor, method->name);
@@ -1018,7 +866,7 @@
     while (fp != NULL) {
         const StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
 
-        if (!dvmIsBreakFrame(fp))
+        if (!dvmIsBreakFrame((u4*)fp))
             stackDepth++;
 
         assert(fp != saveArea->prevFrame);
@@ -1059,7 +907,7 @@
         const StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
         const Method* method = saveArea->method;
 
-        if (!dvmIsBreakFrame(fp)) {
+        if (!dvmIsBreakFrame((u4*)fp)) {
             //LOGD("EXCEP keeping %s.%s\n", method->clazz->descriptor,
             //         method->name);
 
@@ -1354,18 +1202,31 @@
     }
 }
 
-void dvmThrowAIOOBE(int index, int length)
-{
-    dvmThrowExceptionFmt("Ljava/lang/ArrayIndexOutOfBoundsException;",
-        "index=%d length=%d", index, length);
+void dvmThrowAbstractMethodError(const char* msg) {
+    dvmThrowException(gDvm.exAbstractMethodError, msg);
 }
 
-static void dvmThrowTypeError(const char* exceptionClassName, const char* fmt,
+void dvmThrowArithmeticException(const char* msg) {
+    dvmThrowException(gDvm.exArithmeticException, msg);
+}
+
+void dvmThrowArrayIndexOutOfBoundsException(int length, int index)
+{
+    dvmThrowExceptionFmt(gDvm.exArrayIndexOutOfBoundsException,
+        "length=%d; index=%d", length, index);
+}
+
+/*
+ * Throw the indicated exception, with a message based on a format
+ * in which "%s" is used exactly twice, first for a received class and
+ * second for the expected class.
+ */
+static void throwTypeError(ClassObject* exceptionClass, const char* fmt,
     ClassObject* actual, ClassObject* desired)
 {
     char* actualClassName = dvmHumanReadableDescriptor(actual->descriptor);
     char* desiredClassName = dvmHumanReadableDescriptor(desired->descriptor);
-    dvmThrowExceptionFmt(exceptionClassName, fmt,
+    dvmThrowExceptionFmt(exceptionClass, fmt,
         actualClassName, desiredClassName);
     free(desiredClassName);
     free(actualClassName);
@@ -1373,13 +1234,212 @@
 
 void dvmThrowArrayStoreException(ClassObject* actual, ClassObject* desired)
 {
-    dvmThrowTypeError("Ljava/lang/ArrayStoreException;",
+    throwTypeError(gDvm.exArrayStoreException,
         "%s cannot be stored in an array of type %s",
         actual, desired);
 }
 
 void dvmThrowClassCastException(ClassObject* actual, ClassObject* desired)
 {
-    dvmThrowTypeError("Ljava/lang/ClassCastException;",
+    throwTypeError(gDvm.exClassCastException,
         "%s cannot be cast to %s", actual, desired);
 }
+
+void dvmThrowClassCircularityError(const char* descriptor) {
+    dvmThrowExceptionWithClassMessage(gDvm.exClassCircularityError,
+            descriptor);
+}
+
+void dvmThrowClassFormatError(const char* msg) {
+    dvmThrowException(gDvm.exClassFormatError, msg);
+}
+
+void dvmThrowClassNotFoundException(const char* name) {
+    dvmThrowChainedClassNotFoundException(name, NULL);
+}
+
+void dvmThrowChainedClassNotFoundException(const char* name, Object* cause) {
+    /*
+     * Note: This exception is thrown in response to a request coming
+     * from client code for the name as given, so it is preferable to
+     * make the exception message be that string, per se, instead of
+     * trying to prettify it.
+     */
+    dvmThrowChainedException(gDvm.exClassNotFoundException, name, cause);
+}
+
+void dvmThrowExceptionInInitializerError(void)
+{
+    /*
+     * TODO: Do we want to wrap it if the original is an Error rather than
+     * an Exception?
+     *
+     * TODO: Should this just use dvmWrapException()?
+     */
+
+    if (gDvm.exExceptionInInitializerError == NULL) {
+        /*
+         * ExceptionInInitializerError isn't itself initialized. This
+         * can happen very early during VM startup if there is a
+         * problem with one of the corest-of-the-core classes, and it
+         * can possibly happen during a dexopt run. Rather than do
+         * anything fancier, we just abort here with a blatant
+         * message.
+         */
+        LOGE("Fatal error during early class initialization:\n");
+        dvmLogExceptionStackTrace();
+        dvmAbort();
+    }
+
+    Thread* self = dvmThreadSelf();
+    Object* exception = dvmGetException(self);
+
+    dvmAddTrackedAlloc(exception, self);
+    dvmClearException(self);
+
+    dvmThrowChainedException(gDvm.exExceptionInInitializerError,
+            NULL, exception);
+    dvmReleaseTrackedAlloc(exception, self);
+}
+
+void dvmThrowFileNotFoundException(const char* msg) {
+    dvmThrowException(gDvm.exFileNotFoundException, msg);
+}
+
+void dvmThrowIOException(const char* msg) {
+    dvmThrowException(gDvm.exIOException, msg);
+}
+
+void dvmThrowIllegalAccessException(const char* msg) {
+    dvmThrowException(gDvm.exIllegalAccessException, msg);
+}
+
+void dvmThrowIllegalAccessError(const char* msg) {
+    dvmThrowException(gDvm.exIllegalAccessError, msg);
+}
+
+void dvmThrowIllegalArgumentException(const char* msg) {
+    dvmThrowException(gDvm.exIllegalArgumentException, msg);
+}
+
+void dvmThrowIllegalMonitorStateException(const char* msg) {
+    dvmThrowException(gDvm.exIllegalMonitorStateException, msg);
+}
+
+void dvmThrowIllegalStateException(const char* msg) {
+    dvmThrowException(gDvm.exIllegalStateException, msg);
+}
+
+void dvmThrowIllegalThreadStateException(const char* msg) {
+    dvmThrowException(gDvm.exIllegalThreadStateException, msg);
+}
+
+void dvmThrowIncompatibleClassChangeError(const char* msg) {
+    dvmThrowException(gDvm.exIncompatibleClassChangeError, msg);
+}
+
+void dvmThrowIncompatibleClassChangeErrorWithClassMessage(
+        const char* descriptor)
+{
+    dvmThrowExceptionWithClassMessage(
+            gDvm.exIncompatibleClassChangeError, descriptor);
+}
+
+void dvmThrowInstantiationException(ClassObject* clazz,
+        const char* extraDetail) {
+    char* className = dvmHumanReadableDescriptor(clazz->descriptor);
+    dvmThrowExceptionFmt(gDvm.exInstantiationException,
+            "can't instantiate class %s%s%s", className,
+            (extraDetail == NULL) ? "" : "; ",
+            (extraDetail == NULL) ? "" : extraDetail);
+    free(className);
+}
+
+void dvmThrowInternalError(const char* msg) {
+    dvmThrowException(gDvm.exInternalError, msg);
+}
+
+void dvmThrowInterruptedException(const char* msg) {
+    dvmThrowException(gDvm.exInterruptedException, msg);
+}
+
+void dvmThrowLinkageError(const char* msg) {
+    dvmThrowException(gDvm.exLinkageError, msg);
+}
+
+void dvmThrowNegativeArraySizeException(s4 size) {
+    dvmThrowExceptionFmt(gDvm.exNegativeArraySizeException, "%d", size);
+}
+
+void dvmThrowNoClassDefFoundError(const char* descriptor) {
+    dvmThrowExceptionWithClassMessage(gDvm.exNoClassDefFoundError,
+            descriptor);
+}
+
+void dvmThrowChainedNoClassDefFoundError(const char* descriptor,
+        Object* cause) {
+    dvmThrowChainedExceptionWithClassMessage(
+            gDvm.exNoClassDefFoundError, descriptor, cause);
+}
+
+void dvmThrowNoSuchFieldError(const char* msg) {
+    dvmThrowException(gDvm.exNoSuchFieldError, msg);
+}
+
+void dvmThrowNoSuchFieldException(const char* msg) {
+    dvmThrowException(gDvm.exNoSuchFieldException, msg);
+}
+
+void dvmThrowNoSuchMethodError(const char* msg) {
+    dvmThrowException(gDvm.exNoSuchMethodError, msg);
+}
+
+void dvmThrowNullPointerException(const char* msg) {
+    dvmThrowException(gDvm.exNullPointerException, msg);
+}
+
+void dvmThrowOutOfMemoryError(const char* msg) {
+    dvmThrowException(gDvm.exOutOfMemoryError, msg);
+}
+
+void dvmThrowRuntimeException(const char* msg) {
+    dvmThrowException(gDvm.exRuntimeException, msg);
+}
+
+void dvmThrowStaleDexCacheError(const char* msg) {
+    dvmThrowException(gDvm.exStaleDexCacheError, msg);
+}
+
+void dvmThrowStringIndexOutOfBoundsExceptionWithIndex(jsize stringLength,
+        jsize requestIndex) {
+    dvmThrowExceptionFmt(gDvm.exStringIndexOutOfBoundsException,
+            "length=%d; index=%d", stringLength, requestIndex);
+}
+
+void dvmThrowStringIndexOutOfBoundsExceptionWithRegion(jsize stringLength,
+        jsize requestStart, jsize requestLength) {
+    dvmThrowExceptionFmt(gDvm.exStringIndexOutOfBoundsException,
+            "length=%d; regionStart=%d; regionLength=%d",
+            stringLength, requestStart, requestLength);
+}
+
+void dvmThrowTypeNotPresentException(const char* descriptor) {
+    dvmThrowExceptionWithClassMessage(gDvm.exTypeNotPresentException,
+            descriptor);
+}
+
+void dvmThrowUnsatisfiedLinkError(const char* msg) {
+    dvmThrowException(gDvm.exUnsatisfiedLinkError, msg);
+}
+
+void dvmThrowUnsupportedOperationException(const char* msg) {
+    dvmThrowException(gDvm.exUnsupportedOperationException, msg);
+}
+
+void dvmThrowVerifyError(const char* descriptor) {
+    dvmThrowExceptionWithClassMessage(gDvm.exVerifyError, descriptor);
+}
+
+void dvmThrowVirtualMachineError(const char* msg) {
+    dvmThrowException(gDvm.exVirtualMachineError, msg);
+}
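
The net effect of the Exception.c rewrite is that throw sites pass a cached
ClassObject (gDvm.exFoo) or, better, call a typed helper, instead of handing
over a descriptor string to be looked up on every throw. An illustrative
caller, hypothetical but assuming only the helpers declared in Exception.h
below:

    /* Hypothetical divide helper; returns -1 with an exception pending. */
    static int divideChecked(int dividend, int divisor, int* result)
    {
        if (divisor == 0) {
            /* was: dvmThrowException("Ljava/lang/ArithmeticException;", ...) */
            dvmThrowArithmeticException("divide by zero");
            return -1;
        }
        *result = dividend / divisor;
        return 0;
    }
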
diff --git a/vm/Exception.h b/vm/Exception.h
index 6d05b09..3022e04 100644
--- a/vm/Exception.h
+++ b/vm/Exception.h
@@ -20,89 +20,73 @@
 #ifndef _DALVIK_EXCEPTION
 #define _DALVIK_EXCEPTION
 
-/* initialization */
-bool dvmExceptionStartup(void);
-void dvmExceptionShutdown(void);
-
 /*
- * Throw an exception in the current thread, by class descriptor.
+ * Create a Throwable and throw an exception in the current thread (where
+ * "throwing" just means "set the thread's exception pointer").
+ *
+ * "msg" and/or "cause" may be NULL.
+ *
+ * If we have a bad exception hierarchy -- something in Throwable.<init>
+ * is missing -- then every attempt to throw an exception will result
+ * in another exception.  Exceptions are generally allowed to "chain"
+ * to other exceptions, so it's hard to auto-detect this problem.  It can
+ * only happen if the system classes are broken, so it's probably not
+ * worth spending cycles to detect it.
+ *
+ * We do have one case to worry about: if the classpath is completely
+ * wrong, we'll go into a death spin during startup because we can't find
+ * the initial class and then we can't find NoClassDefFoundError.  We have
+ * to handle this case.
  */
-void dvmThrowChainedException(const char* exceptionDescriptor, const char* msg,
-    Object* cause);
-INLINE void dvmThrowException(const char* exceptionDescriptor,
+void dvmThrowChainedException(ClassObject* exceptionClass,
+    const char* msg, Object* cause);
+INLINE void dvmThrowException(ClassObject* exceptionClass,
     const char* msg)
 {
-    dvmThrowChainedException(exceptionDescriptor, msg, NULL);
+    dvmThrowChainedException(exceptionClass, msg, NULL);
 }
 
 /*
- * Throw an ArrayIndexOutOfBoundsException in the current thread, using the given
- * index and array length in the detail message.
+ * Like dvmThrowException, but takes printf-style args for the message.
  */
-void dvmThrowAIOOBE(int index, int length);
-/*
- * Throw an ArrayStoreException in the current thread, using the given classes'
- * names in the detail message.
- */
-void dvmThrowArrayStoreException(ClassObject* actual, ClassObject* desired);
-
-/**
- * Throw a ClassCastException in the current thread, using the given classes'
- * names in the detail message.
- */
-void dvmThrowClassCastException(ClassObject* actual, ClassObject* desired);
-
-/*
- * Like dvmThrowChainedException, but takes printf-style args for the message.
- */
-void dvmThrowExceptionFmtV(const char* exceptionDescriptor, const char* fmt,
-    va_list args);
-void dvmThrowExceptionFmt(const char* exceptionDescriptor, const char* fmt, ...)
+void dvmThrowExceptionFmtV(ClassObject* exceptionClass,
+    const char* fmt, va_list args);
+void dvmThrowExceptionFmt(ClassObject* exceptionClass,
+    const char* fmt, ...)
 #if defined(__GNUC__)
     __attribute__ ((format(printf, 2, 3)))
 #endif
     ;
-INLINE void dvmThrowExceptionFmt(const char* exceptionDescriptor,
+INLINE void dvmThrowExceptionFmt(ClassObject* exceptionClass,
     const char* fmt, ...)
 {
     va_list args;
     va_start(args, fmt);
-    dvmThrowExceptionFmtV(exceptionDescriptor, fmt, args);
+    dvmThrowExceptionFmtV(exceptionClass, fmt, args);
     va_end(args);
 }
 
 /*
- * Throw an exception in the current thread, by class object.
+ * Like dvmThrowChainedException, but takes a class object
+ * instead of a name and turns the given message into the
+ * human-readable form for a descriptor.
  */
-void dvmThrowChainedExceptionByClass(ClassObject* exceptionClass,
-    const char* msg, Object* cause);
-INLINE void dvmThrowExceptionByClass(ClassObject* exceptionClass,
-    const char* msg)
-{
-    dvmThrowChainedExceptionByClass(exceptionClass, msg, NULL);
-}
+void dvmThrowChainedExceptionWithClassMessage(
+    ClassObject* exceptionClass, const char* messageDescriptor,
+    Object* cause);
 
 /*
- * Throw the named exception using the name of a class as the exception
- * message.
+ * Like dvmThrowException, but takes a class object instead of a name
+ * and turns the given message into the human-readable form for a descriptor.
  */
-void dvmThrowChainedExceptionWithClassMessage(const char* exceptionDescriptor,
-    const char* messageDescriptor, Object* cause);
-INLINE void dvmThrowExceptionWithClassMessage(const char* exceptionDescriptor,
-    const char* messageDescriptor)
+INLINE void dvmThrowExceptionWithClassMessage(
+    ClassObject* exceptionClass, const char* messageDescriptor)
 {
-    dvmThrowChainedExceptionWithClassMessage(exceptionDescriptor,
-        messageDescriptor, NULL);
+    dvmThrowChainedExceptionWithClassMessage(exceptionClass,
+            messageDescriptor, NULL);
 }
 
 /*
- * Like dvmThrowExceptionWithMessageFromDescriptor, but take a
- * class object instead of a name.
- */
-void dvmThrowExceptionByClassWithClassMessage(ClassObject* exceptionClass,
-    const char* messageDescriptor);
-
-/*
  * Return the exception being thrown in the current thread, or NULL if
  * no exception is pending.
  */
@@ -217,4 +201,260 @@
  */
 void dvmLogRawStackTrace(const int* intVals, int stackDepth);
 
+/**
+ * Throw an AbstractMethodError in the current thread, with the given detail
+ * message.
+ */
+void dvmThrowAbstractMethodError(const char* msg);
+
+/**
+ * Throw an ArithmeticException in the current thread, with the given detail
+ * message.
+ */
+void dvmThrowArithmeticException(const char* msg);
+
+/*
+ * Throw an ArrayIndexOutOfBoundsException in the current thread,
+ * using the given array length and index in the detail message.
+ */
+void dvmThrowArrayIndexOutOfBoundsException(int length, int index);
+
+/*
+ * Throw an ArrayStoreException in the current thread, using the given classes'
+ * names in the detail message.
+ */
+void dvmThrowArrayStoreException(ClassObject* actual, ClassObject* desired);
+
+/**
+ * Throw a ClassCastException in the current thread, using the given classes'
+ * names in the detail message.
+ */
+void dvmThrowClassCastException(ClassObject* actual, ClassObject* desired);
+
+/**
+ * Throw a ClassCircularityError in the current thread, with the
+ * human-readable form of the given descriptor as the detail message.
+ */
+void dvmThrowClassCircularityError(const char* descriptor);
+
+/**
+ * Throw a ClassFormatError in the current thread, with the given
+ * detail message.
+ */
+void dvmThrowClassFormatError(const char* msg);
+
+/**
+ * Throw a ClassNotFoundException in the current thread, with the given
+ * class name as the detail message.
+ */
+void dvmThrowClassNotFoundException(const char* name);
+
+/**
+ * Throw a ClassNotFoundException in the current thread, with the given
+ * cause, and the given class name as the detail message.
+ */
+void dvmThrowChainedClassNotFoundException(const char* name, Object* cause);
+
+/*
+ * Throw the VM-spec-mandated error when an exception is thrown during
+ * class initialization. Unlike other helper functions, this automatically
+ * wraps the current thread's pending exception.
+ */
+void dvmThrowExceptionInInitializerError(void);
+
+/**
+ * Throw a FileNotFoundException in the current thread, with the given
+ * detail message.
+ */
+void dvmThrowFileNotFoundException(const char* msg);
+
+/**
+ * Throw an IOException in the current thread, with the given
+ * detail message.
+ */
+void dvmThrowIOException(const char* msg);
+
+/**
+ * Throw an IllegalAccessError in the current thread, with the
+ * given detail message.
+ */
+void dvmThrowIllegalAccessError(const char* msg);
+
+/**
+ * Throw an IllegalAccessException in the current thread, with the
+ * given detail message.
+ */
+void dvmThrowIllegalAccessException(const char* msg);
+
+/**
+ * Throw an IllegalArgumentException in the current thread, with the
+ * given detail message.
+ */
+void dvmThrowIllegalArgumentException(const char* msg);
+
+/**
+ * Throw an IllegalMonitorStateException in the current thread, with
+ * the given detail message.
+ */
+void dvmThrowIllegalMonitorStateException(const char* msg);
+
+/**
+ * Throw an IllegalStateException in the current thread, with
+ * the given detail message.
+ */
+void dvmThrowIllegalStateException(const char* msg);
+
+/**
+ * Throw an IllegalThreadStateException in the current thread, with
+ * the given detail message.
+ */
+void dvmThrowIllegalThreadStateException(const char* msg);
+
+/**
+ * Throw an IncompatibleClassChangeError in the current thread,
+ * the given detail message.
+ */
+void dvmThrowIncompatibleClassChangeError(const char* msg);
+
+/**
+ * Throw an IncompatibleClassChangeError in the current thread, with the
+ * human-readable form of the given descriptor as the detail message.
+ */
+void dvmThrowIncompatibleClassChangeErrorWithClassMessage(
+        const char* descriptor);
+
+/**
+ * Throw an InstantiationException in the current thread, with
+ * the human-readable form of the given class as the detail message,
+ * with optional extra detail appended to the message.
+ */
+void dvmThrowInstantiationException(ClassObject* clazz,
+        const char* extraDetail);
+
+/**
+ * Throw an InternalError in the current thread, with the given
+ * detail message.
+ */
+void dvmThrowInternalError(const char* msg);
+
+/**
+ * Throw an InterruptedException in the current thread, with the given
+ * detail message.
+ */
+void dvmThrowInterruptedException(const char* msg);
+
+/**
+ * Throw a LinkageError in the current thread, with the
+ * given detail message.
+ */
+void dvmThrowLinkageError(const char* msg);
+
+/**
+ * Throw a NegativeArraySizeException in the current thread, with the
+ * given number as the detail message.
+ */
+void dvmThrowNegativeArraySizeException(s4 size);
+
+/**
+ * Throw a NoClassDefFoundError in the current thread, with the
+ * human-readable form of the given descriptor as the detail message.
+ */
+void dvmThrowNoClassDefFoundError(const char* descriptor);
+
+/**
+ * Throw a NoClassDefFoundError in the current thread, with the given
+ * cause, and the human-readable form of the given descriptor as the
+ * detail message.
+ */
+void dvmThrowChainedNoClassDefFoundError(const char* descriptor,
+        Object* cause);
+
+/**
+ * Throw a NoSuchFieldError in the current thread, with the given
+ * detail message.
+ */
+void dvmThrowNoSuchFieldError(const char* msg);
+
+/**
+ * Throw a NoSuchFieldException in the current thread, with the given
+ * detail message.
+ */
+void dvmThrowNoSuchFieldException(const char* msg);
+
+/**
+ * Throw a NoSuchMethodError in the current thread, with the given
+ * detail message.
+ */
+void dvmThrowNoSuchMethodError(const char* msg);
+
+/**
+ * Throw a NullPointerException in the current thread, with the given
+ * detail message.
+ */
+void dvmThrowNullPointerException(const char* msg);
+
+/**
+ * Throw an OutOfMemoryError in the current thread, with the given
+ * detail message.
+ */
+void dvmThrowOutOfMemoryError(const char* msg);
+
+/**
+ * Throw a RuntimeException in the current thread, with the given detail
+ * message.
+ */
+void dvmThrowRuntimeException(const char* msg);
+
+/**
+ * Throw a StaleDexCacheError in the current thread, with
+ * the given detail message.
+ */
+void dvmThrowStaleDexCacheError(const char* msg);
+
+/**
+ * Throw a StringIndexOutOfBoundsException in the current thread, with
+ * a detail message specifying an actual length as well as a requested
+ * index.
+ */
+void dvmThrowStringIndexOutOfBoundsExceptionWithIndex(jsize stringLength,
+        jsize requestIndex);
+
+/**
+ * Throw a StringIndexOutOfBoundsException in the current thread, with
+ * a detail message specifying an actual length as well as a requested
+ * region.
+ */
+void dvmThrowStringIndexOutOfBoundsExceptionWithRegion(jsize stringLength,
+        jsize requestStart, jsize requestLength);
+
+/**
+ * Throw a TypeNotPresentException in the current thread, with the
+ * human-readable form of the given descriptor as the detail message.
+ */
+void dvmThrowTypeNotPresentException(const char* descriptor);
+
+/**
+ * Throw an UnsatisfiedLinkError in the current thread, with
+ * the given detail message.
+ */
+void dvmThrowUnsatisfiedLinkError(const char* msg);
+
+/**
+ * Throw an UnsupportedOperationException in the current thread, with
+ * the given detail message.
+ */
+void dvmThrowUnsupportedOperationException(const char* msg);
+
+/**
+ * Throw a VerifyError in the current thread, with the
+ * human-readable form of the given descriptor as the detail message.
+ */
+void dvmThrowVerifyError(const char* descriptor);
+
+/**
+ * Throw a VirtualMachineError in the current thread, with
+ * the given detail message.
+ */
+void dvmThrowVirtualMachineError(const char* msg);
+
 #endif /*_DALVIK_EXCEPTION*/
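
As a usage note, the two StringIndexOutOfBounds helpers keep callers from
hand-rolling the "length=...; index=..." messages. A hypothetical bounds
check, assuming the VM's bool/jsize types and this header are in scope:

    /* Hypothetical region check for a String native method. */
    static bool checkStringRegion(jsize length, jsize start, jsize count)
    {
        if (start < 0 || count < 0 || count > length - start) {
            dvmThrowStringIndexOutOfBoundsExceptionWithRegion(length,
                    start, count);
            return false;
        }
        return true;
    }
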
diff --git a/vm/Globals.h b/vm/Globals.h
index 5dd914f..28cd173 100644
--- a/vm/Globals.h
+++ b/vm/Globals.h
@@ -29,6 +29,7 @@
 #ifndef _DALVIK_GLOBALS
 #define _DALVIK_GLOBALS
 
+#include <cutils/array.h>
 #include <stdarg.h>
 #include <pthread.h>
 
@@ -62,6 +63,37 @@
 } ExecutionMode;
 
 /*
+ * Execution sub modes, e.g. debugging, profiling, etc.
+ * Treated as bit flags for fast access.  These values are used directly
+ * by assembly code in the mterp interpreter and may also be used by
+ * code generated by the JIT.  Take care when changing.
+ */
+typedef enum ExecutionSubModes {
+    kSubModeNormal         = 0x00,
+    kSubModeMethodTrace    = 0x01,
+    kSubModeEmulatorTrace  = 0x02,
+    kSubModeInstCounting   = 0x04,
+    kSubModeDebuggerActive = 0x08,
+    kSubModeSuspendRequest = 0x10,  /* Set if any suspend request active */
+} ExecutionSubModes;
+
+/*
+ * Register map generation mode.  Only applicable when generateRegisterMaps
+ * is enabled.  (The "disabled" state is not folded into this because
+ * there are callers like dexopt that want to enable/disable without
+ * specifying the configuration details.)
+ *
+ * "TypePrecise" is slower and requires additional storage for the register
+ * maps, but allows type-precise GC.  "LivePrecise" is even slower and
+ * requires additional heap during processing, but allows live-precise GC.
+ */
+typedef enum {
+    kRegisterMapModeUnknown = 0,
+    kRegisterMapModeTypePrecise,
+    kRegisterMapModeLivePrecise
+} RegisterMapMode;
+
+/*
  * All fields are initialized to zero.
  *
  * Storage allocated here must be freed by a subsystem shutdown function or
@@ -120,6 +152,9 @@
     DexOptimizerMode    dexOptMode;
     DexClassVerifyMode  classVerifyMode;
 
+    bool        generateRegisterMaps;
+    RegisterMapMode     registerMapMode;
+
     bool        monitorVerification;
 
     bool        dexOptForSmp;
@@ -130,7 +165,6 @@
     bool        preciseGc;
     bool        preVerify;
     bool        postVerify;
-    bool        generateRegisterMaps;
     bool        concurrentMarkSweep;
     bool        verifyCardTable;
     bool        disableExplicitGc;
@@ -144,15 +178,14 @@
      * VM init management.
      */
     bool        initializing;
-    int         initExceptionCount;
     bool        optimizing;
 
     /*
-     * java.lang.System properties set from the command line.
+     * java.lang.System properties set from the command line with -D.
+     * This is effectively a set, where later entries override earlier
+     * ones.
      */
-    int         numProps;
-    int         maxProps;
-    char**      propList;
+    Array*      properties;
 
     /*
      * Where the VM goes to find system classes.
@@ -202,16 +235,12 @@
      */
     ClassObject* classJavaLangClass;
     ClassObject* classJavaLangClassArray;
-    ClassObject* classJavaLangError;
     ClassObject* classJavaLangObject;
     ClassObject* classJavaLangObjectArray;
-    ClassObject* classJavaLangRuntimeException;
     ClassObject* classJavaLangString;
     ClassObject* classJavaLangThread;
     ClassObject* classJavaLangVMThread;
     ClassObject* classJavaLangThreadGroup;
-    ClassObject* classJavaLangThrowable;
-    ClassObject* classJavaLangStackOverflowError;
     ClassObject* classJavaLangStackTraceElement;
     ClassObject* classJavaLangStackTraceElementArray;
     ClassObject* classJavaLangAnnotationAnnotationArray;
@@ -224,15 +253,59 @@
     ClassObject* classJavaLangReflectMethod;
     ClassObject* classJavaLangReflectMethodArray;
     ClassObject* classJavaLangReflectProxy;
-    ClassObject* classJavaLangExceptionInInitializerError;
-    ClassObject* classJavaLangRefPhantomReference;
-    ClassObject* classJavaLangRefReference;
     ClassObject* classJavaNioReadWriteDirectByteBuffer;
     ClassObject* classJavaSecurityAccessController;
     ClassObject* classOrgApacheHarmonyLangAnnotationAnnotationFactory;
     ClassObject* classOrgApacheHarmonyLangAnnotationAnnotationMember;
     ClassObject* classOrgApacheHarmonyLangAnnotationAnnotationMemberArray;
 
+    /*
+     * classes representing exception types. The names here don't include
+     * packages, just to keep the use sites a bit less verbose. All are
+     * in java.lang, except where noted.
+     */
+    ClassObject* exAbstractMethodError;
+    ClassObject* exArithmeticException;
+    ClassObject* exArrayIndexOutOfBoundsException;
+    ClassObject* exArrayStoreException;
+    ClassObject* exClassCastException;
+    ClassObject* exClassCircularityError;
+    ClassObject* exClassFormatError;
+    ClassObject* exClassNotFoundException;
+    ClassObject* exError;
+    ClassObject* exExceptionInInitializerError;
+    ClassObject* exFileNotFoundException; /* in java.io */
+    ClassObject* exIOException;           /* in java.io */
+    ClassObject* exIllegalAccessError;
+    ClassObject* exIllegalAccessException;
+    ClassObject* exIllegalArgumentException;
+    ClassObject* exIllegalMonitorStateException;
+    ClassObject* exIllegalStateException;
+    ClassObject* exIllegalThreadStateException;
+    ClassObject* exIncompatibleClassChangeError;
+    ClassObject* exInstantiationError;
+    ClassObject* exInstantiationException;
+    ClassObject* exInternalError;
+    ClassObject* exInterruptedException;
+    ClassObject* exLinkageError;
+    ClassObject* exNegativeArraySizeException;
+    ClassObject* exNoClassDefFoundError;
+    ClassObject* exNoSuchFieldError;
+    ClassObject* exNoSuchFieldException;
+    ClassObject* exNoSuchMethodError;
+    ClassObject* exNullPointerException;
+    ClassObject* exOutOfMemoryError;
+    ClassObject* exRuntimeException;
+    ClassObject* exStackOverflowError;
+    ClassObject* exStaleDexCacheError;    /* in dalvik.system */
+    ClassObject* exStringIndexOutOfBoundsException;
+    ClassObject* exThrowable;
+    ClassObject* exTypeNotPresentException;
+    ClassObject* exUnsatisfiedLinkError;
+    ClassObject* exUnsupportedOperationException;
+    ClassObject* exVerifyError;
+    ClassObject* exVirtualMachineError;
+
     /* synthetic classes for arrays of primitives */
     ClassObject* classArrayBoolean;
     ClassObject* classArrayChar;
@@ -253,7 +326,6 @@
     int         offJavaLangClass_pd;
 
     /* field offsets - String */
-    int         javaLangStringReady;    /* 0=not init, 1=ready, -1=initing */
     int         offJavaLangString_value;
     int         offJavaLangString_count;
     int         offJavaLangString_offset;
@@ -301,14 +373,8 @@
     /* method pointers - java.lang.ref.Reference */
     Method*     methJavaLangRefReference_enqueueInternal;
 
-    /* method pointers - java.security.AccessController */
-    volatile int javaSecurityAccessControllerReady;
-    Method*     methJavaSecurityAccessController_doPrivileged[4];
-
     /* constructor method pointers; no vtable involved, so use Method* */
     Method*     methJavaLangStackTraceElement_init;
-    Method*     methJavaLangExceptionInInitializerError_init;
-    Method*     methJavaLangRefPhantomReference_init;
     Method*     methJavaLangReflectConstructor_init;
     Method*     methJavaLangReflectField_init;
     Method*     methJavaLangReflectMethod_init;
@@ -333,7 +399,7 @@
     int         offJavaNioBuffer_effectiveDirectAddress;
 
     /*
-     * VM-synthesized primitive classes, for arrays.
+     * VM-synthesized primitive classes, for arrays and reflection.
      */
     ClassObject* volatile primitiveClass[PRIM_MAX];
 
@@ -392,8 +458,8 @@
     pthread_cond_t  threadSuspendCountCond;
 
     /*
-     * Sum of all threads' suspendCount fields.  The JIT needs to know if any
-     * thread is suspended.  Guarded by threadSuspendCountLock.
+     * Sum of all threads' suspendCount fields. Guarded by
+     * threadSuspendCountLock.
      */
     int  sumThreadSuspendCount;
 
@@ -430,12 +496,10 @@
     /*
      * JNI global reference table.
      */
-#ifdef USE_INDIRECT_REF
     IndirectRefTable jniGlobalRefTable;
-#else
-    ReferenceTable  jniGlobalRefTable;
-#endif
+    IndirectRefTable jniWeakGlobalRefTable;
     pthread_mutex_t jniGlobalRefLock;
+    pthread_mutex_t jniWeakGlobalRefLock;
     int         jniGlobalRefHiMark;
     int         jniGlobalRefLoMark;
 
@@ -534,12 +598,8 @@
 
     /*
      * JDWP debugger support.
-     *
-     * Note "debuggerActive" is accessed from mterp, so its storage size and
-     * meaning must not be changed without updating the assembly sources.
      */
     bool        debuggerConnected;      /* debugger or DDMS is connected */
-    u1          debuggerActive;         /* debugger is making requests */
     JdwpState*  jdwpState;
 
     /*
@@ -579,27 +639,20 @@
     int             allocRecordHead;        /* most-recently-added entry */
     int             allocRecordCount;       /* #of valid entries */
 
-#ifdef WITH_DEADLOCK_PREDICTION
-    /* global lock on history tree accesses */
-    pthread_mutex_t deadlockHistoryLock;
-
-    enum { kDPOff=0, kDPWarn, kDPErr, kDPAbort } deadlockPredictMode;
-#endif
-
     /*
-     * When a profiler is enabled, this is incremented.  Distinct profilers
-     * include "dmtrace" method tracing, emulator method tracing, and
-     * possibly instruction counting.
-     *
-     * The purpose of this is to have a single value that the interpreter
-     * can check to see if any profiling activity is enabled.
+     * When normal control flow needs to be interrupted because
+     * of an attached debugger, profiler, thread stop request, etc.,
+     * a bit is set here.  We collapse all stop reasons into
+     * a single location for performance reasons.
      */
-    volatile int activeProfilers;
+    volatile int interpBreak;
 
     /*
      * State for method-trace profiling.
      */
     MethodTraceState methodTrace;
+    Method*     methodTraceGcMethod;
+    Method*     methodTraceClassPrepMethod;
 
     /*
      * State for emulator tracing.
@@ -666,6 +719,14 @@
 
 #if defined(WITH_JIT)
 
+/* Trace profiling modes.  Ordering matters - off states before on states */
+typedef enum TraceProfilingModes {
+    kTraceProfilingDisabled = 0,      // Not profiling
+    kTraceProfilingPeriodicOff = 1,   // Periodic profiling, off phase
+    kTraceProfilingContinuous = 2,    // Always profiling
+    kTraceProfilingPeriodicOn = 3     // Periodic profiling, on phase
+} TraceProfilingModes;
+
 /*
  * Exiting the compiled code w/o chaining will incur overhead to look up the
  * target in the code cache which is extra work only when JIT is enabled. So
@@ -704,9 +765,12 @@
      * are stored in each thread. */
     struct JitEntry *pJitEntryTable;
 
-    /* Array of profile threshold counters */
+    /* Array of compilation trigger threshold counters */
     unsigned char *pProfTable;
 
+    /* Trace profiling counters */
+    struct JitTraceProfCounters *pJitTraceProfCounters;
+
     /* Copy of pProfTable used for temporarily disabling the Jit */
     unsigned char *pProfTableCopy;
 
@@ -728,6 +792,9 @@
     /* JIT Compiler Control */
     bool               haltCompilerThread;
     bool               blockingMode;
+    bool               methodTraceSupport;
+    bool               genSuspendPoll;
+    Thread*            compilerThread;
     pthread_t          compilerHandle;
     pthread_mutex_t    compilerLock;
     pthread_mutex_t    compilerICPatchLock;
@@ -746,6 +813,20 @@
     /* Compiled code cache */
     void* codeCache;
 
+    /*
+     * This is used to store the base address of an in-flight compilation whose
+ * class object pointers have been calculated to populate the literal pool.
+     * Once the compiler thread has changed its status to VM_WAIT, we cannot
+     * guarantee whether GC has happened before the code address has been
+     * installed to the JIT table. Because of that, this field can only
+ * be cleared/overwritten by the compiler thread if it is in the
+     * THREAD_RUNNING state or in a safe point.
+     */
+    void *inflightBaseAddr;
+
+    /* Translation cache version (protected by compilerLock) */
+    int cacheVersion;
+
     /* Bytes used by the code templates */
     unsigned int templateSize;
 
@@ -785,8 +866,11 @@
     /* Flag to dump all compiled code */
     bool printMe;
 
-    /* Flag to count trace execution */
-    bool profile;
+    /* Trace profiling mode */
+    TraceProfilingModes profileMode;
+
+    /* Periodic trace profiling countdown timer */
+    int profileCountdown;
 
     /* Vector to disable selected optimizations */
     int disableOpt;
@@ -840,8 +924,12 @@
     int                icPatchQueued;
     int                icPatchRejected;
     int                icPatchDropped;
-    u8                 jitTime;
     int                codeCachePatches;
+    int                numCompilerThreadBlockGC;
+    u8                 jitTime;
+    u8                 compilerThreadBlockGCStart;
+    u8                 compilerThreadBlockGCTime;
+    u8                 maxCompilerThreadBlockGCTime;
 #endif
 
     /* Place arrays at the end to ease the display in gdb sessions */
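
One detail worth calling out in the Globals.h hunk: the TraceProfilingModes
comment says ordering matters, with the two "off" states numbered below the
two "on" states. A sketch of the kind of test that ordering enables
(hypothetical helper, not taken from this patch):

    typedef enum TraceProfilingModes {
        kTraceProfilingDisabled = 0,      /* values as declared above */
        kTraceProfilingPeriodicOff = 1,
        kTraceProfilingContinuous = 2,
        kTraceProfilingPeriodicOn = 3
    } TraceProfilingModes;

    /* Because both "off" values sort below both "on" values, one compare
     * answers "is trace profiling currently collecting?". */
    static int isProfilingOn(TraceProfilingModes mode)
    {
        return mode >= kTraceProfilingContinuous;
    }
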
diff --git a/vm/IndirectRefTable.c b/vm/IndirectRefTable.c
index dadd03f..0a69f3c 100644
--- a/vm/IndirectRefTable.c
+++ b/vm/IndirectRefTable.c
@@ -27,7 +27,7 @@
 {
     assert(initialCount > 0);
     assert(initialCount <= maxCount);
-    assert(kind == kIndirectKindLocal || kind == kIndirectKindGlobal);
+    assert(kind != kIndirectKindInvalid);
 
     pRef->table = (Object**) malloc(initialCount * sizeof(Object*));
     if (pRef->table == NULL)
@@ -299,7 +299,7 @@
         updateSlotRemove(pRef, idx);
 
 #ifndef NDEBUG
-        pRef->table[idx] = (IndirectRef) 0xd3d3d3d3;
+        pRef->table[idx] = (Object*)0xd3d3d3d3;
 #endif
 
         int numHoles =
diff --git a/vm/IndirectRefTable.h b/vm/IndirectRefTable.h
index 6a4db04..f5157cb 100644
--- a/vm/IndirectRefTable.h
+++ b/vm/IndirectRefTable.h
@@ -236,7 +236,7 @@
  */
 INLINE IndirectRefKind dvmGetIndirectRefType(IndirectRef iref)
 {
-    return (u4) iref & 0x03;
+    return (IndirectRefKind)((u4) iref & 0x03);
 }
 
 /*
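
The IndirectRefTable changes only tighten casts and relax an assert, but they
point at the encoding: an IndirectRef cookie carries its kind in the low two
bits (and the assert change suggests a third valid kind, matching the new
jniWeakGlobalRefTable in Globals.h). A toy decode under those assumptions:

    #include <stdint.h>

    typedef void* IndirectRef;

    typedef enum {                        /* values assumed for illustration */
        kIndirectKindInvalid    = 0,
        kIndirectKindLocal      = 1,
        kIndirectKindGlobal     = 2,
        kIndirectKindWeakGlobal = 3
    } IndirectRefKind;

    static IndirectRefKind refKindSketch(IndirectRef iref)
    {
        /* the kind rides in the low two bits of the cookie */
        return (IndirectRefKind)((uintptr_t) iref & 0x03);
    }
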
diff --git a/vm/Init.c b/vm/Init.c
index d1ff157..33d808b 100644
--- a/vm/Init.c
+++ b/vm/Init.c
@@ -106,7 +106,7 @@
     dvmFprintf(stderr, "\n");
     dvmFprintf(stderr, "These are unique to Dalvik:\n");
     dvmFprintf(stderr, "  -Xzygote\n");
-    dvmFprintf(stderr, "  -Xdexopt:{none,verified,all}\n");
+    dvmFprintf(stderr, "  -Xdexopt:{none,verified,all,full}\n");
     dvmFprintf(stderr, "  -Xnoquithandler\n");
     dvmFprintf(stderr,
                 "  -Xjnigreflimit:N  (must be multiple of 100, >= 200)\n");
@@ -120,7 +120,7 @@
     dvmFprintf(stderr, "  -Xgc:[no]concurrent\n");
     dvmFprintf(stderr, "  -Xgc:[no]verifycardtable\n");
     dvmFprintf(stderr, "  -XX:+DisableExplicitGC\n");
-    dvmFprintf(stderr, "  -Xgenregmap\n");
+    dvmFprintf(stderr, "  -X[no]genregmap\n");
     dvmFprintf(stderr, "  -Xverifyopt:[no]checkmon\n");
     dvmFprintf(stderr, "  -Xcheckdexsum\n");
 #if defined(WITH_JIT)
@@ -136,21 +136,13 @@
     dvmFprintf(stderr, "  -Xjitverbose\n");
     dvmFprintf(stderr, "  -Xjitprofile\n");
     dvmFprintf(stderr, "  -Xjitdisableopt\n");
+    dvmFprintf(stderr, "  -Xjitsuspendpoll\n");
 #endif
     dvmFprintf(stderr, "\n");
     dvmFprintf(stderr, "Configured with:"
         " debugger"
         " profiler"
         " hprof"
-#ifdef WITH_HPROF_STACK
-        " hprof_stack"
-#endif
-#ifdef WITH_MONITOR_TRACKING
-        " monitor_tracking"
-#endif
-#ifdef WITH_DEADLOCK_PREDICTION
-        " deadlock_prediction"
-#endif
 #ifdef WITH_TRACKREF_CHECKS
         " trackref_checks"
 #endif
@@ -187,9 +179,6 @@
 #if ANDROID_SMP != 0
         " smp"
 #endif
-#ifdef WITH_INLINE_PROFILING
-        " inline_profiling"
-#endif
     );
 #ifdef DVM_SHOW_EXCEPTION
     dvmFprintf(stderr, " show_exception=%d", DVM_SHOW_EXCEPTION);
@@ -758,8 +747,17 @@
             gDvm.bootClassPathStr = allPath;
 
         } else if (strncmp(argv[i], "-D", 2) == 0) {
-            /* set property */
-            dvmAddCommandLineProperty(argv[i] + 2);
+            /* Properties are handled in managed code. We just check syntax. */
+            if (strchr(argv[i], '=') == NULL) {
+                dvmFprintf(stderr, "Bad system property setting: \"%s\"\n",
+                    argv[i]);
+                return -1;
+            }
+            if (arrayAdd(gDvm.properties, strdup(argv[i] + 2)) == -1) {
+                dvmFprintf(stderr, "Can't set system property: \"%s\"\n",
+                    argv[i]);
+                return -1;
+            }
 
         } else if (strcmp(argv[i], "-jar") == 0) {
             // TODO: handle this; name of jar should be in argv[i+1]
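
The -D hunk above stops interpreting properties in the VM: it only checks
syntax and stores the raw "key=value" string for managed code to pick up
later. A stripped-down sketch of what that syntax check amounts to
(hypothetical helper, no cutils dependency):

    #include <stdbool.h>
    #include <string.h>

    /* "-Dfoo=bar" (passed here as "foo=bar") is accepted; "foo" is not. */
    static bool isValidPropertyArg(const char* arg)
    {
        return strchr(arg, '=') != NULL;
    }
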
@@ -889,6 +887,8 @@
                 gDvm.dexOptMode = OPTIMIZE_MODE_VERIFIED;
             else if (strcmp(argv[i] + 9, "all") == 0)
                 gDvm.dexOptMode = OPTIMIZE_MODE_ALL;
+            else if (strcmp(argv[i] + 9, "full") == 0)
+                gDvm.dexOptMode = OPTIMIZE_MODE_FULL;
             else {
                 dvmFprintf(stderr, "Unrecognized dexopt option '%s'\n",argv[i]);
                 return -1;
@@ -960,7 +960,7 @@
         } else if (strncmp(argv[i], "-Xjitverbose", 12) == 0) {
           gDvmJit.printMe = true;
         } else if (strncmp(argv[i], "-Xjitprofile", 12) == 0) {
-          gDvmJit.profile = true;
+          gDvmJit.profileMode = kTraceProfilingContinuous;
         } else if (strncmp(argv[i], "-Xjitdisableopt", 15) == 0) {
           /* Disable selected optimizations */
           if (argv[i][15] == ':') {
@@ -969,24 +969,8 @@
           } else {
               gDvmJit.disableOpt = -1;
           }
-#endif
-
-        } else if (strncmp(argv[i], "-Xdeadlockpredict:", 18) == 0) {
-#ifdef WITH_DEADLOCK_PREDICTION
-            if (strcmp(argv[i] + 18, "off") == 0)
-                gDvm.deadlockPredictMode = kDPOff;
-            else if (strcmp(argv[i] + 18, "warn") == 0)
-                gDvm.deadlockPredictMode = kDPWarn;
-            else if (strcmp(argv[i] + 18, "err") == 0)
-                gDvm.deadlockPredictMode = kDPErr;
-            else if (strcmp(argv[i] + 18, "abort") == 0)
-                gDvm.deadlockPredictMode = kDPAbort;
-            else {
-                dvmFprintf(stderr, "Bad value for -Xdeadlockpredict");
-                return -1;
-            }
-            if (gDvm.deadlockPredictMode != kDPOff)
-                LOGD("Deadlock prediction enabled (%s)\n", argv[i]+18);
+        } else if (strncmp(argv[i], "-Xjitsuspendpoll", 16) == 0) {
+          gDvmJit.genSuspendPoll = true;
 #endif
 
         } else if (strncmp(argv[i], "-Xstacktracefile:", 17) == 0) {
@@ -994,7 +978,8 @@
 
         } else if (strcmp(argv[i], "-Xgenregmap") == 0) {
             gDvm.generateRegisterMaps = true;
-            LOGV("Register maps will be generated during verification\n");
+        } else if (strcmp(argv[i], "-Xnogenregmap") == 0) {
+            gDvm.generateRegisterMaps = false;
 
         } else if (strcmp(argv[i], "Xverifyopt:checkmon") == 0) {
             gDvm.monitorVerification = true;
@@ -1084,6 +1069,8 @@
     gDvm.classVerifyMode = VERIFY_MODE_ALL;
     gDvm.dexOptMode = OPTIMIZE_MODE_VERIFIED;
     gDvm.monitorVerification = false;
+    gDvm.generateRegisterMaps = true;
+    gDvm.registerMapMode = kRegisterMapModeTypePrecise;
 
     /*
      * Default execution mode.
@@ -1174,12 +1161,12 @@
     for (i = 0; i < argc; i++)
         LOGV("  %d: '%s'\n", i, argv[i]);
 
-    setCommandLineDefaults();
-
     /* prep properties storage */
-    if (!dvmPropertiesStartup(argc))
+    if (!dvmPropertiesStartup())
         goto fail;
 
+    setCommandLineDefaults();
+
     /*
      * Process the option flags (if any).
      */
@@ -1240,12 +1227,16 @@
         goto fail;
     if (!dvmClassStartup())
         goto fail;
-    if (!dvmBaseClassStartup())
+
+    /*
+     * At this point, the system is guaranteed to be sufficiently
+     * initialized that we can look up classes and class members. This
+     * call populates the gDvm instance with all the class and member
+     * references that the VM wants to use directly.
+     */
+    if (!dvmFindRequiredClassesAndMembers())
         goto fail;
-    if (!dvmThreadObjStartup())
-        goto fail;
-    if (!dvmExceptionStartup())
-        goto fail;
+
     if (!dvmStringInternStartup())
         goto fail;
     if (!dvmNativeStartup())
@@ -1254,35 +1245,15 @@
         goto fail;
     if (!dvmJniStartup())
         goto fail;
-    if (!dvmReflectStartup())
-        goto fail;
     if (!dvmProfilingStartup())
         goto fail;
 
-    /* make sure we got these [can this go away?] */
-    assert(gDvm.classJavaLangClass != NULL);
-    assert(gDvm.classJavaLangObject != NULL);
-    //assert(gDvm.classJavaLangString != NULL);
-    assert(gDvm.classJavaLangThread != NULL);
-    assert(gDvm.classJavaLangVMThread != NULL);
-    assert(gDvm.classJavaLangThreadGroup != NULL);
-
     /*
-     * Make sure these exist.  If they don't, we can return a failure out
-     * of main and nip the whole thing in the bud.
+     * Create a table of methods for which we will substitute an "inline"
+     * version for performance.
      */
-    static const char* earlyClasses[] = {
-        "Ljava/lang/InternalError;",
-        "Ljava/lang/StackOverflowError;",
-        "Ljava/lang/UnsatisfiedLinkError;",
-        "Ljava/lang/NoClassDefFoundError;",
-        NULL
-    };
-    const char** pClassName;
-    for (pClassName = earlyClasses; *pClassName != NULL; pClassName++) {
-        if (dvmFindSystemClassNoInit(*pClassName) == NULL)
-            goto fail;
-    }
+    if (!dvmCreateInlineSubsTable())
+        goto fail;
 
     /*
      * Miscellaneous class library validation.
@@ -1343,9 +1314,6 @@
     if (!dvmDebuggerStartup())
         goto fail;
 
-    if (!dvmInlineNativeCheck())
-        goto fail;
-
     /*
      * Init for either zygote mode or non-zygote mode.  The key difference
      * is that we don't start any additional threads in Zygote mode.
@@ -1367,7 +1335,6 @@
 #endif
 
     assert(!dvmCheckException(dvmThreadSelf()));
-    gDvm.initExceptionCount = 0;
 
     return 0;
 
@@ -1684,11 +1651,9 @@
         LOGD("VM cleaning up\n");
 
     dvmDebuggerShutdown();
-    dvmReflectShutdown();
     dvmProfilingShutdown();
     dvmJniShutdown();
     dvmStringInternShutdown();
-    dvmExceptionShutdown();
     dvmThreadShutdown();
     dvmClassShutdown();
     dvmRegisterMapShutdown();
@@ -1702,6 +1667,8 @@
     dvmNativeShutdown();
     dvmInternalNativeShutdown();
 
+    dvmFreeInlineSubsTable();
+
     free(gDvm.bootClassPathStr);
     free(gDvm.classPathStr);
 
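The -D handling above defers property parsing to managed code: the VM only verifies that the argument contains an '=' and stashes the raw "name=value" string for later use. A minimal standalone sketch of that validation step (the function name and error stream here are illustrative, not the VM's own):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Accepts "-Dname=value"; rejects a "-D" argument with no '='.
     * Interpretation of the property itself is left to managed code. */
    static bool checkPropertyArg(const char* arg)
    {
        if (strncmp(arg, "-D", 2) != 0)
            return false;                           /* not a -D option */
        if (strchr(arg, '=') == NULL) {
            fprintf(stderr, "Bad system property setting: \"%s\"\n", arg);
            return false;                           /* missing separator */
        }
        return true;   /* e.g. checkPropertyArg("-Ddalvik.vm.checkjni=true") */
    }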
diff --git a/vm/Init.h b/vm/Init.h
index 63051a2..4d85c2e 100644
--- a/vm/Init.h
+++ b/vm/Init.h
@@ -41,6 +41,24 @@
     DexClassVerifyMode verifyMode, int dexoptFlags);
 
 /*
+ * Look up the set of classes and members used directly by the VM,
+ * storing references to them into the globals instance. See
+ * Globals.h. This function is exposed so that dex optimization may
+ * call it (while avoiding doing other unnecessary VM initialization).
+ *
+ * The function returns a success flag (true == success).
+ */
+bool dvmFindRequiredClassesAndMembers(void);
+
+/*
+ * Look up required members of the class Reference, and set the global
+ * reference to Reference itself too. This needs to be done separately
+ * from dvmFindRequiredClassesAndMembers(), during the course of
+ * linking the class Reference (which is done specially).
+ */
+bool dvmFindReferenceMembers(ClassObject* classReference);
+
+/*
  * Replacement for fprintf() when we want to send a message to the console.
  * This defaults to fprintf(), but will use the JNI fprintf callback if
  * one was provided.
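As the header comment above indicates, dvmFindRequiredClassesAndMembers() is exported so that dex optimization can perform the same lookups without running the rest of VM startup. A hedged sketch of such a call site (the surrounding function is illustrative; it is not the actual DexPrepare.c code):

    /* Illustrative caller: grab the cached class/member references
     * needed for optimization, and nothing else. */
    static bool prepareForOptimization(void)
    {
        if (!dvmFindRequiredClassesAndMembers()) {
            LOGE("Essential classes/members missing; cannot optimize\n");
            return false;
        }
        return true;
    }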
diff --git a/vm/InitRefs.c b/vm/InitRefs.c
new file mode 100644
index 0000000..211ecc4
--- /dev/null
+++ b/vm/InitRefs.c
@@ -0,0 +1,675 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Code to initialize references to classes and members for use by
+ * lower-level VM facilities
+ */
+
+#include "Dalvik.h"
+
+/*
+ * Helper for dvmFindRequiredClassesAndMembers(), which looks up
+ * classes and stores them to the indicated pointer, returning a
+ * failure code (false == failure).
+ */
+static bool initRef(ClassObject** pClass, const char* name) {
+    ClassObject* result;
+
+    if (*pClass != NULL) {
+        /*
+         * There are a couple cases where it's legit to call this
+         * function with an already-initialized reference, so just
+         * silently tolerate this instead of complaining loudly.
+         */
+        return true;
+    }
+
+    if (name[0] == '[') {
+        result = dvmFindArrayClass(name, NULL);
+    } else {
+        result = dvmFindSystemClassNoInit(name);
+    }
+
+    if (result == NULL) {
+        LOGE("Could not find essential class %s\n", name);
+        return false;
+    }
+
+    *pClass = result;
+    return true;
+}
+
+static bool find1(void) {
+    /*
+     * Note: Under normal VM use, this is called by dvmStartup()
+     * in Init.c. For dex optimization, this is called as well, but in
+     * that case, the call is made from DexPrepare.c.
+     */
+
+    bool ok = true;
+
+    /* The corest of the core classes */
+
+    ok &= initRef(&gDvm.classJavaLangClass, "Ljava/lang/Class;");
+    ok &= initRef(&gDvm.classJavaLangObject, "Ljava/lang/Object;");
+    ok &= initRef(&gDvm.exThrowable, "Ljava/lang/Throwable;");
+
+    ok &= initRef(&gDvm.classJavaLangString, "Ljava/lang/String;");
+    ok &= initRef(&gDvm.classJavaLangThread, "Ljava/lang/Thread;");
+    ok &= initRef(&gDvm.classJavaLangThreadGroup, "Ljava/lang/ThreadGroup;");
+    ok &= initRef(&gDvm.classJavaLangVMThread, "Ljava/lang/VMThread;");
+
+    /* Arrays of primitive types */
+
+    ok &= initRef(&gDvm.classArrayBoolean, "[Z");
+    ok &= initRef(&gDvm.classArrayByte, "[B");
+    ok &= initRef(&gDvm.classArrayShort, "[S");
+    ok &= initRef(&gDvm.classArrayChar, "[C");
+    ok &= initRef(&gDvm.classArrayInt, "[I");
+    ok &= initRef(&gDvm.classArrayLong, "[J");
+    ok &= initRef(&gDvm.classArrayFloat, "[F");
+    ok &= initRef(&gDvm.classArrayDouble, "[D");
+
+    /* Exception classes and related support classes */
+
+    ok &= initRef(&gDvm.exAbstractMethodError,
+            "Ljava/lang/AbstractMethodError;");
+    ok &= initRef(&gDvm.exArithmeticException,
+            "Ljava/lang/ArithmeticException;");
+    ok &= initRef(&gDvm.exArrayIndexOutOfBoundsException,
+            "Ljava/lang/ArrayIndexOutOfBoundsException;");
+    ok &= initRef(&gDvm.exArrayStoreException,
+            "Ljava/lang/ArrayStoreException;");
+    ok &= initRef(&gDvm.exClassCastException,
+            "Ljava/lang/ClassCastException;");
+    ok &= initRef(&gDvm.exClassCircularityError,
+            "Ljava/lang/ClassCircularityError;");
+    ok &= initRef(&gDvm.exClassNotFoundException,
+            "Ljava/lang/ClassNotFoundException;");
+    ok &= initRef(&gDvm.exClassFormatError, "Ljava/lang/ClassFormatError;");
+    ok &= initRef(&gDvm.exError, "Ljava/lang/Error;");
+    ok &= initRef(&gDvm.exExceptionInInitializerError,
+            "Ljava/lang/ExceptionInInitializerError;");
+    ok &= initRef(&gDvm.exFileNotFoundException,
+            "Ljava/io/FileNotFoundException;");
+    ok &= initRef(&gDvm.exIOException, "Ljava/io/IOException;");
+    ok &= initRef(&gDvm.exIllegalAccessError,
+            "Ljava/lang/IllegalAccessError;");
+    ok &= initRef(&gDvm.exIllegalAccessException,
+            "Ljava/lang/IllegalAccessException;");
+    ok &= initRef(&gDvm.exIllegalArgumentException,
+            "Ljava/lang/IllegalArgumentException;");
+    ok &= initRef(&gDvm.exIllegalMonitorStateException,
+            "Ljava/lang/IllegalMonitorStateException;");
+    ok &= initRef(&gDvm.exIllegalStateException,
+            "Ljava/lang/IllegalStateException;");
+    ok &= initRef(&gDvm.exIllegalThreadStateException,
+            "Ljava/lang/IllegalThreadStateException;");
+    ok &= initRef(&gDvm.exIncompatibleClassChangeError,
+            "Ljava/lang/IncompatibleClassChangeError;");
+    ok &= initRef(&gDvm.exInstantiationError,
+            "Ljava/lang/InstantiationError;");
+    ok &= initRef(&gDvm.exInstantiationException,
+            "Ljava/lang/InstantiationException;");
+    ok &= initRef(&gDvm.exInternalError,
+            "Ljava/lang/InternalError;");
+    ok &= initRef(&gDvm.exInterruptedException,
+            "Ljava/lang/InterruptedException;");
+    ok &= initRef(&gDvm.exLinkageError,
+            "Ljava/lang/LinkageError;");
+    ok &= initRef(&gDvm.exNegativeArraySizeException,
+            "Ljava/lang/NegativeArraySizeException;");
+    ok &= initRef(&gDvm.exNoClassDefFoundError,
+            "Ljava/lang/NoClassDefFoundError;");
+    ok &= initRef(&gDvm.exNoSuchFieldError,
+            "Ljava/lang/NoSuchFieldError;");
+    ok &= initRef(&gDvm.exNoSuchFieldException,
+            "Ljava/lang/NoSuchFieldException;");
+    ok &= initRef(&gDvm.exNoSuchMethodError,
+            "Ljava/lang/NoSuchMethodError;");
+    ok &= initRef(&gDvm.exNullPointerException,
+            "Ljava/lang/NullPointerException;");
+    ok &= initRef(&gDvm.exOutOfMemoryError,
+            "Ljava/lang/OutOfMemoryError;");
+    ok &= initRef(&gDvm.exRuntimeException, "Ljava/lang/RuntimeException;");
+    ok &= initRef(&gDvm.exStackOverflowError,
+            "Ljava/lang/StackOverflowError;");
+    ok &= initRef(&gDvm.exStaleDexCacheError,
+            "Ldalvik/system/StaleDexCacheError;");
+    ok &= initRef(&gDvm.exStringIndexOutOfBoundsException,
+            "Ljava/lang/StringIndexOutOfBoundsException;");
+    ok &= initRef(&gDvm.exTypeNotPresentException,
+            "Ljava/lang/TypeNotPresentException;");
+    ok &= initRef(&gDvm.exUnsatisfiedLinkError,
+            "Ljava/lang/UnsatisfiedLinkError;");
+    ok &= initRef(&gDvm.exUnsupportedOperationException,
+            "Ljava/lang/UnsupportedOperationException;");
+    ok &= initRef(&gDvm.exVerifyError,
+            "Ljava/lang/VerifyError;");
+    ok &= initRef(&gDvm.exVirtualMachineError,
+            "Ljava/lang/VirtualMachineError;");
+
+    ok &= initRef(&gDvm.classJavaLangStackTraceElement,
+            "Ljava/lang/StackTraceElement;");
+    ok &= initRef(&gDvm.classJavaLangStackTraceElementArray,
+            "[Ljava/lang/StackTraceElement;");
+
+    if (!ok) {
+        return false;
+    }
+
+    /*
+     * Find the StackTraceElement constructor. Note that, unlike other
+     * saved method lookups, we're using a Method* instead of a vtable
+     * offset. This is because constructors don't have vtable offsets.
+     * (Also, since we're creating the object in question, it's
+     * impossible for anyone to sub-class it.)
+     */
+    Method* meth;
+    meth = dvmFindDirectMethodByDescriptor(gDvm.classJavaLangStackTraceElement,
+        "<init>",
+        "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;I)V");
+    if (meth == NULL) {
+        LOGE("Unable to find constructor for StackTraceElement\n");
+        return false;
+    }
+    gDvm.methJavaLangStackTraceElement_init = meth;
+
+    /* grab an offset for the field Throwable.stackState */
+    gDvm.offJavaLangThrowable_stackState =
+        dvmFindFieldOffset(gDvm.exThrowable,
+            "stackState", "Ljava/lang/Object;");
+    if (gDvm.offJavaLangThrowable_stackState < 0) {
+        LOGE("Unable to find Throwable.stackState\n");
+        return false;
+    }
+
+    /* and one for the field Throwable.cause, just 'cause */
+    gDvm.offJavaLangThrowable_cause =
+        dvmFindFieldOffset(gDvm.exThrowable,
+            "cause", "Ljava/lang/Throwable;");
+    if (gDvm.offJavaLangThrowable_cause < 0) {
+        LOGE("Unable to find Throwable.cause\n");
+        return false;
+    }
+
+    return true;
+}
+
+static bool find2(void) {
+    ClassObject* clClass = dvmFindSystemClassNoInit("Ljava/lang/ClassLoader;");
+    Method* meth = dvmFindVirtualMethodByDescriptor(clClass, "loadClass",
+            "(Ljava/lang/String;)Ljava/lang/Class;");
+    if (meth == NULL) {
+        LOGE("Unable to find loadClass() in java.lang.ClassLoader\n");
+        return false;
+    }
+    gDvm.voffJavaLangClassLoader_loadClass = meth->methodIndex;
+
+    return true;
+}
+
+static bool find3(void) {
+    assert(gDvm.classJavaLangThread != NULL);
+    assert(gDvm.classJavaLangThreadGroup != NULL);
+    assert(gDvm.classJavaLangVMThread != NULL);
+
+    /*
+     * Cache field offsets.  This makes things a little faster, at the
+     * expense of hard-coding non-public field names into the VM.
+     */
+    gDvm.offJavaLangThread_vmThread =
+        dvmFindFieldOffset(gDvm.classJavaLangThread,
+            "vmThread", "Ljava/lang/VMThread;");
+    gDvm.offJavaLangThread_group =
+        dvmFindFieldOffset(gDvm.classJavaLangThread,
+            "group", "Ljava/lang/ThreadGroup;");
+    gDvm.offJavaLangThread_daemon =
+        dvmFindFieldOffset(gDvm.classJavaLangThread, "daemon", "Z");
+    gDvm.offJavaLangThread_name =
+        dvmFindFieldOffset(gDvm.classJavaLangThread,
+            "name", "Ljava/lang/String;");
+    gDvm.offJavaLangThread_priority =
+        dvmFindFieldOffset(gDvm.classJavaLangThread, "priority", "I");
+
+    if (gDvm.offJavaLangThread_vmThread < 0 ||
+        gDvm.offJavaLangThread_group < 0 ||
+        gDvm.offJavaLangThread_daemon < 0 ||
+        gDvm.offJavaLangThread_name < 0 ||
+        gDvm.offJavaLangThread_priority < 0)
+    {
+        LOGE("Unable to find all fields in java.lang.Thread\n");
+        return false;
+    }
+
+    gDvm.offJavaLangVMThread_thread =
+        dvmFindFieldOffset(gDvm.classJavaLangVMThread,
+            "thread", "Ljava/lang/Thread;");
+    gDvm.offJavaLangVMThread_vmData =
+        dvmFindFieldOffset(gDvm.classJavaLangVMThread, "vmData", "I");
+    if (gDvm.offJavaLangVMThread_thread < 0 ||
+        gDvm.offJavaLangVMThread_vmData < 0)
+    {
+        LOGE("Unable to find all fields in java.lang.VMThread\n");
+        return false;
+    }
+
+    /*
+     * Cache the vtable offset for "run()".
+     *
+     * We don't want to keep the Method* because then we won't see
+     * methods defined in subclasses.
+     */
+    Method* meth;
+    meth = dvmFindVirtualMethodByDescriptor(gDvm.classJavaLangThread, "run", "()V");
+    if (meth == NULL) {
+        LOGE("Unable to find run() in java.lang.Thread\n");
+        return false;
+    }
+    gDvm.voffJavaLangThread_run = meth->methodIndex;
+
+    /*
+     * Cache vtable offsets for ThreadGroup methods.
+     */
+    meth = dvmFindVirtualMethodByDescriptor(gDvm.classJavaLangThreadGroup,
+        "removeThread", "(Ljava/lang/Thread;)V");
+    if (meth == NULL) {
+        LOGE("Unable to find removeThread(Thread) in java.lang.ThreadGroup\n");
+        return false;
+    }
+    gDvm.voffJavaLangThreadGroup_removeThread = meth->methodIndex;
+
+    return true;
+}
+
+static bool find4(void) {
+    Method* meth;
+
+    /*
+     * Look up and cache pointers to some direct buffer classes, fields,
+     * and methods.
+     */
+    ClassObject* readWriteBufferClass =
+        dvmFindSystemClassNoInit("Ljava/nio/ReadWriteDirectByteBuffer;");
+    ClassObject* bufferClass =
+        dvmFindSystemClassNoInit("Ljava/nio/Buffer;");
+
+    if (readWriteBufferClass == NULL || bufferClass == NULL) {
+        LOGE("Unable to find internal direct buffer classes\n");
+        return false;
+    }
+    gDvm.classJavaNioReadWriteDirectByteBuffer = readWriteBufferClass;
+
+    meth = dvmFindDirectMethodByDescriptor(readWriteBufferClass,
+                "<init>",
+                "(II)V");
+    if (meth == NULL) {
+        LOGE("Unable to find ReadWriteDirectByteBuffer.<init>\n");
+        return false;
+    }
+    gDvm.methJavaNioReadWriteDirectByteBuffer_init = meth;
+
+    gDvm.offJavaNioBuffer_capacity =
+        dvmFindFieldOffset(bufferClass, "capacity", "I");
+    if (gDvm.offJavaNioBuffer_capacity < 0) {
+        LOGE("Unable to find Buffer.capacity\n");
+        return false;
+    }
+
+    gDvm.offJavaNioBuffer_effectiveDirectAddress =
+        dvmFindFieldOffset(bufferClass, "effectiveDirectAddress", "I");
+    if (gDvm.offJavaNioBuffer_effectiveDirectAddress < 0) {
+        LOGE("Unable to find Buffer.effectiveDirectAddress\n");
+        return false;
+    }
+
+    return true;
+}
+
+static bool find5(void)
+{
+    gDvm.classJavaLangReflectAccessibleObject =
+        dvmFindSystemClassNoInit("Ljava/lang/reflect/AccessibleObject;");
+    gDvm.classJavaLangReflectConstructor =
+        dvmFindSystemClassNoInit("Ljava/lang/reflect/Constructor;");
+    gDvm.classJavaLangReflectConstructorArray =
+        dvmFindArrayClass("[Ljava/lang/reflect/Constructor;", NULL);
+    gDvm.classJavaLangReflectField =
+        dvmFindSystemClassNoInit("Ljava/lang/reflect/Field;");
+    gDvm.classJavaLangReflectFieldArray =
+        dvmFindArrayClass("[Ljava/lang/reflect/Field;", NULL);
+    gDvm.classJavaLangReflectMethod =
+        dvmFindSystemClassNoInit("Ljava/lang/reflect/Method;");
+    gDvm.classJavaLangReflectMethodArray =
+        dvmFindArrayClass("[Ljava/lang/reflect/Method;", NULL);
+    gDvm.classJavaLangReflectProxy =
+        dvmFindSystemClassNoInit("Ljava/lang/reflect/Proxy;");
+    if (gDvm.classJavaLangReflectAccessibleObject == NULL ||
+        gDvm.classJavaLangReflectConstructor == NULL ||
+        gDvm.classJavaLangReflectConstructorArray == NULL ||
+        gDvm.classJavaLangReflectField == NULL ||
+        gDvm.classJavaLangReflectFieldArray == NULL ||
+        gDvm.classJavaLangReflectMethod == NULL ||
+        gDvm.classJavaLangReflectMethodArray == NULL ||
+        gDvm.classJavaLangReflectProxy == NULL)
+    {
+        LOGE("Could not find one or more reflection classes\n");
+        return false;
+    }
+
+    gDvm.methJavaLangReflectConstructor_init =
+        dvmFindDirectMethodByDescriptor(gDvm.classJavaLangReflectConstructor, "<init>",
+        "(Ljava/lang/Class;[Ljava/lang/Class;[Ljava/lang/Class;I)V");
+    gDvm.methJavaLangReflectField_init =
+        dvmFindDirectMethodByDescriptor(gDvm.classJavaLangReflectField, "<init>",
+        "(Ljava/lang/Class;Ljava/lang/Class;Ljava/lang/String;I)V");
+    gDvm.methJavaLangReflectMethod_init =
+        dvmFindDirectMethodByDescriptor(gDvm.classJavaLangReflectMethod, "<init>",
+        "(Ljava/lang/Class;[Ljava/lang/Class;[Ljava/lang/Class;Ljava/lang/Class;Ljava/lang/String;I)V");
+    if (gDvm.methJavaLangReflectConstructor_init == NULL ||
+        gDvm.methJavaLangReflectField_init == NULL ||
+        gDvm.methJavaLangReflectMethod_init == NULL)
+    {
+        LOGE("Could not find reflection constructors\n");
+        return false;
+    }
+
+    gDvm.classJavaLangClassArray =
+        dvmFindArrayClass("[Ljava/lang/Class;", NULL);
+    gDvm.classJavaLangObjectArray =
+        dvmFindArrayClass("[Ljava/lang/Object;", NULL);
+    if (gDvm.classJavaLangClassArray == NULL ||
+        gDvm.classJavaLangObjectArray == NULL)
+    {
+        LOGE("Could not find class-array or object-array class\n");
+        return false;
+    }
+
+    gDvm.offJavaLangReflectAccessibleObject_flag =
+        dvmFindFieldOffset(gDvm.classJavaLangReflectAccessibleObject, "flag",
+            "Z");
+
+    gDvm.offJavaLangReflectConstructor_slot =
+        dvmFindFieldOffset(gDvm.classJavaLangReflectConstructor, "slot", "I");
+    gDvm.offJavaLangReflectConstructor_declClass =
+        dvmFindFieldOffset(gDvm.classJavaLangReflectConstructor,
+            "declaringClass", "Ljava/lang/Class;");
+
+    gDvm.offJavaLangReflectField_slot =
+        dvmFindFieldOffset(gDvm.classJavaLangReflectField, "slot", "I");
+    gDvm.offJavaLangReflectField_declClass =
+        dvmFindFieldOffset(gDvm.classJavaLangReflectField,
+            "declaringClass", "Ljava/lang/Class;");
+
+    gDvm.offJavaLangReflectMethod_slot =
+        dvmFindFieldOffset(gDvm.classJavaLangReflectMethod, "slot", "I");
+    gDvm.offJavaLangReflectMethod_declClass =
+        dvmFindFieldOffset(gDvm.classJavaLangReflectMethod,
+            "declaringClass", "Ljava/lang/Class;");
+
+    if (gDvm.offJavaLangReflectAccessibleObject_flag < 0 ||
+        gDvm.offJavaLangReflectConstructor_slot < 0 ||
+        gDvm.offJavaLangReflectConstructor_declClass < 0 ||
+        gDvm.offJavaLangReflectField_slot < 0 ||
+        gDvm.offJavaLangReflectField_declClass < 0 ||
+        gDvm.offJavaLangReflectMethod_slot < 0 ||
+        gDvm.offJavaLangReflectMethod_declClass < 0)
+    {
+        LOGE("Could not find reflection fields\n");
+        return false;
+    }
+
+    return true;
+}
+
+static bool find6()
+{
+    /*
+     * Standard methods we must provide in our proxy.
+     */
+    Method* methE;
+    Method* methH;
+    Method* methT;
+    Method* methF;
+    methE = dvmFindVirtualMethodByDescriptor(gDvm.classJavaLangObject,
+                "equals", "(Ljava/lang/Object;)Z");
+    methH = dvmFindVirtualMethodByDescriptor(gDvm.classJavaLangObject,
+                "hashCode", "()I");
+    methT = dvmFindVirtualMethodByDescriptor(gDvm.classJavaLangObject,
+                "toString", "()Ljava/lang/String;");
+    methF = dvmFindVirtualMethodByDescriptor(gDvm.classJavaLangObject,
+                "finalize", "()V");
+    if (methE == NULL || methH == NULL || methT == NULL || methF == NULL) {
+        LOGE("Could not find equals/hashCode/toString/finalize in Object\n");
+        return false;
+    }
+    gDvm.voffJavaLangObject_equals = methE->methodIndex;
+    gDvm.voffJavaLangObject_hashCode = methH->methodIndex;
+    gDvm.voffJavaLangObject_toString = methT->methodIndex;
+    gDvm.voffJavaLangObject_finalize = methF->methodIndex;
+
+    /*
+     * The prototype signature needs to be cloned from a method in a
+     * "real" DEX file.  We declared this otherwise unused method just
+     * for this purpose.
+     */
+    ClassObject* proxyClass;
+    Method* meth;
+    proxyClass = dvmFindSystemClassNoInit("Ljava/lang/reflect/Proxy;");
+    if (proxyClass == NULL) {
+        LOGE("No java.lang.reflect.Proxy\n");
+        return false;
+    }
+    meth = dvmFindDirectMethodByDescriptor(proxyClass, "constructorPrototype",
+                "(Ljava/lang/reflect/InvocationHandler;)V");
+    if (meth == NULL) {
+        LOGE("Could not find java.lang.Proxy.constructorPrototype()\n");
+        return false;
+    }
+    gDvm.methJavaLangReflectProxy_constructorPrototype = meth;
+
+    /*
+     * Get the offset of the "h" field in Proxy.
+     */
+    gDvm.offJavaLangReflectProxy_h = dvmFindFieldOffset(proxyClass, "h",
+        "Ljava/lang/reflect/InvocationHandler;");
+    if (gDvm.offJavaLangReflectProxy_h < 0) {
+        LOGE("Unable to find 'h' field in java.lang.Proxy\n");
+        return false;
+    }
+
+    return true;
+}
+
+/*
+ * Perform Annotation setup.
+ */
+static bool find7(void)
+{
+    Method* meth;
+
+    /*
+     * Find some standard Annotation classes.
+     */
+    gDvm.classJavaLangAnnotationAnnotationArray =
+        dvmFindArrayClass("[Ljava/lang/annotation/Annotation;", NULL);
+    gDvm.classJavaLangAnnotationAnnotationArrayArray =
+        dvmFindArrayClass("[[Ljava/lang/annotation/Annotation;", NULL);
+    if (gDvm.classJavaLangAnnotationAnnotationArray == NULL ||
+        gDvm.classJavaLangAnnotationAnnotationArrayArray == NULL)
+    {
+        LOGE("Could not find Annotation-array classes\n");
+        return false;
+    }
+
+    /*
+     * VM-specific annotation classes.
+     */
+    gDvm.classOrgApacheHarmonyLangAnnotationAnnotationFactory =
+        dvmFindSystemClassNoInit("Lorg/apache/harmony/lang/annotation/AnnotationFactory;");
+    gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMember =
+        dvmFindSystemClassNoInit("Lorg/apache/harmony/lang/annotation/AnnotationMember;");
+    gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMemberArray =
+        dvmFindArrayClass("[Lorg/apache/harmony/lang/annotation/AnnotationMember;", NULL);
+    if (gDvm.classOrgApacheHarmonyLangAnnotationAnnotationFactory == NULL ||
+        gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMember == NULL ||
+        gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMemberArray == NULL)
+    {
+        LOGE("Could not find android.lang annotation classes\n");
+        return false;
+    }
+
+    meth = dvmFindDirectMethodByDescriptor(gDvm.classOrgApacheHarmonyLangAnnotationAnnotationFactory,
+            "createAnnotation",
+            "(Ljava/lang/Class;[Lorg/apache/harmony/lang/annotation/AnnotationMember;)Ljava/lang/annotation/Annotation;");
+    if (meth == NULL) {
+        LOGE("Unable to find createAnnotation() in android AnnotationFactory\n");
+        return false;
+    }
+    gDvm.methOrgApacheHarmonyLangAnnotationAnnotationFactory_createAnnotation = meth;
+
+    meth = dvmFindDirectMethodByDescriptor(gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMember,
+            "<init>",
+            "(Ljava/lang/String;Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/reflect/Method;)V");
+    if (meth == NULL) {
+        LOGE("Unable to find 4-arg constructor in android AnnotationMember\n");
+        return false;
+    }
+
+    gDvm.methOrgApacheHarmonyLangAnnotationAnnotationMember_init = meth;
+
+    return true;
+}
+
+static bool find8(void) {
+    ClassObject* clazz =
+        dvmFindClassNoInit("Ldalvik/system/VMDebug;", NULL);
+    assert(clazz != NULL);
+    gDvm.methodTraceGcMethod =
+        dvmFindDirectMethodByDescriptor(clazz, "startGC", "()V");
+    gDvm.methodTraceClassPrepMethod =
+        dvmFindDirectMethodByDescriptor(clazz, "startClassPrep", "()V");
+    if (gDvm.methodTraceGcMethod == NULL ||
+        gDvm.methodTraceClassPrepMethod == NULL)
+    {
+        LOGE("Unable to find startGC or startClassPrep\n");
+        return false;
+    }
+
+    return true;
+}
+
+static bool find9(void) {
+    gDvm.offJavaLangString_value =
+        dvmFindFieldOffset(gDvm.classJavaLangString, "value", "[C");
+    gDvm.offJavaLangString_count =
+        dvmFindFieldOffset(gDvm.classJavaLangString, "count", "I");
+    gDvm.offJavaLangString_offset =
+        dvmFindFieldOffset(gDvm.classJavaLangString, "offset", "I");
+    gDvm.offJavaLangString_hashCode =
+        dvmFindFieldOffset(gDvm.classJavaLangString, "hashCode", "I");
+
+    if (gDvm.offJavaLangString_value < 0 ||
+        gDvm.offJavaLangString_count < 0 ||
+        gDvm.offJavaLangString_offset < 0 ||
+        gDvm.offJavaLangString_hashCode < 0)
+    {
+        LOGE("VM-required field missing from java/lang/String\n");
+        return false;
+    }
+
+    bool badValue = false;
+    if (gDvm.offJavaLangString_value != STRING_FIELDOFF_VALUE) {
+        LOGE("InlineNative: String.value offset = %d, expected %d\n",
+            gDvm.offJavaLangString_value, STRING_FIELDOFF_VALUE);
+        badValue = true;
+    }
+    if (gDvm.offJavaLangString_count != STRING_FIELDOFF_COUNT) {
+        LOGE("InlineNative: String.count offset = %d, expected %d\n",
+            gDvm.offJavaLangString_count, STRING_FIELDOFF_COUNT);
+        badValue = true;
+    }
+    if (gDvm.offJavaLangString_offset != STRING_FIELDOFF_OFFSET) {
+        LOGE("InlineNative: String.offset offset = %d, expected %d\n",
+            gDvm.offJavaLangString_offset, STRING_FIELDOFF_OFFSET);
+        badValue = true;
+    }
+    if (gDvm.offJavaLangString_hashCode != STRING_FIELDOFF_HASHCODE) {
+        LOGE("InlineNative: String.hashCode offset = %d, expected %d\n",
+            gDvm.offJavaLangString_hashCode, STRING_FIELDOFF_HASHCODE);
+        badValue = true;
+    }
+    if (badValue)
+        return false;
+
+    return true;
+}
+
+/* (documented in header) */
+bool dvmFindRequiredClassesAndMembers(void) {
+    bool ok = true;
+
+    ok &= find1();
+    ok &= find2();
+    ok &= find3();
+    ok &= find4();
+    ok &= find5();
+    ok &= find6();
+    ok &= find7();
+    ok &= find8();
+    ok &= find9();
+
+    return ok;
+}
+
+/* (documented in header) */
+bool dvmFindReferenceMembers(ClassObject* classReference) {
+    if (gDvm.methJavaLangRefReference_enqueueInternal != NULL) {
+        LOGE("Attempt to set up class Reference more than once\n");
+        return false;
+    }
+
+    if (strcmp(classReference->descriptor, "Ljava/lang/ref/Reference;") != 0) {
+        LOGE("Attempt to set up the wrong class as Reference\n");
+        return false;
+    }
+
+    bool ok = true;
+
+    gDvm.offJavaLangRefReference_referent =
+        dvmFindFieldOffset(classReference, "referent", "Ljava/lang/Object;");
+    ok &= (gDvm.offJavaLangRefReference_referent >= 0);
+
+    gDvm.offJavaLangRefReference_queue =
+        dvmFindFieldOffset(classReference, "queue", "Ljava/lang/ref/ReferenceQueue;");
+    ok &= (gDvm.offJavaLangRefReference_queue >= 0);
+
+    gDvm.offJavaLangRefReference_queueNext =
+        dvmFindFieldOffset(classReference, "queueNext", "Ljava/lang/ref/Reference;");
+    ok &= (gDvm.offJavaLangRefReference_queueNext >= 0);
+
+    gDvm.offJavaLangRefReference_pendingNext =
+        dvmFindFieldOffset(classReference, "pendingNext", "Ljava/lang/ref/Reference;");
+    ok &= (gDvm.offJavaLangRefReference_pendingNext >= 0);
+
+    /* enqueueInternal() is private and thus a direct method. */
+    Method *meth = dvmFindDirectMethodByDescriptor(classReference, "enqueueInternal", "()Z");
+    ok &= (meth != NULL);
+    gDvm.methJavaLangRefReference_enqueueInternal = meth;
+
+    return ok;
+}
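All of the find*() helpers above accumulate their results with "ok &=" instead of returning at the first failure, so a single pass logs every missing class or member rather than only the first one. A stripped-down sketch of the idiom, with lookupSomething() standing in for the dvmFind*() calls:

    #include <stdbool.h>

    /* Stand-in for a dvmFind*() call; pretend item 1 is missing. */
    static bool lookupSomething(int which)
    {
        return which != 1;
    }

    static bool findAll(void)
    {
        bool ok = true;

        /* Every lookup still runs when an earlier one has failed,
         * so all missing items are reported in a single run. */
        ok &= lookupSomething(0);
        ok &= lookupSomething(1);
        ok &= lookupSomething(2);

        return ok;   /* false if any lookup failed */
    }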
diff --git a/vm/InlineNative.c b/vm/InlineNative.c
index c426d4d..6b78878 100644
--- a/vm/InlineNative.c
+++ b/vm/InlineNative.c
@@ -124,21 +124,22 @@
 /*
  * public char charAt(int index)
  */
-static bool javaLangString_charAt(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
+bool javaLangString_charAt(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
     JValue* pResult)
 {
     int count, offset;
     ArrayObject* chars;
 
     /* null reference check on "this" */
-    if (!dvmValidateObject((Object*) arg0))
+    if ((Object*) arg0 == NULL) {
+        dvmThrowNullPointerException(NULL);
         return false;
+    }
 
     //LOGI("String.charAt this=0x%08x index=%d\n", arg0, arg1);
     count = dvmGetFieldInt((Object*) arg0, STRING_FIELDOFF_COUNT);
     if ((s4) arg1 < 0 || (s4) arg1 >= count) {
-        dvmThrowExceptionFmt("Ljava/lang/StringIndexOutOfBoundsException;",
-            "index=%d length=%d", arg1, count);
+        dvmThrowStringIndexOutOfBoundsExceptionWithIndex(count, arg1);
         return false;
     } else {
         offset = dvmGetFieldInt((Object*) arg0, STRING_FIELDOFF_OFFSET);
@@ -195,7 +196,7 @@
 /*
  * public int compareTo(String s)
  */
-static bool javaLangString_compareTo(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
+bool javaLangString_compareTo(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
     JValue* pResult)
 {
     /*
@@ -204,9 +205,8 @@
      * anything else.  While we're at it, check out the other string,
      * which must also be non-null.
      */
-    if (!dvmValidateObject((Object*) arg0) ||
-        !dvmValidateObject((Object*) arg1))
-    {
+    if ((Object*) arg0 == NULL || (Object*) arg1 == NULL) {
+        dvmThrowNullPointerException(NULL);
         return false;
     }
 
@@ -291,14 +291,16 @@
 /*
  * public boolean equals(Object anObject)
  */
-static bool javaLangString_equals(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
+bool javaLangString_equals(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
     JValue* pResult)
 {
     /*
      * Null reference check on "this".
      */
-    if (!dvmValidateObject((Object*) arg0))
+    if ((Object*) arg0 == NULL) {
+        dvmThrowNullPointerException(NULL);
         return false;
+    }
 
     /* quick test for comparison with itself */
     if (arg0 == arg1) {
@@ -336,6 +338,21 @@
         return true;
     }
 
+    /*
+     * You may, at this point, be tempted to pull out the hashCode fields
+     * and compare them.  If both fields have been initialized, and they
+     * are not equal, we can return false immediately.
+     *
+     * However, the hashCode field is often not set.  If it is set,
+     * there's an excellent chance that the String is being used as a key
+     * in a hashed data structure (e.g. HashMap).  That data structure has
+     * already made the comparison and determined that the hashes are equal,
+     * making a check here redundant.
+     *
+     * It's not clear that checking the hashes will be a win in "typical"
+     * use cases.  We err on the side of simplicity and ignore them.
+     */
+
     thisOffset = dvmGetFieldInt((Object*) arg0, STRING_FIELDOFF_OFFSET);
     compOffset = dvmGetFieldInt((Object*) arg1, STRING_FIELDOFF_OFFSET);
     thisArray = (ArrayObject*)
@@ -383,14 +400,16 @@
 /*
  * public int length()
  */
-static bool javaLangString_length(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
+bool javaLangString_length(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
     JValue* pResult)
 {
     //LOGI("String.length this=0x%08x pResult=%p\n", arg0, pResult);
 
     /* null reference check on "this" */
-    if (!dvmValidateObject((Object*) arg0))
+    if ((Object*) arg0 == NULL) {
+        dvmThrowNullPointerException(NULL);
         return false;
+    }
 
     pResult->i = dvmGetFieldInt((Object*) arg0, STRING_FIELDOFF_COUNT);
     return true;
@@ -399,14 +418,16 @@
 /*
  * public boolean isEmpty()
  */
-static bool javaLangString_isEmpty(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
+bool javaLangString_isEmpty(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
     JValue* pResult)
 {
     //LOGI("String.isEmpty this=0x%08x pResult=%p\n", arg0, pResult);
 
     /* null reference check on "this" */
-    if (!dvmValidateObject((Object*) arg0))
+    if ((Object*) arg0 == NULL) {
+        dvmThrowNullPointerException(NULL);
         return false;
+    }
 
     pResult->i = (dvmGetFieldInt((Object*) arg0, STRING_FIELDOFF_COUNT) == 0);
     return true;
@@ -470,12 +491,14 @@
  * The character must be <= 0xffff; this method does not handle supplementary
  * characters.
  */
-static bool javaLangString_fastIndexOf_II(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
+bool javaLangString_fastIndexOf_II(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
     JValue* pResult)
 {
     /* null reference check on "this" */
-    if (!dvmValidateObject((Object*) arg0))
+    if ((Object*) arg0 == NULL) {
+        dvmThrowNullPointerException(NULL);
         return false;
+    }
 
     pResult->i = indexOfCommon((Object*) arg0, arg1, arg2);
     return true;
@@ -502,7 +525,7 @@
 /*
  * public static int abs(int)
  */
-static bool javaLangMath_abs_int(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
+bool javaLangMath_abs_int(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
     JValue* pResult)
 {
     s4 val = (s4) arg0;
@@ -513,7 +536,7 @@
 /*
  * public static long abs(long)
  */
-static bool javaLangMath_abs_long(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
+bool javaLangMath_abs_long(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
     JValue* pResult)
 {
     Convert64 convert;
@@ -527,7 +550,7 @@
 /*
  * public static float abs(float)
  */
-static bool javaLangMath_abs_float(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
+bool javaLangMath_abs_float(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
     JValue* pResult)
 {
     Convert32 convert;
@@ -540,7 +563,7 @@
 /*
  * public static double abs(double)
  */
-static bool javaLangMath_abs_double(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
+bool javaLangMath_abs_double(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
     JValue* pResult)
 {
     Convert64 convert;
@@ -555,7 +578,7 @@
 /*
  * public static int min(int)
  */
-static bool javaLangMath_min_int(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
+bool javaLangMath_min_int(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
     JValue* pResult)
 {
     pResult->i = ((s4) arg0 < (s4) arg1) ? arg0 : arg1;
@@ -565,7 +588,7 @@
 /*
  * public static int max(int)
  */
-static bool javaLangMath_max_int(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
+bool javaLangMath_max_int(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
     JValue* pResult)
 {
     pResult->i = ((s4) arg0 > (s4) arg1) ? arg0 : arg1;
@@ -579,7 +602,7 @@
  * by an fcmpd of the result against itself.  If it doesn't match (i.e.
  * it's NaN), the libm sqrt() is invoked.
  */
-static bool javaLangMath_sqrt(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
+bool javaLangMath_sqrt(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
     JValue* pResult)
 {
     Convert64 convert;
@@ -592,7 +615,7 @@
 /*
  * public static double cos(double)
  */
-static bool javaLangMath_cos(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
+bool javaLangMath_cos(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
     JValue* pResult)
 {
     Convert64 convert;
@@ -605,7 +628,7 @@
 /*
  * public static double sin(double)
  */
-static bool javaLangMath_sin(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
+bool javaLangMath_sin(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
     JValue* pResult)
 {
     Convert64 convert;
@@ -621,7 +644,7 @@
  * ===========================================================================
  */
 
-static bool javaLangFloat_floatToIntBits(u4 arg0, u4 arg1, u4 arg2, u4 arg,
+bool javaLangFloat_floatToIntBits(u4 arg0, u4 arg1, u4 arg2, u4 arg,
     JValue* pResult)
 {
     Convert32 convert;
@@ -630,14 +653,14 @@
     return true;
 }
 
-static bool javaLangFloat_floatToRawIntBits(u4 arg0, u4 arg1, u4 arg2, u4 arg,
+bool javaLangFloat_floatToRawIntBits(u4 arg0, u4 arg1, u4 arg2, u4 arg,
     JValue* pResult)
 {
     pResult->i = arg0;
     return true;
 }
 
-static bool javaLangFloat_intBitsToFloat(u4 arg0, u4 arg1, u4 arg2, u4 arg,
+bool javaLangFloat_intBitsToFloat(u4 arg0, u4 arg1, u4 arg2, u4 arg,
     JValue* pResult)
 {
     Convert32 convert;
@@ -652,7 +675,7 @@
  * ===========================================================================
  */
 
-static bool javaLangDouble_doubleToLongBits(u4 arg0, u4 arg1, u4 arg2, u4 arg,
+bool javaLangDouble_doubleToLongBits(u4 arg0, u4 arg1, u4 arg2, u4 arg,
     JValue* pResult)
 {
     Convert64 convert;
@@ -662,7 +685,7 @@
     return true;
 }
 
-static bool javaLangDouble_doubleToRawLongBits(u4 arg0, u4 arg1, u4 arg2,
+bool javaLangDouble_doubleToRawLongBits(u4 arg0, u4 arg1, u4 arg2,
     u4 arg, JValue* pResult)
 {
     Convert64 convert;
@@ -672,7 +695,7 @@
     return true;
 }
 
-static bool javaLangDouble_longBitsToDouble(u4 arg0, u4 arg1, u4 arg2, u4 arg,
+bool javaLangDouble_longBitsToDouble(u4 arg0, u4 arg1, u4 arg2, u4 arg,
     JValue* pResult)
 {
     Convert64 convert;
@@ -898,17 +921,3 @@
     TRACE_METHOD_EXIT(self, method);
     return result;
 }
-
-/*
- * Check that we can resolve every inline native.
- */
-bool dvmInlineNativeCheck(void)
-{
-    int op;
-    for (op = 0; op < NELEM(gDvmInlineOpsTable); ++op) {
-        if (resolveInlineNative(op) == NULL) {
-            dvmAbort();
-        }
-    }
-    return true;
-}
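The Float and Double inlines above move bit patterns through the VM's Convert32/Convert64 helpers. The underlying idea is a plain union type-pun between a floating-point value and its raw integer encoding; a self-contained sketch follows (the union and its member names are illustrative, not the VM's actual Convert32 definition, and it shows only the raw-bits case, without Java's NaN canonicalization):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for Convert32: view the same 32 bits either
     * as a float or as its raw IEEE-754 integer representation. */
    typedef union {
        float    f;
        uint32_t bits;
    } Bits32;

    static uint32_t floatToRawBits(float value)
    {
        Bits32 convert;
        convert.f = value;
        return convert.bits;
    }

    static float rawBitsToFloat(uint32_t bits)
    {
        Bits32 convert;
        convert.bits = bits;
        return convert.f;
    }

    int main(void)
    {
        printf("0x%08x\n", (unsigned)floatToRawBits(1.0f));  /* 0x3f800000 */
        printf("%g\n", rawBitsToFloat(0x40490fdbu));         /* approx. 3.14159 */
        return 0;
    }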
diff --git a/vm/InlineNative.h b/vm/InlineNative.h
index cb31f51..280f6af 100644
--- a/vm/InlineNative.h
+++ b/vm/InlineNative.h
@@ -13,6 +13,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 /*
  * Inlined native functions.
  */
@@ -21,7 +22,6 @@
 
 /* startup/shutdown */
 bool dvmInlineNativeStartup(void);
-bool dvmInlineNativeCheck(void);
 void dvmInlineNativeShutdown(void);
 
 Method* dvmFindInlinableMethod(const char* classDescriptor,
@@ -52,7 +52,11 @@
     const char*     methodSignature;
 } InlineOperation;
 
-/* Must be kept in sync w/ gDvmInlineOpsTable in InlineNative.c */
+/*
+ * Must be kept in sync w/ gDvmInlineOpsTable in InlineNative.c
+ *
+ * You should also add a test to libcore's IntrinsicTest.
+ */
 typedef enum NativeInlineOps {
     INLINE_EMPTYINLINEMETHOD = 0,
     INLINE_STRING_CHARAT = 1,
diff --git a/vm/Intern.c b/vm/Intern.c
index f91cc35..8f84528 100644
--- a/vm/Intern.c
+++ b/vm/Intern.c
@@ -63,7 +63,7 @@
         /*
          * Check the literal table for a match.
          */
-        StringObject* literal = dvmHashTableLookup(gDvm.literalStrings,
+        StringObject* literal = (StringObject*)dvmHashTableLookup(gDvm.literalStrings,
                                                    hash, strObj,
                                                    dvmHashcmpStrings,
                                                    false);
@@ -77,7 +77,7 @@
              * There is no match in the literal table, check the
              * interned string table.
              */
-            StringObject* interned = dvmHashTableLookup(gDvm.internedStrings,
+            StringObject* interned = (StringObject*)dvmHashTableLookup(gDvm.internedStrings,
                                                         hash, strObj,
                                                         dvmHashcmpStrings,
                                                         false);
@@ -87,7 +87,7 @@
                  * matching string to the literal table.
                  */
                 dvmHashTableRemove(gDvm.internedStrings, hash, interned);
-                found = dvmHashTableLookup(gDvm.literalStrings,
+                found = (StringObject*)dvmHashTableLookup(gDvm.literalStrings,
                                            hash, interned,
                                            dvmHashcmpStrings,
                                            true);
@@ -97,7 +97,7 @@
                  * No match in the literal table or the interned
                  * table.  Insert into the literal table.
                  */
-                found = dvmHashTableLookup(gDvm.literalStrings,
+                found = (StringObject*)dvmHashTableLookup(gDvm.literalStrings,
                                            hash, strObj,
                                            dvmHashcmpStrings,
                                            true);
@@ -108,7 +108,7 @@
         /*
          * Check the literal table for a match.
          */
-        found = dvmHashTableLookup(gDvm.literalStrings,
+        found = (StringObject*)dvmHashTableLookup(gDvm.literalStrings,
                                    hash, strObj,
                                    dvmHashcmpStrings,
                                    false);
@@ -117,7 +117,7 @@
              * No match was found in the literal table.  Insert into
              * the intern table.
              */
-            found = dvmHashTableLookup(gDvm.internedStrings,
+            found = (StringObject*)dvmHashTableLookup(gDvm.internedStrings,
                                        hash, strObj,
                                        dvmHashcmpStrings,
                                        true);
@@ -163,8 +163,8 @@
     }
     dvmLockMutex(&gDvm.internLock);
     hash = dvmComputeStringHash(strObj);
-    found = dvmHashTableLookup(gDvm.internedStrings, hash, (void*)strObj,
-                               dvmHashcmpStrings, false);
+    found = (StringObject*)dvmHashTableLookup(gDvm.internedStrings, hash,
+                               (StringObject*)strObj, dvmHashcmpStrings, false);
     dvmUnlockMutex(&gDvm.internLock);
     return found == strObj;
 }
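The casts added above do not change behavior in C, where a void* converts implicitly to any object pointer type; they matter if the same code is ever compiled as C++, where that implicit conversion is an error (offered here as a likely motivation, not one stated in the change itself). A minimal illustration with a stand-in for dvmHashTableLookup():

    #include <stddef.h>

    typedef struct StringObject StringObject;

    /* Stand-in for a generic lookup that, like dvmHashTableLookup(),
     * returns void* rather than a typed pointer. */
    static void* genericLookup(void)
    {
        return NULL;
    }

    static void example(void)
    {
        /* Accepted by a C compiler, rejected by a C++ compiler. */
        StringObject* implicitPtr = genericLookup();

        /* Accepted by both: the conversion is written out. */
        StringObject* explicitPtr = (StringObject*)genericLookup();

        (void)implicitPtr;
        (void)explicitPtr;
    }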
diff --git a/vm/JarFile.c b/vm/JarFile.c
index 1147eca..f26f1d6 100644
--- a/vm/JarFile.c
+++ b/vm/JarFile.c
@@ -51,7 +51,7 @@
     size_t bufLen = fileNameLen + suffixLen + 1;
     int fd = -1;
 
-    buf = malloc(bufLen);
+    buf = (char*)malloc(bufLen);
     if (buf == NULL) {
         errno = ENOMEM;
         return -1;
diff --git a/vm/Jni.c b/vm/Jni.c
index 88f84ac..e39a065 100644
--- a/vm/Jni.c
+++ b/vm/Jni.c
@@ -236,7 +236,6 @@
 
 /* fwd */
 static const struct JNINativeInterface gNativeInterface;
-static jobject addGlobalReference(Object* obj);
 
 #ifdef WITH_JNI_STACK_CHECK
 # define COMPUTE_STACK_SUM(_self)   computeStackSum(_self);
@@ -287,6 +286,8 @@
 #define kGrefWaterInterval          100
 #define kTrackGrefUsage             true
 
+#define kWeakGlobalRefsTableInitialSize 16
+
 #define kPinTableInitialSize        16
 #define kPinTableMaxSize            1024
 #define kPinComplainThreshold       10
@@ -297,18 +298,19 @@
  */
 bool dvmJniStartup(void)
 {
-#ifdef USE_INDIRECT_REF
     if (!dvmInitIndirectRefTable(&gDvm.jniGlobalRefTable,
-            kGlobalRefsTableInitialSize, kGlobalRefsTableMaxSize,
-            kIndirectKindGlobal))
+                                 kGlobalRefsTableInitialSize,
+                                 kGlobalRefsTableMaxSize,
+                                 kIndirectKindGlobal))
         return false;
-#else
-    if (!dvmInitReferenceTable(&gDvm.jniGlobalRefTable,
-            kGlobalRefsTableInitialSize, kGlobalRefsTableMaxSize))
+    if (!dvmInitIndirectRefTable(&gDvm.jniWeakGlobalRefTable,
+                                 kWeakGlobalRefsTableInitialSize,
+                                 kGlobalRefsTableMaxSize,
+                                 kIndirectKindWeakGlobal))
         return false;
-#endif
 
     dvmInitMutex(&gDvm.jniGlobalRefLock);
+    dvmInitMutex(&gDvm.jniWeakGlobalRefLock);
     gDvm.jniGlobalRefLoMark = 0;
     gDvm.jniGlobalRefHiMark = kGrefWaterInterval * 2;
 
@@ -318,64 +320,6 @@
 
     dvmInitMutex(&gDvm.jniPinRefLock);
 
-    Method* meth;
-
-    /*
-     * Grab the PhantomReference constructor.
-     */
-    gDvm.classJavaLangRefPhantomReference =
-        dvmFindSystemClassNoInit("Ljava/lang/ref/PhantomReference;");
-    if (gDvm.classJavaLangRefPhantomReference == NULL) {
-        LOGE("Unable to find PhantomReference class\n");
-        return false;
-    }
-    meth= dvmFindDirectMethodByDescriptor(gDvm.classJavaLangRefPhantomReference,
-        "<init>", "(Ljava/lang/Object;Ljava/lang/ref/ReferenceQueue;)V");
-    if (meth == NULL) {
-        LOGE("Unable to find constructor for PhantomReference\n");
-        return false;
-    }
-    gDvm.methJavaLangRefPhantomReference_init = meth;
-
-
-    /*
-     * Look up and cache pointers to some direct buffer classes, fields,
-     * and methods.
-     */
-    ClassObject* readWriteBufferClass =
-        dvmFindSystemClassNoInit("Ljava/nio/ReadWriteDirectByteBuffer;");
-    ClassObject* bufferClass =
-        dvmFindSystemClassNoInit("Ljava/nio/Buffer;");
-
-    if (readWriteBufferClass == NULL || bufferClass == NULL) {
-        LOGE("Unable to find internal direct buffer classes\n");
-        return false;
-    }
-    gDvm.classJavaNioReadWriteDirectByteBuffer = readWriteBufferClass;
-
-    meth = dvmFindDirectMethodByDescriptor(readWriteBufferClass,
-                "<init>",
-                "(II)V");
-    if (meth == NULL) {
-        LOGE("Unable to find ReadWriteDirectByteBuffer.<init>\n");
-        return false;
-    }
-    gDvm.methJavaNioReadWriteDirectByteBuffer_init = meth;
-
-    gDvm.offJavaNioBuffer_capacity =
-        dvmFindFieldOffset(bufferClass, "capacity", "I");
-    if (gDvm.offJavaNioBuffer_capacity < 0) {
-        LOGE("Unable to find Buffer.capacity\n");
-        return false;
-    }
-
-    gDvm.offJavaNioBuffer_effectiveDirectAddress =
-        dvmFindFieldOffset(bufferClass, "effectiveDirectAddress", "I");
-    if (gDvm.offJavaNioBuffer_effectiveDirectAddress < 0) {
-        LOGE("Unable to find Buffer.effectiveDirectAddress\n");
-        return false;
-    }
-
     return true;
 }
 
@@ -384,11 +328,8 @@
  */
 void dvmJniShutdown(void)
 {
-#ifdef USE_INDIRECT_REF
     dvmClearIndirectRefTable(&gDvm.jniGlobalRefTable);
-#else
-    dvmClearReferenceTable(&gDvm.jniGlobalRefTable);
-#endif
+    dvmClearIndirectRefTable(&gDvm.jniWeakGlobalRefTable);
     dvmClearReferenceTable(&gDvm.jniPinRefTable);
 }
 
@@ -498,17 +439,11 @@
  * Going through "env" rather than dvmThreadSelf() is faster but will
  * get weird if the JNI code is passing the wrong JNIEnv around.
  */
-#ifdef USE_INDIRECT_REF
 static inline IndirectRefTable* getLocalRefTable(JNIEnv* env)
-#else
-static inline ReferenceTable* getLocalRefTable(JNIEnv* env)
-#endif
 {
-    //return &dvmThreadSelf()->jniLocalRefTable;
     return &((JNIEnvExt*)env)->self->jniLocalRefTable;
 }
 
-#ifdef USE_INDIRECT_REF
 /*
  * Convert an indirect reference to an Object reference.  The indirect
  * reference may be local, global, or weak-global.
@@ -540,10 +475,11 @@
         break;
     case kIndirectKindWeakGlobal:
         {
-            // TODO: implement
-            LOGE("weak-global not yet supported\n");
-            result = NULL;
-            dvmAbort();
+            // TODO: find a way to avoid the mutex activity here
+            IndirectRefTable* pRefTable = &gDvm.jniWeakGlobalRefTable;
+            dvmLockMutex(&gDvm.jniWeakGlobalRefLock);
+            result = dvmGetFromIndirectRefTable(pRefTable, jobj);
+            dvmUnlockMutex(&gDvm.jniWeakGlobalRefLock);
         }
         break;
     case kIndirectKindInvalid:
@@ -556,9 +492,6 @@
 
     return result;
 }
-#else
-    /* use trivial inline in JniInternal.h for performance */
-#endif
 
 /*
  * Add a local reference for an object to the current stack frame.  When
@@ -577,14 +510,10 @@
     if (obj == NULL)
         return NULL;
 
-    jobject jobj;
-
-#ifdef USE_INDIRECT_REF
     IndirectRefTable* pRefTable = getLocalRefTable(env);
     void* curFrame = ((JNIEnvExt*)env)->self->curFrame;
     u4 cookie = SAVEAREA_FROM_FP(curFrame)->xtra.localRefCookie;
-
-    jobj = (jobject) dvmAddToIndirectRefTable(pRefTable, cookie, obj);
+    jobject jobj = (jobject) dvmAddToIndirectRefTable(pRefTable, cookie, obj);
     if (jobj == NULL) {
         dvmDumpIndirectRefTable(pRefTable, "JNI local");
         LOGE("Failed adding to JNI local ref table (has %d entries)\n",
@@ -597,25 +526,6 @@
             dvmGetCurrentJNIMethod()->name,
             (int) dvmReferenceTableEntries(pRefTable));
     }
-#else
-    ReferenceTable* pRefTable = getLocalRefTable(env);
-
-    if (!dvmAddToReferenceTable(pRefTable, obj)) {
-        dvmDumpReferenceTable(pRefTable, "JNI local");
-        LOGE("Failed adding to JNI local ref table (has %d entries)\n",
-            (int) dvmReferenceTableEntries(pRefTable));
-        dvmDumpThread(dvmThreadSelf(), false);
-        dvmAbort();     // spec says call FatalError; this is equivalent
-    } else {
-        LOGVV("LREF add %p  (%s.%s) (ent=%d)\n", obj,
-            dvmGetCurrentJNIMethod()->clazz->descriptor,
-            dvmGetCurrentJNIMethod()->name,
-            (int) dvmReferenceTableEntries(pRefTable));
-    }
-
-    jobj = (jobject) obj;
-#endif
-
     return jobj;
 }
 
@@ -625,16 +535,10 @@
  */
 static bool ensureLocalCapacity(JNIEnv* env, int capacity)
 {
-#ifdef USE_INDIRECT_REF
     IndirectRefTable* pRefTable = getLocalRefTable(env);
     int numEntries = dvmIndirectRefTableEntries(pRefTable);
     // TODO: this isn't quite right, since "numEntries" includes holes
     return ((kJniLocalRefMax - numEntries) >= capacity);
-#else
-    ReferenceTable* pRefTable = getLocalRefTable(env);
-
-    return (kJniLocalRefMax - (pRefTable->nextEntry - pRefTable->table) >= capacity);
-#endif
 }
 
 /*
@@ -645,7 +549,6 @@
     if (jobj == NULL)
         return;
 
-#ifdef USE_INDIRECT_REF
     IndirectRefTable* pRefTable = getLocalRefTable(env);
     Thread* self = ((JNIEnvExt*)env)->self;
     u4 cookie = SAVEAREA_FROM_FP(self->curFrame)->xtra.localRefCookie;
@@ -660,23 +563,6 @@
          */
         LOGW("JNI WARNING: DeleteLocalRef(%p) failed to find entry\n", jobj);
     }
-#else
-    ReferenceTable* pRefTable = getLocalRefTable(env);
-    Thread* self = ((JNIEnvExt*)env)->self;
-    Object** bottom = SAVEAREA_FROM_FP(self->curFrame)->xtra.localRefCookie;
-
-    if (!dvmRemoveFromReferenceTable(pRefTable, bottom, (Object*) jobj)) {
-        /*
-         * Attempting to delete a local reference that is not in the
-         * topmost local reference frame is a no-op.  DeleteLocalRef returns
-         * void and doesn't throw any exceptions, but we should probably
-         * complain about it so the user will notice that things aren't
-         * going quite the way they expect.
-         */
-        LOGW("JNI WARNING: DeleteLocalRef(%p) failed to find entry (valid=%d)\n",
-            jobj, dvmIsValidObject((Object*) jobj));
-    }
-#endif
 }
 
 /*
@@ -734,7 +620,6 @@
      * we're either leaking global ref table entries or we're going to
      * run out of space in the GC heap.
      */
-#ifdef USE_INDIRECT_REF
     jobj = dvmAddToIndirectRefTable(&gDvm.jniGlobalRefTable, IRT_FIRST_SEGMENT,
             obj);
     if (jobj == NULL) {
@@ -771,46 +656,38 @@
             }
         }
     }
-#else
-    if (!dvmAddToReferenceTable(&gDvm.jniGlobalRefTable, obj)) {
-        dvmDumpReferenceTable(&gDvm.jniGlobalRefTable, "JNI global");
-        LOGE("Failed adding to JNI global ref table (%d entries)\n",
-            (int) dvmReferenceTableEntries(&gDvm.jniGlobalRefTable));
-        dvmAbort();
-    }
-    jobj = (jobject) obj;
-
-    LOGVV("GREF add %p  (%s.%s)\n", obj,
-        dvmGetCurrentJNIMethod()->clazz->descriptor,
-        dvmGetCurrentJNIMethod()->name);
-
-    /* GREF usage tracking; should probably be disabled for production env */
-    if (kTrackGrefUsage && gDvm.jniGrefLimit != 0) {
-        int count = dvmReferenceTableEntries(&gDvm.jniGlobalRefTable);
-        if (count > gDvm.jniGlobalRefHiMark) {
-            LOGD("GREF has increased to %d\n", count);
-            gDvm.jniGlobalRefHiMark += kGrefWaterInterval;
-            gDvm.jniGlobalRefLoMark += kGrefWaterInterval;
-
-            /* watch for "excessive" use; not generally appropriate */
-            if (count >= gDvm.jniGrefLimit) {
-                JavaVMExt* vm = (JavaVMExt*) gDvm.vmList;
-                if (vm->warnError) {
-                    dvmDumpReferenceTable(&gDvm.jniGlobalRefTable,"JNI global");
-                    LOGE("Excessive JNI global references (%d)\n", count);
-                    dvmAbort();
-                } else {
-                    LOGW("Excessive JNI global references (%d)\n", count);
-                }
-            }
-        }
-    }
-#endif
-
     dvmUnlockMutex(&gDvm.jniGlobalRefLock);
     return jobj;
 }
 
+static jobject addWeakGlobalReference(Object* obj)
+{
+    if (obj == NULL)
+        return NULL;
+    dvmLockMutex(&gDvm.jniWeakGlobalRefLock);
+    IndirectRefTable *table = &gDvm.jniWeakGlobalRefTable;
+    jobject jobj = dvmAddToIndirectRefTable(table, IRT_FIRST_SEGMENT, obj);
+    if (jobj == NULL) {
+        dvmDumpIndirectRefTable(table, "JNI weak global");
+        LOGE("Failed adding to JNI weak global ref table (%zd entries)",
+             dvmIndirectRefTableEntries(table));
+    }
+    dvmUnlockMutex(&gDvm.jniWeakGlobalRefLock);
+    return jobj;
+}
+
+static void deleteWeakGlobalReference(jobject jobj)
+{
+    if (jobj == NULL)
+        return;
+    dvmLockMutex(&gDvm.jniWeakGlobalRefLock);
+    IndirectRefTable *table = &gDvm.jniWeakGlobalRefTable;
+    if (!dvmRemoveFromIndirectRefTable(table, IRT_FIRST_SEGMENT, jobj)) {
+        LOGW("JNI: DeleteWeakGlobalRef(%p) failed to find entry", jobj);
+    }
+    dvmUnlockMutex(&gDvm.jniWeakGlobalRefLock);
+}
+
 /*
  * Remove a global reference.  In most cases it's the entry most recently
  * added, which makes this pretty quick.
@@ -824,8 +701,6 @@
         return;
 
     dvmLockMutex(&gDvm.jniGlobalRefLock);
-
-#ifdef USE_INDIRECT_REF
     if (!dvmRemoveFromIndirectRefTable(&gDvm.jniGlobalRefTable,
             IRT_FIRST_SEGMENT, jobj))
     {
@@ -842,119 +717,11 @@
             gDvm.jniGlobalRefLoMark -= kGrefWaterInterval;
         }
     }
-#else
-    if (!dvmRemoveFromReferenceTable(&gDvm.jniGlobalRefTable,
-            gDvm.jniGlobalRefTable.table, jobj))
-    {
-        LOGW("JNI: DeleteGlobalRef(%p) failed to find entry (valid=%d)\n",
-            jobj, dvmIsValidObject((Object*) jobj));
-        goto bail;
-    }
-
-    if (kTrackGrefUsage && gDvm.jniGrefLimit != 0) {
-        int count = dvmReferenceTableEntries(&gDvm.jniGlobalRefTable);
-        if (count < gDvm.jniGlobalRefLoMark) {
-            LOGD("GREF has decreased to %d\n", count);
-            gDvm.jniGlobalRefHiMark -= kGrefWaterInterval;
-            gDvm.jniGlobalRefLoMark -= kGrefWaterInterval;
-        }
-    }
-#endif
-
 bail:
     dvmUnlockMutex(&gDvm.jniGlobalRefLock);
 }
 
 /*
- * We create a PhantomReference that references the object, add a
- * global reference to it, and then flip some bits before returning it.
- * The last step ensures that we detect it as special and that only
- * appropriate calls will accept it.
- *
- * On failure, returns NULL with an exception pending.
- */
-static jweak createWeakGlobalRef(JNIEnv* env, jobject jobj)
-{
-    if (jobj == NULL)
-        return NULL;
-
-    Thread* self = ((JNIEnvExt*)env)->self;
-    Object* obj = dvmDecodeIndirectRef(env, jobj);
-    Object* phantomObj;
-    jobject phantomRef;
-
-    /*
-     * Allocate a PhantomReference, then call the constructor to set
-     * the referent and the reference queue.
-     *
-     * We use a "magic" reference queue that the GC knows about; it behaves
-     * more like a queueless WeakReference, clearing the referent and
-     * not calling enqueue().
-     */
-    if (!dvmIsClassInitialized(gDvm.classJavaLangRefPhantomReference))
-        dvmInitClass(gDvm.classJavaLangRefPhantomReference);
-    phantomObj = dvmAllocObject(gDvm.classJavaLangRefPhantomReference,
-            ALLOC_DEFAULT);
-    if (phantomObj == NULL) {
-        assert(dvmCheckException(self));
-        LOGW("Failed on WeakGlobalRef alloc\n");
-        return NULL;
-    }
-
-    JValue unused;
-    dvmCallMethod(self, gDvm.methJavaLangRefPhantomReference_init, phantomObj,
-        &unused, obj, NULL);
-    dvmReleaseTrackedAlloc(phantomObj, self);
-
-    if (dvmCheckException(self)) {
-        LOGW("PhantomReference init failed\n");
-        return NULL;
-    }
-
-    LOGV("+++ WGR: created phantom ref %p for object %p\n", phantomObj, obj);
-
-    /*
-     * Add it to the global reference table, and mangle the pointer.
-     */
-    phantomRef = addGlobalReference(phantomObj);
-    return dvmObfuscateWeakGlobalRef(phantomRef);
-}
-
-/*
- * Delete the global reference that's keeping the PhantomReference around.
- * The PhantomReference will eventually be discarded by the GC.
- */
-static void deleteWeakGlobalRef(JNIEnv* env, jweak wref)
-{
-    if (wref == NULL)
-        return;
-
-    jobject phantomRef = dvmNormalizeWeakGlobalRef(wref);
-    deleteGlobalReference(phantomRef);
-}
-
-/*
- * Extract the referent from a PhantomReference.  Used for weak global
- * references.
- *
- * "jwobj" is a "mangled" WGR pointer.
- */
-static Object* getPhantomReferent(JNIEnv* env, jweak jwobj)
-{
-    jobject jobj = dvmNormalizeWeakGlobalRef(jwobj);
-    Object* obj = dvmDecodeIndirectRef(env, jobj);
-
-    if (obj->clazz != gDvm.classJavaLangRefPhantomReference) {
-        LOGE("%p is not a phantom reference (%s)\n",
-            jwobj, obj->clazz->descriptor);
-        return NULL;
-    }
-
-    return dvmGetFieldObject(obj, gDvm.offJavaLangRefReference_referent);
-}
-
-
-/*
  * Objects don't currently move, so we just need to create a reference
  * that will ensure the array object isn't collected.
  *
@@ -969,7 +736,7 @@
     if (!dvmAddToReferenceTable(&gDvm.jniPinRefTable, (Object*)arrayObj)) {
         dvmDumpReferenceTable(&gDvm.jniPinRefTable, "JNI pinned array");
         LOGE("Failed adding to JNI pinned array ref table (%d entries)\n",
-            (int) dvmReferenceTableEntries(&gDvm.jniPinRefTable));
+           (int) dvmReferenceTableEntries(&gDvm.jniPinRefTable));
         dvmDumpThread(dvmThreadSelf(), false);
         dvmAbort();
     }
@@ -1031,91 +798,12 @@
 {
     Thread* self = dvmThreadSelf();
     JNIEnv* env = self->jniEnv;
-    ReferenceTable* pLocalRefs = getLocalRefTable(env);
-
-#ifdef USE_INDIRECT_REF
+    IndirectRefTable* pLocalRefs = getLocalRefTable(env);
     dvmDumpIndirectRefTable(pLocalRefs, "JNI local");
     dvmDumpIndirectRefTable(&gDvm.jniGlobalRefTable, "JNI global");
-#else
-    dvmDumpReferenceTable(pLocalRefs, "JNI local");
-    dvmDumpReferenceTable(&gDvm.jniGlobalRefTable, "JNI global");
-#endif
     dvmDumpReferenceTable(&gDvm.jniPinRefTable, "JNI pinned array");
 }
 
-#ifndef USE_INDIRECT_REF
-/*
- * Determine if "obj" appears in the argument list for the native method.
- *
- * We use the "shorty" signature to determine which argument slots hold
- * reference types.
- */
-static bool findInArgList(Thread* self, Object* obj)
-{
-    const Method* meth;
-    u4* fp;
-    int i;
-
-    fp = self->curFrame;
-    while (1) {
-        /*
-         * Back up over JNI PushLocalFrame frames.  This works because the
-         * previous frame on the interpreted stack is either a break frame
-         * (if we called here via native code) or an interpreted method (if
-         * we called here via the interpreter).  In both cases the method
-         * pointer won't match.
-         */
-        StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
-        meth = saveArea->method;
-        if (meth != SAVEAREA_FROM_FP(saveArea->prevFrame)->method)
-            break;
-        fp = saveArea->prevFrame;
-    }
-
-    LOGVV("+++ scanning %d args in %s (%s)\n",
-        meth->insSize, meth->name, meth->shorty);
-    const char* shorty = meth->shorty +1;       /* skip return type char */
-    for (i = 0; i < meth->insSize; i++) {
-        if (i == 0 && !dvmIsStaticMethod(meth)) {
-            /* first arg is "this" ref, not represented in "shorty" */
-            if (fp[i] == (u4) obj)
-                return true;
-        } else {
-            /* if this is a reference type, see if it matches */
-            switch (*shorty) {
-            case 'L':
-                if (fp[i] == (u4) obj)
-                    return true;
-                break;
-            case 'D':
-            case 'J':
-                i++;
-                break;
-            case '\0':
-                LOGE("Whoops! ran off the end of %s (%d)\n",
-                    meth->shorty, meth->insSize);
-                break;
-            default:
-                if (fp[i] == (u4) obj)
-                    LOGI("NOTE: ref %p match on arg type %c\n", obj, *shorty);
-                break;
-            }
-            shorty++;
-        }
-    }
-
-    /*
-     * For static methods, we also pass a class pointer in.
-     */
-    if (dvmIsStaticMethod(meth)) {
-        //LOGI("+++ checking class pointer in %s\n", meth->name);
-        if ((void*)obj == (void*)meth->clazz)
-            return true;
-    }
-    return false;
-}
-#endif
-
 /*
  * Verify that a reference passed in from native code is one that the
  * code is allowed to have.
@@ -1134,7 +822,6 @@
  */
 jobjectRefType dvmGetJNIRefType(JNIEnv* env, jobject jobj)
 {
-#ifdef USE_INDIRECT_REF
     /*
      * IndirectRefKind is currently defined as an exact match of
      * jobjectRefType, so this is easy.  We have to decode it to determine
@@ -1148,40 +835,6 @@
     } else {
         return (jobjectRefType) dvmGetIndirectRefType(jobj);
     }
-#else
-    ReferenceTable* pRefTable = getLocalRefTable(env);
-    Thread* self = dvmThreadSelf();
-
-    if (dvmIsWeakGlobalRef(jobj)) {
-        return JNIWeakGlobalRefType;
-    }
-
-    /* check args */
-    if (findInArgList(self, jobj)) {
-        //LOGI("--- REF found %p on stack\n", jobj);
-        return JNILocalRefType;
-    }
-
-    /* check locals */
-    if (dvmFindInReferenceTable(pRefTable, pRefTable->table, jobj) != NULL) {
-        //LOGI("--- REF found %p in locals\n", jobj);
-        return JNILocalRefType;
-    }
-
-    /* check globals */
-    dvmLockMutex(&gDvm.jniGlobalRefLock);
-    if (dvmFindInReferenceTable(&gDvm.jniGlobalRefTable,
-            gDvm.jniGlobalRefTable.table, jobj))
-    {
-        //LOGI("--- REF found %p in globals\n", jobj);
-        dvmUnlockMutex(&gDvm.jniGlobalRefLock);
-        return JNIGlobalRefType;
-    }
-    dvmUnlockMutex(&gDvm.jniGlobalRefLock);
-
-    /* not found! */
-    return JNIInvalidRefType;
-#endif
 }
 
 /*
@@ -1324,7 +977,7 @@
     DalvikBridgeFunc bridge = shouldTrace(method)
         ? dvmTraceCallJNIMethod
         : dvmSelectJNIBridge(method);
-    dvmSetNativeFunc(method, bridge, func);
+    dvmSetNativeFunc(method, bridge, (const u2*)func);
 }
 
 /*
@@ -1428,14 +1081,11 @@
 {
     if (dvmIsAbstractClass(clazz) || dvmIsInterfaceClass(clazz)) {
         /* JNI spec defines what this throws */
-        dvmThrowExceptionFmt("Ljava/lang/InstantiationException;",
-            "Can't instantiate %s (abstract or interface)", clazz->descriptor);
+        dvmThrowInstantiationException(clazz, "abstract class or interface");
         return false;
     } else if (dvmIsArrayClass(clazz) || clazz == gDvm.classJavaLangClass) {
         /* spec says "must not" for arrays, ignores Class */
-        dvmThrowExceptionFmt("Ljava/lang/IllegalArgumentException;",
-            "Can't instantiate %s (array or Class) with this JNI function",
-            clazz->descriptor);
+        dvmThrowInstantiationException(clazz, "wrong JNI function");
         return false;
     }
 
@@ -1544,13 +1194,11 @@
 static inline void convertReferenceResult(JNIEnv* env, JValue* pResult,
     const Method* method, Thread* self)
 {
-#ifdef USE_INDIRECT_REF
     if (method->shorty[0] == 'L' && !dvmCheckException(self) &&
             pResult->l != NULL)
     {
         pResult->l = dvmDecodeIndirectRef(env, pResult->l);
     }
-#endif
 }
 
 /*
@@ -1567,7 +1215,6 @@
     //LOGI("JNI calling %p (%s.%s:%s):\n", method->insns,
     //    method->clazz->descriptor, method->name, method->shorty);
 
-#ifdef USE_INDIRECT_REF
     /*
      * Walk the argument list, creating local references for appropriate
      * arguments.
@@ -1619,10 +1266,6 @@
 
         idx++;
     }
-#else
-    staticMethodClass = dvmIsStaticMethod(method) ?
-        (jclass) method->clazz : NULL;
-#endif
 
     oldStatus = dvmChangeStatus(self, THREAD_NATIVE);
 
@@ -1630,7 +1273,7 @@
     assert(method->insns != NULL);
 
     COMPUTE_STACK_SUM(self);
-    dvmPlatformInvoke(env, staticMethodClass,
+    dvmPlatformInvoke(env, (ClassObject*)staticMethodClass,
         method->jniArgInfo, method->insSize, modArgs, method->shorty,
         (void*)method->insns, pResult);
     CHECK_STACK_SUM(self);
@@ -1677,14 +1320,12 @@
     u4* modArgs = (u4*) args;
     int oldStatus;
 
-#ifdef USE_INDIRECT_REF
     jobject thisObj = addLocalReference(self->jniEnv, (Object*) args[0]);
     if (thisObj == NULL) {
         assert(dvmCheckException(self));
         return;
     }
     modArgs[0] = (u4) thisObj;
-#endif
 
     oldStatus = dvmChangeStatus(self, THREAD_NATIVE);
 
@@ -1712,22 +1353,18 @@
     jclass staticMethodClass;
     int oldStatus;
 
-#ifdef USE_INDIRECT_REF
     staticMethodClass = addLocalReference(self->jniEnv, (Object*)method->clazz);
     if (staticMethodClass == NULL) {
         assert(dvmCheckException(self));
         return;
     }
-#else
-    staticMethodClass = (jobject) method->clazz;
-#endif
 
     oldStatus = dvmChangeStatus(self, THREAD_NATIVE);
 
     ANDROID_MEMBAR_FULL();      /* guarantee ordering on method->insns */
 
     COMPUTE_STACK_SUM(self);
-    dvmPlatformInvoke(self->jniEnv, staticMethodClass,
+    dvmPlatformInvoke(self->jniEnv, (ClassObject*)staticMethodClass,
         method->jniArgInfo, method->insSize, args, method->shorty,
         (void*)method->insns, pResult);
     CHECK_STACK_SUM(self);
@@ -1974,7 +1611,7 @@
     JNI_ENTER();
 
     ClassObject* clazz = (ClassObject*) dvmDecodeIndirectRef(env, jclazz);
-    dvmThrowExceptionByClass(clazz, message);
+    dvmThrowException(clazz, message);
     // TODO: should return failure if this didn't work (e.g. OOM)
 
     JNI_EXIT();
@@ -2067,8 +1704,7 @@
     {
         /* yes, OutOfMemoryError, not StackOverflowError */
         dvmClearException(_self);
-        dvmThrowException("Ljava/lang/OutOfMemoryError;",
-            "out of stack in JNI PushLocalFrame");
+        dvmThrowOutOfMemoryError("out of stack in JNI PushLocalFrame");
         result = JNI_ERR;
     }
     JNI_EXIT();
@@ -2076,7 +1712,7 @@
 }
 
 /*
- * Pop the local frame off.  If "result" is not null, add it as a
+ * Pop the local frame off.  If "jresult" is not null, add it as a
  * local reference on the now-current frame.
  */
 static jobject PopLocalFrame(JNIEnv* env, jobject jresult)
@@ -2086,12 +1722,11 @@
     if (!dvmPopLocalFrame(_self /*dvmThreadSelf()*/)) {
         LOGW("JNI WARNING: too many PopLocalFrame calls\n");
         dvmClearException(_self);
-        dvmThrowException("Ljava/lang/RuntimeException;",
-            "too many PopLocalFrame calls");
+        dvmThrowRuntimeException("too many PopLocalFrame calls");
     }
     jresult = addLocalReference(env, result);
     JNI_EXIT();
-    return result;
+    return jresult;
 }
 
 /*
@@ -2099,13 +1734,8 @@
  */
 static jobject NewGlobalRef(JNIEnv* env, jobject jobj)
 {
-    Object* obj;
-
     JNI_ENTER();
-    if (dvmIsWeakGlobalRef(jobj))
-        obj = getPhantomReferent(env, (jweak) jobj);
-    else
-        obj = dvmDecodeIndirectRef(env, jobj);
+    Object* obj = dvmDecodeIndirectRef(env, jobj);
     jobject retval = addGlobalReference(obj);
     JNI_EXIT();
     return retval;
@@ -2127,13 +1757,8 @@
  */
 static jobject NewLocalRef(JNIEnv* env, jobject jobj)
 {
-    Object* obj;
-
     JNI_ENTER();
-    if (dvmIsWeakGlobalRef(jobj))
-        obj = getPhantomReferent(env, (jweak) jobj);
-    else
-        obj = dvmDecodeIndirectRef(env, jobj);
+    Object* obj = dvmDecodeIndirectRef(env, jobj);
     jobject retval = addLocalReference(env, obj);
     JNI_EXIT();
     return retval;
@@ -2158,8 +1783,7 @@
     JNI_ENTER();
     bool okay = ensureLocalCapacity(env, capacity);
     if (!okay) {
-        dvmThrowException("Ljava/lang/OutOfMemoryError;",
-            "can't ensure local reference capacity");
+        dvmThrowOutOfMemoryError("can't ensure local reference capacity");
     }
     JNI_EXIT();
     if (okay)
@@ -2355,7 +1979,7 @@
     } else if (dvmIsInterfaceClass(clazz)) {
         Method* meth = dvmFindInterfaceMethodHierByDescriptor(clazz, name, sig);
         if (meth == NULL) {
-            dvmThrowExceptionFmt("Ljava/lang/NoSuchMethodError;",
+            dvmThrowExceptionFmt(gDvm.exNoSuchMethodError,
                 "no method with name='%s' signature='%s' in interface %s",
                 name, sig, clazz->descriptor);
         }
@@ -2376,7 +2000,7 @@
             meth = NULL;
         }
         if (meth == NULL) {
-            dvmThrowExceptionFmt("Ljava/lang/NoSuchMethodError;",
+            dvmThrowExceptionFmt(gDvm.exNoSuchMethodError,
                 "no method with name='%s' signature='%s' in class %s",
                 name, sig, clazz->descriptor);
         } else {
@@ -2411,7 +2035,7 @@
     } else {
         id = (jfieldID) dvmFindInstanceFieldHier(clazz, name, sig);
         if (id == NULL) {
-            dvmThrowExceptionFmt("Ljava/lang/NoSuchFieldError;",
+            dvmThrowExceptionFmt(gDvm.exNoSuchFieldError,
                 "no field with name='%s' signature='%s' in class %s",
                 name, sig, clazz->descriptor);
         }
@@ -2453,7 +2077,7 @@
 
         id = (jmethodID) meth;
         if (id == NULL) {
-            dvmThrowExceptionFmt("Ljava/lang/NoSuchMethodError;",
+            dvmThrowExceptionFmt(gDvm.exNoSuchMethodError,
                 "no static method with name='%s' signature='%s' in class %s",
                 name, sig, clazz->descriptor);
         }
@@ -2480,7 +2104,7 @@
     } else {
         id = (jfieldID) dvmFindStaticField(clazz, name, sig);
         if (id == NULL) {
-            dvmThrowExceptionFmt("Ljava/lang/NoSuchFieldError;",
+            dvmThrowExceptionFmt(gDvm.exNoSuchFieldError,
                 "no static field with name='%s' signature='%s' in class %s",
                 name, sig, clazz->descriptor);
         }
@@ -2533,7 +2157,7 @@
 /*
  * Set a static field.
  */
-#define SET_STATIC_TYPE_FIELD(_ctype, _jname, _isref)                       \
+#define SET_STATIC_TYPE_FIELD(_ctype, _ctype2, _jname, _isref)              \
     static void SetStatic##_jname##Field(JNIEnv* env, jclass jclazz,        \
         jfieldID fieldID, _ctype value)                                     \
     {                                                                       \
@@ -2546,7 +2170,7 @@
                     dvmDecodeIndirectRef(env, (jobject)(u4)value);          \
                 dvmSetStaticFieldObjectVolatile(sfield, valObj);            \
             } else {                                                        \
-                dvmSetStaticField##_jname##Volatile(sfield, value);         \
+                dvmSetStaticField##_jname##Volatile(sfield, (_ctype2)value);\
             }                                                               \
         } else {                                                            \
             if (_isref) {                                                   \
@@ -2554,20 +2178,20 @@
                     dvmDecodeIndirectRef(env, (jobject)(u4)value);          \
                 dvmSetStaticFieldObject(sfield, valObj);                    \
             } else {                                                        \
-                dvmSetStaticField##_jname(sfield, value);                   \
+                dvmSetStaticField##_jname(sfield, (_ctype2)value);          \
             }                                                               \
         }                                                                   \
         JNI_EXIT();                                                         \
     }
-SET_STATIC_TYPE_FIELD(jobject, Object, true);
-SET_STATIC_TYPE_FIELD(jboolean, Boolean, false);
-SET_STATIC_TYPE_FIELD(jbyte, Byte, false);
-SET_STATIC_TYPE_FIELD(jchar, Char, false);
-SET_STATIC_TYPE_FIELD(jshort, Short, false);
-SET_STATIC_TYPE_FIELD(jint, Int, false);
-SET_STATIC_TYPE_FIELD(jlong, Long, false);
-SET_STATIC_TYPE_FIELD(jfloat, Float, false);
-SET_STATIC_TYPE_FIELD(jdouble, Double, false);
+SET_STATIC_TYPE_FIELD(jobject, Object*, Object, true);
+SET_STATIC_TYPE_FIELD(jboolean, bool, Boolean, false);
+SET_STATIC_TYPE_FIELD(jbyte, s1, Byte, false);
+SET_STATIC_TYPE_FIELD(jchar, u2, Char, false);
+SET_STATIC_TYPE_FIELD(jshort, s2, Short, false);
+SET_STATIC_TYPE_FIELD(jint, s4, Int, false);
+SET_STATIC_TYPE_FIELD(jlong, s8, Long, false);
+SET_STATIC_TYPE_FIELD(jfloat, float, Float, false);
+SET_STATIC_TYPE_FIELD(jdouble, double, Double, false);
 
 /*
  * Get an instance field.
@@ -2615,7 +2239,7 @@
 /*
  * Set an instance field.
  */
-#define SET_TYPE_FIELD(_ctype, _jname, _isref)                              \
+#define SET_TYPE_FIELD(_ctype, _ctype2, _jname, _isref)                     \
     static void Set##_jname##Field(JNIEnv* env, jobject jobj,               \
         jfieldID fieldID, _ctype value)                                     \
     {                                                                       \
@@ -2629,7 +2253,7 @@
                 dvmSetFieldObjectVolatile(obj, field->byteOffset, valObj);  \
             } else {                                                        \
                 dvmSetField##_jname##Volatile(obj,                          \
-                    field->byteOffset, value);                              \
+                    field->byteOffset, (_ctype2)value);                     \
             }                                                               \
         } else {                                                            \
             if (_isref) {                                                   \
@@ -2637,20 +2261,21 @@
                     dvmDecodeIndirectRef(env, (jobject)(u4)value);          \
                 dvmSetFieldObject(obj, field->byteOffset, valObj);          \
             } else {                                                        \
-                dvmSetField##_jname(obj, field->byteOffset, value);         \
+                dvmSetField##_jname(obj,                                    \
+                    field->byteOffset, (_ctype2)value);                     \
             }                                                               \
         }                                                                   \
         JNI_EXIT();                                                         \
     }
-SET_TYPE_FIELD(jobject, Object, true);
-SET_TYPE_FIELD(jboolean, Boolean, false);
-SET_TYPE_FIELD(jbyte, Byte, false);
-SET_TYPE_FIELD(jchar, Char, false);
-SET_TYPE_FIELD(jshort, Short, false);
-SET_TYPE_FIELD(jint, Int, false);
-SET_TYPE_FIELD(jlong, Long, false);
-SET_TYPE_FIELD(jfloat, Float, false);
-SET_TYPE_FIELD(jdouble, Double, false);
+SET_TYPE_FIELD(jobject, Object*, Object, true);
+SET_TYPE_FIELD(jboolean, bool, Boolean, false);
+SET_TYPE_FIELD(jbyte, s1, Byte, false);
+SET_TYPE_FIELD(jchar, u2, Char, false);
+SET_TYPE_FIELD(jshort, s2, Short, false);
+SET_TYPE_FIELD(jint, s4, Int, false);
+SET_TYPE_FIELD(jlong, s8, Long, false);
+SET_TYPE_FIELD(jfloat, float, Float, false);
+SET_TYPE_FIELD(jdouble, double, Double, false);
 
 /*
  * Make a virtual method call.
@@ -2676,7 +2301,7 @@
         dvmCallMethodV(_self, meth, obj, true, &result, args);              \
         va_end(args);                                                       \
         if (_isref && !dvmCheckException(_self))                            \
-            result.l = addLocalReference(env, result.l);                    \
+            result.l = addLocalReference(env, (Object*)result.l);           \
         JNI_EXIT();                                                         \
         return _retok;                                                      \
     }                                                                       \
@@ -2694,7 +2319,7 @@
         }                                                                   \
         dvmCallMethodV(_self, meth, obj, true, &result, args);              \
         if (_isref && !dvmCheckException(_self))                            \
-            result.l = addLocalReference(env, result.l);                    \
+            result.l = addLocalReference(env, (Object*)result.l);           \
         JNI_EXIT();                                                         \
         return _retok;                                                      \
     }                                                                       \
@@ -2712,7 +2337,7 @@
         }                                                                   \
         dvmCallMethodA(_self, meth, obj, true, &result, args);              \
         if (_isref && !dvmCheckException(_self))                            \
-            result.l = addLocalReference(env, result.l);                    \
+            result.l = addLocalReference(env, (Object*)result.l);           \
         JNI_EXIT();                                                         \
         return _retok;                                                      \
     }
@@ -2753,7 +2378,7 @@
         va_start(args, methodID);                                           \
         dvmCallMethodV(_self, meth, obj, true, &result, args);              \
         if (_isref && !dvmCheckException(_self))                            \
-            result.l = addLocalReference(env, result.l);                    \
+            result.l = addLocalReference(env, (Object*)result.l);           \
         va_end(args);                                                       \
         JNI_EXIT();                                                         \
         return _retok;                                                      \
@@ -2774,7 +2399,7 @@
         }                                                                   \
         dvmCallMethodV(_self, meth, obj, true, &result, args);              \
         if (_isref && !dvmCheckException(_self))                            \
-            result.l = addLocalReference(env, result.l);                    \
+            result.l = addLocalReference(env, (Object*)result.l);           \
         JNI_EXIT();                                                         \
         return _retok;                                                      \
     }                                                                       \
@@ -2794,7 +2419,7 @@
         }                                                                   \
         dvmCallMethodA(_self, meth, obj, true, &result, args);              \
         if (_isref && !dvmCheckException(_self))                            \
-            result.l = addLocalReference(env, result.l);                    \
+            result.l = addLocalReference(env, (Object*)result.l);           \
         JNI_EXIT();                                                         \
         return _retok;                                                      \
     }
@@ -2825,7 +2450,7 @@
         dvmCallMethodV(_self, (Method*)methodID, NULL, true, &result, args);\
         va_end(args);                                                       \
         if (_isref && !dvmCheckException(_self))                            \
-            result.l = addLocalReference(env, result.l);                    \
+            result.l = addLocalReference(env, (Object*)result.l);           \
         JNI_EXIT();                                                         \
         return _retok;                                                      \
     }                                                                       \
@@ -2837,7 +2462,7 @@
         JValue result;                                                      \
         dvmCallMethodV(_self, (Method*)methodID, NULL, true, &result, args);\
         if (_isref && !dvmCheckException(_self))                            \
-            result.l = addLocalReference(env, result.l);                    \
+            result.l = addLocalReference(env, (Object*)result.l);           \
         JNI_EXIT();                                                         \
         return _retok;                                                      \
     }                                                                       \
@@ -2849,7 +2474,7 @@
         JValue result;                                                      \
         dvmCallMethodA(_self, (Method*)methodID, NULL, true, &result, args);\
         if (_isref && !dvmCheckException(_self))                            \
-            result.l = addLocalReference(env, result.l);                    \
+            result.l = addLocalReference(env, (Object*)result.l);           \
         JNI_EXIT();                                                         \
         return _retok;                                                      \
     }
@@ -3007,8 +2632,7 @@
         newStr = dvmCreateCstrFromString(strObj);
         if (newStr == NULL) {
             /* assume memory failure */
-            dvmThrowException("Ljava/lang/OutOfMemoryError;",
-                "native heap string alloc failed");
+            dvmThrowOutOfMemoryError("native heap string alloc failed");
         }
     }
 
@@ -3053,8 +2677,7 @@
         (ClassObject*) dvmDecodeIndirectRef(env, jelementClass);
 
     if (elemClassObj == NULL) {
-        dvmThrowException("Ljava/lang/NullPointerException;",
-            "JNI NewObjectArray");
+        dvmThrowNullPointerException("JNI NewObjectArray");
         goto bail;
     }
 
@@ -3087,9 +2710,7 @@
 static bool checkArrayElementBounds(ArrayObject* arrayObj, jsize index) {
     assert(arrayObj != NULL);
     if (index < 0 || index >= (int) arrayObj->length) {
-        dvmThrowExceptionFmt("Ljava/lang/ArrayIndexOutOfBoundsException;",
-            "%s index=%d length=%d", arrayObj->obj.clazz->descriptor, index,
-            arrayObj->length);
+        dvmThrowArrayIndexOutOfBoundsException(arrayObj->length, index);
         return false;
     }
     return true;
@@ -3217,7 +2838,7 @@
 static void throwArrayRegionOutOfBounds(ArrayObject* arrayObj, jsize start,
     jsize len, const char* arrayIdentifier)
 {
-    dvmThrowExceptionFmt("Ljava/lang/ArrayIndexOutOfBoundsException;",
+    dvmThrowExceptionFmt(gDvm.exArrayIndexOutOfBoundsException,
         "%s offset=%d length=%d %s.length=%d",
         arrayObj->obj.clazz->descriptor, start, len, arrayIdentifier,
         arrayObj->length);
@@ -3415,8 +3036,9 @@
 {
     JNI_ENTER();
     StringObject* strObj = (StringObject*) dvmDecodeIndirectRef(env, jstr);
-    if (start + len > dvmStringLen(strObj))
-        dvmThrowException("Ljava/lang/StringIndexOutOfBoundsException;", NULL);
+    int strLen = dvmStringLen(strObj);
+    if (((start|len) < 0) || (start + len > dvmStringLen(strObj)))
+        dvmThrowStringIndexOutOfBoundsExceptionWithRegion(strLen, start, len);
     else
         memcpy(buf, dvmStringChars(strObj) + start, len * sizeof(u2));
     JNI_EXIT();
@@ -3431,8 +3053,9 @@
 {
     JNI_ENTER();
     StringObject* strObj = (StringObject*) dvmDecodeIndirectRef(env, jstr);
-    if (start + len > dvmStringLen(strObj))
-        dvmThrowException("Ljava/lang/StringIndexOutOfBoundsException;", NULL);
+    int strLen = dvmStringLen(strObj);
+    if (((start|len) < 0) || (start + len > dvmStringLen(strObj)))
+        dvmThrowStringIndexOutOfBoundsExceptionWithRegion(strLen, start, len);
     else
         dvmCreateCstrFromStringRegion(strObj, start, len, buf);
     JNI_EXIT();
@@ -3510,12 +3133,13 @@
 /*
  * Create a new weak global reference.
  */
-static jweak NewWeakGlobalRef(JNIEnv* env, jobject obj)
+static jweak NewWeakGlobalRef(JNIEnv* env, jobject jobj)
 {
     JNI_ENTER();
-    jweak wref = createWeakGlobalRef(env, obj);
+    Object *obj = dvmDecodeIndirectRef(env, jobj);
+    jweak retval = addWeakGlobalReference(obj);
     JNI_EXIT();
-    return wref;
+    return retval;
 }
 
 /*
@@ -3524,7 +3148,7 @@
 static void DeleteWeakGlobalRef(JNIEnv* env, jweak wref)
 {
     JNI_ENTER();
-    deleteWeakGlobalRef(env, wref);
+    deleteWeakGlobalReference(wref);
     JNI_EXIT();
 }
 
@@ -4294,13 +3918,14 @@
             fprintf(stderr, "ERROR: arg %d string was null\n", i);
             goto bail;
         } else if (strcmp(optStr, "vfprintf") == 0) {
-            gDvm.vfprintfHook = args->options[i].extraInfo;
+            gDvm.vfprintfHook =
+                (int (*)(FILE *, const char*, va_list))args->options[i].extraInfo;
         } else if (strcmp(optStr, "exit") == 0) {
-            gDvm.exitHook = args->options[i].extraInfo;
+            gDvm.exitHook = (void (*)(int)) args->options[i].extraInfo;
         } else if (strcmp(optStr, "abort") == 0) {
-            gDvm.abortHook = args->options[i].extraInfo;
+            gDvm.abortHook = (void (*)(void))args->options[i].extraInfo;
         } else if (strcmp(optStr, "sensitiveThread") == 0) {
-            gDvm.isSensitiveThreadHook = args->options[i].extraInfo;
+            gDvm.isSensitiveThreadHook = (bool (*)(void))args->options[i].extraInfo;
         } else if (strcmp(optStr, "-Xcheck:jni") == 0) {
             checkJni = true;
         } else if (strncmp(optStr, "-Xjniopts:", 10) == 0) {
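
The Jni.c hunks above make indirect references unconditional: every jobject handed to native code is now a table entry rather than a raw Object*, and weak globals get their own IndirectRefTable instead of the phantom-reference trick. As a reminder of the usage pattern this enforces on callers, here is a minimal sketch in plain JNI (not part of the patch; the cached class is purely illustrative). A local reference must be promoted with NewGlobalRef before it may outlive its frame:

    #include <jni.h>

    static jclass gStringClass;     /* hypothetical cache */

    jint JNI_OnLoad(JavaVM* vm, void* reserved)
    {
        JNIEnv* env;
        if ((*vm)->GetEnv(vm, (void**) &env, JNI_VERSION_1_6) != JNI_OK)
            return JNI_ERR;

        jclass local = (*env)->FindClass(env, "java/lang/String");
        if (local == NULL)
            return JNI_ERR;
        gStringClass = (*env)->NewGlobalRef(env, local);  /* survives the frame */
        (*env)->DeleteLocalRef(env, local);
        return (gStringClass != NULL) ? JNI_VERSION_1_6 : JNI_ERR;
    }

Caching the raw FindClass result instead is exactly the kind of stale-reference bug the indirect tables are there to catch.
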
diff --git a/vm/JniInternal.h b/vm/JniInternal.h
index 302dcb0..bc88cad 100644
--- a/vm/JniInternal.h
+++ b/vm/JniInternal.h
@@ -104,11 +104,7 @@
  */
 INLINE void dvmPopJniLocals(Thread* self, StackSaveArea* saveArea)
 {
-#ifdef USE_INDIRECT_REF
     self->jniLocalRefTable.segmentState.all = saveArea->xtra.localRefCookie;
-#else
-    self->jniLocalRefTable.nextEntry = saveArea->xtra.localRefCookie;
-#endif
 }
 
 /*
@@ -158,14 +154,7 @@
 /*
  * Decode a local, global, or weak-global reference.
  */
-#ifdef USE_INDIRECT_REF
 Object* dvmDecodeIndirectRef(JNIEnv* env, jobject jobj);
-#else
-/* use an inline to ensure this is a no-op */
-INLINE Object* dvmDecodeIndirectRef(JNIEnv* env, jobject jobj) {
-    return (Object*) jobj;
-}
-#endif
 
 /*
  * Verify that a reference passed in from native code is valid.  Returns
@@ -208,41 +197,4 @@
  */
 void dvmDumpJniReferenceTables(void);
 
-/*
- * This mask is applied to weak global reference values returned to
- * native code.  The goal is to create an invalid pointer that will cause
- * a crash if misused.  The mmap region for the virtual heap is typically
- * around 0x40xxxxxx.
- *
- * To make weak global references easily distinguishable from other kinds
- * of references when !USE_INDIRECT_REF, we XOR the low bits.  Assuming >=
- * 64-bit alignment of objects, this changes the low 3 bits from all clear
- * to all set.
- */
-#define WEAK_GLOBAL_XOR 0x9e0fffff
-
-/*
- * "Obfuscate" a weak global reference pointer.
- */
-INLINE jweak dvmObfuscateWeakGlobalRef(jobject jobj) {
-    return (jweak) ((u4) jobj ^ WEAK_GLOBAL_XOR);
-}
-
-/*
- * Undo the obfuscation.
- */
-INLINE jobject dvmNormalizeWeakGlobalRef(jweak ref) {
-    return (jobject) ((u4) ref ^ WEAK_GLOBAL_XOR);
-}
-
-/*
- * Returns "true" if this looks like a weak global reference.
- *
- * Relies on the low 3 bits being set instead of clear (the latter is
- * guaranteed by 64-bit alignment of objects).
- */
-INLINE bool dvmIsWeakGlobalRef(jobject jobj) {
-    return (((u4) jobj & 0x07) == 0x07);
-}
-
 #endif /*_DALVIK_JNIINTERNAL*/
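
In JniInternal.h the local-reference bookkeeping is now a single segment-state cookie saved in the stack frame and restored by dvmPopJniLocals(). The same cookie idea is what PushLocalFrame/PopLocalFrame expose at the JNI level; a hedged sketch of that caller-side pattern (standard JNI, not code from this header):

    #include <jni.h>

    /* Create many temporary refs, then release them all at once.
     * PopLocalFrame frees everything made since the matching PushLocalFrame
     * and can promote one survivor into the caller's frame. */
    jobject pickLast(JNIEnv* env, jobjectArray items)
    {
        jobject keep = NULL;
        if ((*env)->PushLocalFrame(env, 16) < 0)
            return NULL;                       /* OutOfMemoryError pending */

        jsize n = (*env)->GetArrayLength(env, items);
        for (jsize i = 0; i < n; i++) {
            keep = (*env)->GetObjectArrayElement(env, items, i);
            /* ... work with keep; no per-element DeleteLocalRef needed ... */
        }
        return (*env)->PopLocalFrame(env, keep);
    }
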
diff --git a/vm/LinearAlloc.c b/vm/LinearAlloc.c
index ee634f0..dc93759 100644
--- a/vm/LinearAlloc.c
+++ b/vm/LinearAlloc.c
@@ -139,7 +139,7 @@
         return NULL;
     }
 
-    pHdr->mapAddr = mmap(NULL, pHdr->mapLength, PROT_READ | PROT_WRITE,
+    pHdr->mapAddr = (char*)mmap(NULL, pHdr->mapLength, PROT_READ | PROT_WRITE,
         MAP_PRIVATE, fd, 0);
     if (pHdr->mapAddr == MAP_FAILED) {
         LOGE("LinearAlloc mmap(%d) failed: %s\n", pHdr->mapLength,
@@ -202,7 +202,7 @@
     if (ENFORCE_READ_ONLY) {
         /* allocate the per-page ref count */
         int numPages = (pHdr->mapLength+SYSTEM_PAGE_SIZE-1) / SYSTEM_PAGE_SIZE;
-        pHdr->writeRefCount = calloc(numPages, sizeof(short));
+        pHdr->writeRefCount = (short*)calloc(numPages, sizeof(short));
         if (pHdr->writeRefCount == NULL) {
             free(pHdr);
             return NULL;
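
The LinearAlloc.c change only adds explicit casts on the mmap() and calloc() return values. The patch does not say why, but the usual motivation is to keep the file legal under compilers that reject the implicit void* conversion (C++, or C with warnings promoted to errors). A small sketch of the difference, under that assumption:

    #include <stdlib.h>
    #include <sys/mman.h>

    char* mapScratch(size_t length, int fd)
    {
        /* In C, "char* addr = mmap(...)" compiles; as C++ it is an error
         * ("invalid conversion from 'void*' to 'char*'"), so the explicit
         * cast keeps the same line valid in both languages. */
        char* addr = (char*) mmap(NULL, length, PROT_READ | PROT_WRITE,
                                  MAP_PRIVATE, fd, 0);
        return (addr == MAP_FAILED) ? NULL : addr;
    }
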
diff --git a/vm/Misc.c b/vm/Misc.c
index 564d415..f5fd34a 100644
--- a/vm/Misc.c
+++ b/vm/Misc.c
@@ -13,6 +13,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 /*
  * Miscellaneous utility functions.
  */
@@ -29,9 +30,6 @@
 #include <cutils/ashmem.h>
 #include <sys/mman.h>
 
-#define ALIGN_UP_TO_PAGE_SIZE(p) \
-    (((size_t)(p) + (SYSTEM_PAGE_SIZE - 1)) & ~(SYSTEM_PAGE_SIZE - 1))
-
 /*
  * Print a hex dump in this format:
  *
@@ -50,7 +48,7 @@
     size_t length, HexDumpMode mode)
 {
     static const char gHexDigit[] = "0123456789abcdef";
-    const unsigned char* addr = vaddr;
+    const unsigned char* addr = (const unsigned char*)vaddr;
     char out[77];           /* exact fit */
     unsigned int offset;    /* offset to show while printing */
     char* hex;
@@ -195,195 +193,6 @@
 
 
 /*
- * Allocate a bit vector with enough space to hold at least the specified
- * number of bits.
- */
-BitVector* dvmAllocBitVector(int startBits, bool expandable)
-{
-    BitVector* bv;
-    int count;
-
-    assert(sizeof(bv->storage[0]) == 4);        /* assuming 32-bit units */
-    assert(startBits >= 0);
-
-    bv = (BitVector*) malloc(sizeof(BitVector));
-
-    count = (startBits + 31) >> 5;
-
-    bv->storageSize = count;
-    bv->expandable = expandable;
-    bv->storage = (u4*) malloc(count * sizeof(u4));
-    memset(bv->storage, 0x00, count * sizeof(u4));
-    return bv;
-}
-
-/*
- * Free a BitVector.
- */
-void dvmFreeBitVector(BitVector* pBits)
-{
-    if (pBits == NULL)
-        return;
-
-    free(pBits->storage);
-    free(pBits);
-}
-
-/*
- * "Allocate" the first-available bit in the bitmap.
- *
- * This is not synchronized.  The caller is expected to hold some sort of
- * lock that prevents multiple threads from executing simultaneously in
- * dvmAllocBit/dvmFreeBit.
- */
-int dvmAllocBit(BitVector* pBits)
-{
-    int word, bit;
-
-retry:
-    for (word = 0; word < pBits->storageSize; word++) {
-        if (pBits->storage[word] != 0xffffffff) {
-            /*
-             * There are unallocated bits in this word.  Return the first.
-             */
-            bit = ffs(~(pBits->storage[word])) -1;
-            assert(bit >= 0 && bit < 32);
-            pBits->storage[word] |= 1 << bit;
-            return (word << 5) | bit;
-        }
-    }
-
-    /*
-     * Ran out of space, allocate more if we're allowed to.
-     */
-    if (!pBits->expandable)
-        return -1;
-
-    pBits->storage = realloc(pBits->storage,
-                    (pBits->storageSize + kBitVectorGrowth) * sizeof(u4));
-    memset(&pBits->storage[pBits->storageSize], 0x00,
-        kBitVectorGrowth * sizeof(u4));
-    pBits->storageSize += kBitVectorGrowth;
-    goto retry;
-}
-
-/*
- * Mark the specified bit as "set".
- *
- * Returns "false" if the bit is outside the range of the vector and we're
- * not allowed to expand.
- */
-bool dvmSetBit(BitVector* pBits, int num)
-{
-    assert(num >= 0);
-    if (num >= pBits->storageSize * (int)sizeof(u4) * 8) {
-        if (!pBits->expandable)
-            return false;
-
-        /* Round up to word boundaries for "num+1" bits */
-        int newSize = (num + 1 + 31) >> 5;
-        assert(newSize > pBits->storageSize);
-        pBits->storage = realloc(pBits->storage, newSize * sizeof(u4));
-        memset(&pBits->storage[pBits->storageSize], 0x00,
-            (newSize - pBits->storageSize) * sizeof(u4));
-        pBits->storageSize = newSize;
-    }
-
-    pBits->storage[num >> 5] |= 1 << (num & 0x1f);
-    return true;
-}
-
-/*
- * Mark the specified bit as "clear".
- */
-void dvmClearBit(BitVector* pBits, int num)
-{
-    assert(num >= 0 && num < (int) pBits->storageSize * (int)sizeof(u4) * 8);
-
-    pBits->storage[num >> 5] &= ~(1 << (num & 0x1f));
-}
-
-/*
- * Mark all bits bit as "clear".
- */
-void dvmClearAllBits(BitVector* pBits)
-{
-    int count = pBits->storageSize;
-    memset(pBits->storage, 0, count * sizeof(u4));
-}
-
-/*
- * Determine whether or not the specified bit is set.
- */
-bool dvmIsBitSet(const BitVector* pBits, int num)
-{
-    assert(num >= 0 && num < (int) pBits->storageSize * (int)sizeof(u4) * 8);
-
-    int val = pBits->storage[num >> 5] & (1 << (num & 0x1f));
-    return (val != 0);
-}
-
-/*
- * Count the number of bits that are set.
- */
-int dvmCountSetBits(const BitVector* pBits)
-{
-    int word;
-    int count = 0;
-
-    for (word = 0; word < pBits->storageSize; word++) {
-        u4 val = pBits->storage[word];
-
-        if (val != 0) {
-            if (val == 0xffffffff) {
-                count += 32;
-            } else {
-                /* count the number of '1' bits */
-                while (val != 0) {
-                    val &= val - 1;
-                    count++;
-                }
-            }
-        }
-    }
-
-    return count;
-}
-
-/*
- * Copy a whole vector to the other. Only do that when the both vectors have
- * the same size and attribute.
- */
-bool dvmCopyBitVector(BitVector *dest, const BitVector *src)
-{
-    if (dest->storageSize != src->storageSize ||
-        dest->expandable != src->expandable)
-        return false;
-    memcpy(dest->storage, src->storage, sizeof(u4) * dest->storageSize);
-    return true;
-}
-
-/*
- * Intersect two bit vectores and merge the result on top of the pre-existing
- * value in the dest vector.
- */
-bool dvmIntersectBitVectors(BitVector *dest, const BitVector *src1,
-                            const BitVector *src2)
-{
-    if (dest->storageSize != src1->storageSize ||
-        dest->storageSize != src2->storageSize ||
-        dest->expandable != src1->expandable ||
-        dest->expandable != src2->expandable)
-        return false;
-
-    int i;
-    for (i = 0; i < dest->storageSize; i++) {
-        dest->storage[i] |= src1->storage[i] & src2->storage[i];
-    }
-    return true;
-}
-
-/*
  * Return a newly-allocated string in which all occurrences of '.' have
  * been changed to '/'.  If we find a '/' in the original string, NULL
  * is returned to avoid ambiguity.
@@ -444,7 +253,7 @@
     }
 
     // Allocate enough space.
-    char* result = malloc(resultLength + 1);
+    char* result = (char*)malloc(resultLength + 1);
     if (result == NULL) {
         return NULL;
     }
@@ -488,7 +297,7 @@
         str++; /* Skip the 'L'. */
     }
 
-    newStr = malloc(at + 1); /* Add one for the '\0'. */
+    newStr = (char*)malloc(at + 1); /* Add one for the '\0'. */
     if (newStr == NULL)
         return NULL;
 
@@ -522,7 +331,7 @@
         wrapElSemi = 1;
     }
 
-    newStr = at = malloc(length + 1); /* + 1 for the '\0' */
+    newStr = at = (char*)malloc(length + 1); /* + 1 for the '\0' */
 
     if (newStr == NULL) {
         return NULL;
@@ -557,7 +366,7 @@
 {
     if (str[0] == 'L') {
         size_t length = strlen(str) - 1;
-        char* newStr = malloc(length);
+        char* newStr = (char*)malloc(length);
 
         if (newStr == NULL) {
             return NULL;
@@ -579,7 +388,7 @@
 {
     if (str[0] != '[') {
         size_t length = strlen(str);
-        char* descriptor = malloc(length + 3);
+        char* descriptor = (char*)malloc(length + 3);
 
         if (descriptor == NULL) {
             return NULL;
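
Misc.c loses its BitVector implementation here (the matching declarations go away in the Misc.h hunk below; whether the code moved elsewhere or was dropped is not visible in this diff). The core idiom in the removed dvmAllocBit() is worth noting: scan 32-bit words for one that is not full, then use ffs() on the complement to find the lowest clear bit. A standalone toy version of just that step, not Dalvik's implementation:

    #include <strings.h>    /* ffs() */

    /* Returns the allocated bit index, or -1 if all `words` words are full. */
    static int allocFirstFreeBit(unsigned int* storage, int words)
    {
        for (int w = 0; w < words; w++) {
            if (storage[w] != 0xffffffffu) {
                int bit = ffs((int) ~storage[w]) - 1;  /* lowest clear bit */
                storage[w] |= 1u << bit;
                return (w << 5) | bit;
            }
        }
        return -1;
    }
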
diff --git a/vm/Misc.h b/vm/Misc.h
index 44f853b..4dc983e 100644
--- a/vm/Misc.h
+++ b/vm/Misc.h
@@ -128,55 +128,6 @@
 #endif
     ;
 
-
-/*
- * Expanding bitmap, used for tracking resources.  Bits are numbered starting
- * from zero.
- *
- * All operations on a BitVector are unsynchronized.
- */
-typedef struct BitVector {
-    bool    expandable;     /* expand bitmap if we run out? */
-    int     storageSize;    /* current size, in 32-bit words */
-    u4*     storage;
-} BitVector;
-
-/* allocate a bit vector with enough space to hold "startBits" bits */
-BitVector* dvmAllocBitVector(int startBits, bool expandable);
-void dvmFreeBitVector(BitVector* pBits);
-
-/*
- * dvmAllocBit always allocates the first possible bit.  If we run out of
- * space in the bitmap, and it's not marked expandable, dvmAllocBit
- * returns -1.
- *
- * dvmSetBit sets the specified bit, expanding the vector if necessary
- * (and possible).
- *
- * dvmIsBitSet returns "true" if the bit is set.
- */
-int dvmAllocBit(BitVector* pBits);
-bool dvmSetBit(BitVector* pBits, int num);
-void dvmClearBit(BitVector* pBits, int num);
-void dvmClearAllBits(BitVector* pBits);
-bool dvmIsBitSet(const BitVector* pBits, int num);
-
-/* count the number of bits that have been set */
-int dvmCountSetBits(const BitVector* pBits);
-
-/* copy one vector to the other compatible one */
-bool dvmCopyBitVector(BitVector *dest, const BitVector *src);
-
-/*
- * Intersect two bit vectores and merge the result on top of the pre-existing
- * value in the dest vector.
- */
-bool dvmIntersectBitVectors(BitVector *dest, const BitVector *src1,
-                            const BitVector *src2);
-
-#define kBitVectorGrowth    4   /* increase by 4 u4s when limit hit */
-
-
 /*
  * Return a newly-allocated string in which all occurrences of '.' have
  * been changed to '/'.  If we find a '/' in the original string, NULL
diff --git a/vm/Native.c b/vm/Native.c
index 8436e7f..a06bf3c 100644
--- a/vm/Native.c
+++ b/vm/Native.c
@@ -124,7 +124,7 @@
         free(desc);
     }
 
-    dvmThrowException("Ljava/lang/UnsatisfiedLinkError;", method->name);
+    dvmThrowUnsatisfiedLinkError(method->name);
 }
 
 
@@ -200,7 +200,7 @@
 
     ent = dvmHashTableLookup(gDvm.nativeLibs, hash, (void*)pathName,
                 hashcmpNameStr, false);
-    return ent;
+    return (SharedLib*)ent;
 }
 
 /*
@@ -218,8 +218,8 @@
      * our own pointer back.  If somebody beat us to the punch, we'll get
      * their pointer back instead.
      */
-    return dvmHashTableLookup(gDvm.nativeLibs, hash, pLib, hashcmpSharedLib,
-                true);
+    return (SharedLib*)dvmHashTableLookup(gDvm.nativeLibs, hash, pLib,
+                hashcmpSharedLib, true);
 }
 
 /*
@@ -426,7 +426,7 @@
              * top of the stack is around Runtime.loadLibrary().  (See
              * the comments in the JNI FindClass function.)
              */
-            OnLoadFunc func = vonLoad;
+            OnLoadFunc func = (OnLoadFunc)vonLoad;
             Object* prevOverride = self->classLoaderOverride;
 
             self->classLoaderOverride = classLoader;
@@ -558,7 +558,7 @@
 
     *pLen = 4 + descriptorLength + strlen(methodName);
 
-    result = malloc(*pLen +1);
+    result = (char*)malloc(*pLen +1);
     if (result == NULL)
         return NULL;
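
The Native.c hunks are mostly casts, plus the switch to dvmThrowUnsatisfiedLinkError() when symbol lookup for a native method fails. From the library author's side, the way to avoid that lookup (and its name mangling) entirely is explicit registration; a minimal sketch using the standard JNI API, with purely illustrative class and method names:

    #include <jni.h>

    static jint nativeAdd(JNIEnv* env, jclass clazz, jint a, jint b)
    {
        return a + b;
    }

    static const JNINativeMethod gMethods[] = {
        { "nativeAdd", "(II)I", (void*) nativeAdd },
    };

    /* Typically called from JNI_OnLoad; returns 0 on success. */
    int registerMyNatives(JNIEnv* env)
    {
        jclass clazz = (*env)->FindClass(env, "com/example/MyClass");
        if (clazz == NULL)
            return -1;
        return (*env)->RegisterNatives(env, clazz, gMethods,
                sizeof(gMethods) / sizeof(gMethods[0]));
    }
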
 
diff --git a/vm/PointerSet.c b/vm/PointerSet.c
index 1d2e814..ca7b537 100644
--- a/vm/PointerSet.c
+++ b/vm/PointerSet.c
@@ -54,10 +54,10 @@
  */
 PointerSet* dvmPointerSetAlloc(int initialSize)
 {
-    PointerSet* pSet = calloc(1, sizeof(PointerSet));
+    PointerSet* pSet = (PointerSet*)calloc(1, sizeof(PointerSet));
     if (pSet != NULL) {
         if (initialSize > 0) {
-            pSet->list = malloc(sizeof(const void*) * initialSize);
+            pSet->list = (const void**)malloc(sizeof(void*) * initialSize);
             if (pSet->list == NULL) {
                 free(pSet);
                 return NULL;
@@ -131,7 +131,7 @@
         else
             pSet->alloc *= 2;
         LOGVV("expanding %p to %d\n", pSet, pSet->alloc);
-        newList = realloc(pSet->list, pSet->alloc * sizeof(const void*));
+        newList = (const void**)realloc(pSet->list, pSet->alloc * sizeof(void*));
         if (newList == NULL) {
             LOGE("Failed expanding ptr set (alloc=%d)\n", pSet->alloc);
             dvmAbort();
@@ -267,7 +267,8 @@
  */
 void dvmPointerSetDump(const PointerSet* pSet)
 {
+    LOGI("PointerSet %p\n", pSet);
     int i;
     for (i = 0; i < pSet->count; i++)
-        printf(" %p", pSet->list[i]);
+        LOGI(" %2d: %p", i, pSet->list[i]);
 }
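
Besides switching dvmPointerSetDump() from printf to LOGI, the PointerSet.c hunks cast the calloc/malloc/realloc results. The surrounding code (visible in the second hunk) uses the classic amortized-doubling growth scheme; a toy version of just that step, showing why realloc's result is checked before it replaces the old pointer:

    #include <stdlib.h>

    typedef struct {
        const void** list;
        int count;
        int alloc;
    } PtrVec;

    /* Append a pointer, doubling capacity when full.  Returns 0 on success. */
    int ptrVecAppend(PtrVec* v, const void* ptr)
    {
        if (v->count == v->alloc) {
            int newAlloc = (v->alloc == 0) ? 4 : v->alloc * 2;
            const void** newList =
                (const void**) realloc(v->list, newAlloc * sizeof(void*));
            if (newList == NULL)
                return -1;              /* old list is still intact */
            v->list = newList;
            v->alloc = newAlloc;
        }
        v->list[v->count++] = ptr;
        return 0;
    }
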
diff --git a/vm/Profile.c b/vm/Profile.c
index 9b356f6..d0d24b8 100644
--- a/vm/Profile.c
+++ b/vm/Profile.c
@@ -18,6 +18,7 @@
  * Android's method call profiling goodies.
  */
 #include "Dalvik.h"
+#include <interp/InterpDefs.h>
 
 #include <stdlib.h>
 #include <stddef.h>
@@ -145,20 +146,6 @@
     dvmInitMutex(&gDvm.methodTrace.startStopLock);
     pthread_cond_init(&gDvm.methodTrace.threadExitCond, NULL);
 
-    ClassObject* clazz =
-        dvmFindClassNoInit("Ldalvik/system/VMDebug;", NULL);
-    assert(clazz != NULL);
-    gDvm.methodTrace.gcMethod =
-        dvmFindDirectMethodByDescriptor(clazz, "startGC", "()V");
-    gDvm.methodTrace.classPrepMethod =
-        dvmFindDirectMethodByDescriptor(clazz, "startClassPrep", "()V");
-    if (gDvm.methodTrace.gcMethod == NULL ||
-        gDvm.methodTrace.classPrepMethod == NULL)
-    {
-        LOGE("Unable to find startGC or startClassPrep\n");
-        return false;
-    }
-
     assert(!dvmCheckException(dvmThreadSelf()));
 
     /*
@@ -212,28 +199,12 @@
 }
 
 /*
- * Update the "active profilers" count.
- *
- * "count" should be +1 or -1.
+ * Update the set of active profilers
  */
-static void updateActiveProfilers(int count)
+static void updateActiveProfilers(ExecutionSubModes newMode, bool enable)
 {
-    int oldValue, newValue;
-
-    do {
-        oldValue = gDvm.activeProfilers;
-        newValue = oldValue + count;
-        if (newValue < 0) {
-            LOGE("Can't have %d active profilers\n", newValue);
-            dvmAbort();
-        }
-    } while (android_atomic_release_cas(oldValue, newValue,
-            &gDvm.activeProfilers) != 0);
-
-    LOGD("+++ active profiler count now %d\n", newValue);
-#if defined(WITH_JIT)
-    dvmCompilerStateRefresh();
-#endif
+    dvmUpdateInterpBreak(newMode, enable);
+    LOGD("+++ active profiler set now %d\n", gDvm.interpBreak);
 }
 
 
@@ -348,7 +319,9 @@
         dvmMethodTraceStop();
         dvmLockMutex(&state->startStopLock);
     }
-    updateActiveProfilers(1);
+    /* Should only have a single trace going at once */
+    assert((gDvm.interpBreak & kSubModeMethodTrace) == 0);
+    updateActiveProfilers(kSubModeMethodTrace, true);
     LOGI("TRACE STARTED: '%s' %dKB\n", traceFileName, bufferSize / 1024);
 
     /*
@@ -359,7 +332,7 @@
      */
     state->buf = (u1*) malloc(bufferSize);
     if (state->buf == NULL) {
-        dvmThrowException("Ljava/lang/InternalError;", "buffer alloc failed");
+        dvmThrowInternalError("buffer alloc failed");
         goto fail;
     }
     if (!directToDdms) {
@@ -372,7 +345,7 @@
             int err = errno;
             LOGE("Unable to open trace file '%s': %s\n",
                 traceFileName, strerror(err));
-            dvmThrowExceptionFmt("Ljava/lang/RuntimeException;",
+            dvmThrowExceptionFmt(gDvm.exRuntimeException,
                 "Unable to open trace file '%s': %s",
                 traceFileName, strerror(err));
             goto fail;
@@ -416,7 +389,7 @@
     return;
 
 fail:
-    updateActiveProfilers(-1);
+    updateActiveProfilers(kSubModeMethodTrace, false);
     if (state->traceFile != NULL) {
         fclose(state->traceFile);
         state->traceFile = NULL;
@@ -508,7 +481,7 @@
         dvmUnlockMutex(&state->startStopLock);
         return;
     } else {
-        updateActiveProfilers(-1);
+        updateActiveProfilers(kSubModeMethodTrace, false);
     }
 
     /* compute elapsed time */
@@ -574,7 +547,7 @@
     LOGI("TRACE STOPPED%s: writing %d records\n",
         state->overflow ? " (NOTE: overflowed buffer)" : "",
         (finalCurOffset - TRACE_HEADER_LEN) / TRACE_REC_SIZE);
-    if (gDvm.debuggerActive) {
+    if (DEBUGGER_ACTIVE) {
         LOGW("WARNING: a debugger is active; method-tracing results "
              "will be skewed\n");
     }
@@ -649,7 +622,7 @@
             int err = errno;
             LOGE("trace fwrite(%d) failed: %s\n",
                 finalCurOffset, strerror(err));
-            dvmThrowExceptionFmt("Ljava/lang/RuntimeException;",
+            dvmThrowExceptionFmt(gDvm.exRuntimeException,
                 "Trace data write failed: %s", strerror(err));
         }
     }
@@ -678,6 +651,8 @@
     int oldOffset, newOffset;
     u1* ptr;
 
+    assert(method != NULL);
+
     /*
      * We can only access the per-thread CPU clock from within the
      * thread, so we have to initialize the base time on the first use.
@@ -726,30 +701,27 @@
     *ptr++ = (u1) (clockDiff >> 24);
 }
 
-#if defined(WITH_INLINE_PROFILING)
-#include <interp/InterpDefs.h>
 
 /*
  * Register the METHOD_TRACE_ENTER action for the fast interpreter and
  * JIT'ed code.
  */
-void dvmFastMethodTraceEnter(const Method* method,
-                             const struct InterpState* interpState)
+void dvmFastMethodTraceEnter(const Method* method, Thread* self)
 {
-    if (gDvm.activeProfilers) {
-        dvmMethodTraceAdd(interpState->self, method, METHOD_TRACE_ENTER);
+    if (gDvm.interpBreak & kSubModeMethodTrace) {
+        dvmMethodTraceAdd(self, method, METHOD_TRACE_ENTER);
     }
 }
 
 /*
  * Register the METHOD_TRACE_EXIT action for the fast interpreter and
  * JIT'ed code for Java methods. The about-to-return callee method can be
- * retrieved from interpState->method.
+ * retrieved from self->interpSave.method.
  */
-void dvmFastJavaMethodTraceExit(const struct InterpState* interpState)
+void dvmFastJavaMethodTraceExit(Thread* self)
 {
-    if (gDvm.activeProfilers) {
-        dvmMethodTraceAdd(interpState->self, interpState->method,
+    if (gDvm.interpBreak & kSubModeMethodTrace) {
+        dvmMethodTraceAdd(self, self->interpSave.method,
                           METHOD_TRACE_EXIT);
     }
 }
@@ -759,14 +731,12 @@
  * JIT'ed code for JNI methods. The about-to-return JNI callee method is passed
  * in explicitly.
  */
-void dvmFastNativeMethodTraceExit(const Method* method,
-                                  const struct InterpState* interpState)
+void dvmFastNativeMethodTraceExit(const Method* method, Thread* self)
 {
-    if (gDvm.activeProfilers) {
-        dvmMethodTraceAdd(interpState->self, method, METHOD_TRACE_EXIT);
+    if (gDvm.interpBreak & kSubModeMethodTrace) {
+        dvmMethodTraceAdd(self, method, METHOD_TRACE_EXIT);
     }
 }
-#endif
 
 /*
  * We just did something with a method.  Emit a record by setting a value
@@ -840,11 +810,11 @@
  */
 void dvmMethodTraceGCBegin(void)
 {
-    TRACE_METHOD_ENTER(dvmThreadSelf(), gDvm.methodTrace.gcMethod);
+    TRACE_METHOD_ENTER(dvmThreadSelf(), gDvm.methodTraceGcMethod);
 }
 void dvmMethodTraceGCEnd(void)
 {
-    TRACE_METHOD_EXIT(dvmThreadSelf(), gDvm.methodTrace.gcMethod);
+    TRACE_METHOD_EXIT(dvmThreadSelf(), gDvm.methodTraceGcMethod);
 }
 
 /*
@@ -852,11 +822,11 @@
  */
 void dvmMethodTraceClassPrepBegin(void)
 {
-    TRACE_METHOD_ENTER(dvmThreadSelf(), gDvm.methodTrace.classPrepMethod);
+    TRACE_METHOD_ENTER(dvmThreadSelf(), gDvm.methodTraceClassPrepMethod);
 }
 void dvmMethodTraceClassPrepEnd(void)
 {
-    TRACE_METHOD_EXIT(dvmThreadSelf(), gDvm.methodTrace.classPrepMethod);
+    TRACE_METHOD_EXIT(dvmThreadSelf(), gDvm.methodTraceClassPrepMethod);
 }
 
 
@@ -869,12 +839,11 @@
     if (gDvm.emulatorTracePage == NULL)
         return;
 
-    updateActiveProfilers(1);
-
     /* in theory we should make this an atomic inc; in practice not important */
     gDvm.emulatorTraceEnableCount++;
     if (gDvm.emulatorTraceEnableCount == 1)
         LOGD("--- emulator method traces enabled\n");
+    updateActiveProfilers(kSubModeEmulatorTrace, true);
 }
 
 /*
@@ -886,11 +855,12 @@
         LOGE("ERROR: emulator tracing not enabled\n");
         return;
     }
-    updateActiveProfilers(-1);
     /* in theory we should make this an atomic inc; in practice not important */
     gDvm.emulatorTraceEnableCount--;
     if (gDvm.emulatorTraceEnableCount == 0)
         LOGD("--- emulator method traces disabled\n");
+    updateActiveProfilers(kSubModeEmulatorTrace,
+                          (gDvm.emulatorTraceEnableCount != 0));
 }
 
 
@@ -899,12 +869,9 @@
  */
 void dvmStartInstructionCounting(void)
 {
-#if defined(WITH_INLINE_PROFILING)
-    LOGW("Instruction counting not supported with inline profiling");
-#endif
-    updateActiveProfilers(1);
     /* in theory we should make this an atomic inc; in practice not important */
     gDvm.instructionCountEnableCount++;
+    updateActiveProfilers(kSubModeInstCounting, true);
 }
 
 /*
@@ -916,8 +883,9 @@
         LOGE("ERROR: instruction counting not enabled\n");
         dvmAbort();
     }
-    updateActiveProfilers(-1);
     gDvm.instructionCountEnableCount--;
+    updateActiveProfilers(kSubModeInstCounting,
+                          (gDvm.instructionCountEnableCount != 0));
 }
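
The Profile.c changes replace the global activeProfilers counter with per-feature bits in gDvm.interpBreak, toggled through dvmUpdateInterpBreak() and tested directly on the fast path. A compressed sketch of that pattern follows; the submode names mirror the patch, but the values and the non-atomic update are illustrative only (the real dvmUpdateInterpBreak() is not shown in this diff):

    typedef enum {
        kSubModeMethodTrace   = 0x01,   /* illustrative values */
        kSubModeEmulatorTrace = 0x02,
        kSubModeInstCounting  = 0x04,
    } ExecutionSubModes;

    static volatile unsigned int interpBreak;  /* stand-in for gDvm.interpBreak */

    static void updateInterpBreak(ExecutionSubModes mode, int enable)
    {
        if (enable)
            interpBreak |= mode;
        else
            interpBreak &= ~(unsigned int) mode;
    }

    /* Fast-path test, as in the new TRACE_METHOD_ENTER macro. */
    static int methodTraceActive(void)
    {
        return (interpBreak & kSubModeMethodTrace) != 0;
    }

One practical consequence, visible in the emulator-trace and instruction-count functions above: enabling simply sets the bit, but disabling passes the remaining enable count so the bit is only cleared when the last user goes away.
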
 
 
diff --git a/vm/Profile.h b/vm/Profile.h
index dd38252..0cd6495 100644
--- a/vm/Profile.h
+++ b/vm/Profile.h
@@ -36,10 +36,6 @@
  * most of this per-thread.
  */
 typedef struct MethodTraceState {
-    /* these are set during VM init */
-    Method* gcMethod;
-    Method* classPrepMethod;
-
     /* active state */
     pthread_mutex_t startStopLock;
     pthread_cond_t  threadExitCond;
@@ -113,30 +109,24 @@
  */
 #define TRACE_METHOD_ENTER(_self, _method)                                 \
     do {                                                                    \
-        if (gDvm.activeProfilers != 0) {                                    \
-            if (gDvm.methodTrace.traceEnabled)                              \
-                dvmMethodTraceAdd(_self, _method, METHOD_TRACE_ENTER);      \
-            if (gDvm.emulatorTraceEnableCount != 0)                         \
-                dvmEmitEmulatorTrace(_method, METHOD_TRACE_ENTER);          \
-        }                                                                   \
+        if (gDvm.interpBreak & kSubModeMethodTrace)                         \
+            dvmMethodTraceAdd(_self, _method, METHOD_TRACE_ENTER);          \
+        if (gDvm.interpBreak & kSubModeEmulatorTrace)                       \
+            dvmEmitEmulatorTrace(_method, METHOD_TRACE_ENTER);              \
     } while(0);
 #define TRACE_METHOD_EXIT(_self, _method)                                  \
     do {                                                                    \
-        if (gDvm.activeProfilers != 0) {                                    \
-            if (gDvm.methodTrace.traceEnabled)                              \
-                dvmMethodTraceAdd(_self, _method, METHOD_TRACE_EXIT);       \
-            if (gDvm.emulatorTraceEnableCount != 0)                         \
-                dvmEmitEmulatorTrace(_method, METHOD_TRACE_EXIT);           \
-        }                                                                   \
+        if (gDvm.interpBreak & kSubModeMethodTrace)                         \
+            dvmMethodTraceAdd(_self, _method, METHOD_TRACE_EXIT);           \
+        if (gDvm.interpBreak & kSubModeEmulatorTrace)                       \
+            dvmEmitEmulatorTrace(_method, METHOD_TRACE_EXIT);               \
     } while(0);
 #define TRACE_METHOD_UNROLL(_self, _method)                                \
     do {                                                                    \
-        if (gDvm.activeProfilers != 0) {                                    \
-            if (gDvm.methodTrace.traceEnabled)                              \
-                dvmMethodTraceAdd(_self, _method, METHOD_TRACE_UNROLL);     \
-            if (gDvm.emulatorTraceEnableCount != 0)                         \
-                dvmEmitEmulatorTrace(_method, METHOD_TRACE_UNROLL);         \
-        }                                                                   \
+        if (gDvm.interpBreak & kSubModeMethodTrace)                         \
+            dvmMethodTraceAdd(_self, _method, METHOD_TRACE_UNROLL);         \
+        if (gDvm.interpBreak & kSubModeEmulatorTrace)                       \
+            dvmEmitEmulatorTrace(_method, METHOD_TRACE_UNROLL);             \
     } while(0);
 
 void dvmMethodTraceAdd(struct Thread* self, const Method* method, int action);
@@ -147,14 +137,9 @@
 void dvmMethodTraceClassPrepBegin(void);
 void dvmMethodTraceClassPrepEnd(void);
 
-#if defined(WITH_INLINE_PROFILING)
-struct InterpState;     // extern
-void dvmFastMethodTraceEnter(const Method* method,
-                             const struct InterpState* interpState);
-void dvmFastJavaMethodTraceExit(const struct InterpState* interpState);
-void dvmFastNativeMethodTraceExit(const Method*method,
-                                  const struct InterpState* interpState);
-#endif
+void dvmFastMethodTraceEnter(const Method* method, struct Thread* self);
+void dvmFastJavaMethodTraceExit(struct Thread* self);
+void dvmFastNativeMethodTraceExit(const Method* method, struct Thread* self);
 
 /*
  * Start/stop alloc counting.
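Editorial aside on the Profile.h hunk above: the per-call gate goes from one global counter plus nested per-feature flags to two independent bit tests against gDvm.interpBreak. A minimal stand-alone sketch of that pattern, using made-up names in place of the VM's kSubMode* constants and trace functions:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for kSubModeMethodTrace / kSubModeEmulatorTrace. */
    enum { SUBMODE_METHOD_TRACE = 1 << 0, SUBMODE_EMULATOR_TRACE = 1 << 1 };

    static uint32_t interpBreak;    /* stand-in for gDvm.interpBreak */

    static void methodTraceAdd(const char* name)   { printf("method %s\n", name); }
    static void emitEmulatorTrace(const char* name) { printf("emul   %s\n", name); }

    /* What TRACE_METHOD_ENTER boils down to after the patch: two bit tests. */
    static void onMethodEnter(const char* name)
    {
        if (interpBreak & SUBMODE_METHOD_TRACE)
            methodTraceAdd(name);
        if (interpBreak & SUBMODE_EMULATOR_TRACE)
            emitEmulatorTrace(name);
    }

    int main(void)
    {
        onMethodEnter("Lfoo/Bar;.baz");          /* nothing enabled: no output */
        interpBreak |= SUBMODE_METHOD_TRACE;     /* enable method tracing only */
        onMethodEnter("Lfoo/Bar;.baz");
        return 0;
    }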
diff --git a/vm/Properties.c b/vm/Properties.c
index 243dc3e..00d4b07 100644
--- a/vm/Properties.c
+++ b/vm/Properties.c
@@ -18,270 +18,24 @@
  */
 #include "Dalvik.h"
 
+#include <cutils/array.h>
 #include <stdlib.h>
-#include <sys/utsname.h>
-#include <limits.h>
-#include <unistd.h>
 
-/*
- * Create some storage for properties read from the command line.
- */
-bool dvmPropertiesStartup(int maxProps)
+bool dvmPropertiesStartup(void)
 {
-    gDvm.maxProps = maxProps;
-    if (maxProps > 0) {
-        gDvm.propList = (char**) malloc(maxProps * sizeof(char*));
-        if (gDvm.propList == NULL)
-            return false;
-    }
-    gDvm.numProps = 0;
-
-    return true;
-}
-
-/*
- * Clean up.
- */
-void dvmPropertiesShutdown(void)
-{
-    int i;
-
-    for (i = 0; i < gDvm.numProps; i++)
-        free(gDvm.propList[i]);
-    free(gDvm.propList);
-    gDvm.propList = NULL;
-}
-
-/*
- * Add a property specified on the command line.  "argStr" has the form
- * "name=value".  "name" must have nonzero length.
- *
- * Returns "true" if argStr appears valid.
- */
-bool dvmAddCommandLineProperty(const char* argStr)
-{
-    char* mangle;
-    char* equals;
-
-    mangle = strdup(argStr);
-    equals = strchr(mangle, '=');
-    if (equals == NULL || equals == mangle) {
-        free(mangle);
+    gDvm.properties = arrayCreate();
+    if (gDvm.properties == NULL) {
         return false;
     }
-    *equals = '\0';
-
-    assert(gDvm.numProps < gDvm.maxProps);
-    gDvm.propList[gDvm.numProps++] = mangle;
-
     return true;
 }
 
-
-/*
- * Find the "put" method for this class.
- *
- * Returns NULL and throws an exception if not found.
- */
-static Method* getPut(ClassObject* clazz)
+void dvmPropertiesShutdown(void)
 {
-    Method* put;
-
-    put = dvmFindVirtualMethodHierByDescriptor(clazz, "setProperty",
-            "(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/Object;");
-    if (put == NULL) {
-        dvmThrowException("Ljava/lang/RuntimeException;",
-            "could not find setProperty(String,String) in Properties");
-        /* fall through to return */
+    size_t size = arraySize(gDvm.properties);
+    size_t i;
+    for (i = 0; i < size; ++i) {
+        free(arrayGet(gDvm.properties, i));
     }
-    return put;
-}
-
-/*
- * Set the value of the property.
- */
-static void setProperty(Object* propObj, Method* put, const char* key,
-    const char* value)
-{
-    StringObject* keyStr;
-    StringObject* valueStr;
-
-    if (value == NULL) {
-        /* unclear what to do; probably want to create prop w/ empty string */
-        value = "";
-    }
-
-    keyStr = dvmCreateStringFromCstr(key);
-    valueStr = dvmCreateStringFromCstr(value);
-    if (keyStr == NULL || valueStr == NULL) {
-        LOGW("setProperty string creation failed\n");
-        goto bail;
-    }
-
-    JValue unused;
-    dvmCallMethod(dvmThreadSelf(), put, propObj, &unused, keyStr, valueStr);
-
-bail:
-    dvmReleaseTrackedAlloc((Object*) keyStr, NULL);
-    dvmReleaseTrackedAlloc((Object*) valueStr, NULL);
-}
-
-/*
- * Create the VM-default system properties.
- *
- * We can do them here, or do them in interpreted code with lots of native
- * methods to get bits and pieces.  This is a bit smaller.
- */
-void dvmCreateDefaultProperties(Object* propObj)
-{
-    Method* put = getPut(propObj->clazz);
-
-    if (put == NULL)
-        return;
-
-    struct utsname info;
-    uname(&info);
-
-    /* constant strings that are used multiple times below */
-    const char *projectUrl = "http://www.android.com/";
-    const char *projectName = "The Android Project";
-
-    /*
-     * These are listed in the docs.
-     */
-
-    setProperty(propObj, put, "java.boot.class.path", gDvm.bootClassPathStr);
-    setProperty(propObj, put, "java.class.path", gDvm.classPathStr);
-    setProperty(propObj, put, "java.class.version", "46.0");
-    setProperty(propObj, put, "java.compiler", "");
-    setProperty(propObj, put, "java.ext.dirs", "");
-
-    if (getenv("JAVA_HOME") != NULL) {
-        setProperty(propObj, put, "java.home", getenv("JAVA_HOME"));
-    } else {
-        setProperty(propObj, put, "java.home", "/system");
-    }
-
-    setProperty(propObj, put, "java.io.tmpdir", "/tmp");
-    setProperty(propObj, put, "java.library.path", getenv("LD_LIBRARY_PATH"));
-
-    setProperty(propObj, put, "java.net.preferIPv6Addresses", "true");
-
-    setProperty(propObj, put, "java.vendor", projectName);
-    setProperty(propObj, put, "java.vendor.url", projectUrl);
-    setProperty(propObj, put, "java.version", "0");
-    setProperty(propObj, put, "java.vm.name", "Dalvik");
-    setProperty(propObj, put, "java.vm.specification.name",
-            "Dalvik Virtual Machine Specification");
-    setProperty(propObj, put, "java.vm.specification.vendor", projectName);
-    setProperty(propObj, put, "java.vm.specification.version", "0.9");
-    setProperty(propObj, put, "java.vm.vendor", projectName);
-
-    char tmpBuf[64];
-    sprintf(tmpBuf, "%d.%d.%d",
-        DALVIK_MAJOR_VERSION, DALVIK_MINOR_VERSION, DALVIK_BUG_VERSION);
-    setProperty(propObj, put, "java.vm.version", tmpBuf);
-
-    setProperty(propObj, put, "java.specification.name",
-            "Dalvik Core Library");
-    setProperty(propObj, put, "java.specification.vendor", projectName);
-    setProperty(propObj, put, "java.specification.version", "0.9");
-
-    setProperty(propObj, put, "os.arch", info.machine);
-    setProperty(propObj, put, "os.name", info.sysname);
-    setProperty(propObj, put, "os.version", info.release);
-    setProperty(propObj, put, "user.home", getenv("HOME"));
-    setProperty(propObj, put, "user.name", getenv("USER"));
-
-    char path[PATH_MAX];
-    setProperty(propObj, put, "user.dir", getcwd(path, sizeof(path)));
-
-    setProperty(propObj, put, "file.separator", "/");
-    setProperty(propObj, put, "line.separator", "\n");
-    setProperty(propObj, put, "path.separator", ":");
-
-    /*
-     * These show up elsewhere, so do them here too.
-     */
-    setProperty(propObj, put, "java.runtime.name", "Android Runtime");
-    setProperty(propObj, put, "java.runtime.version", "0.9");
-    setProperty(propObj, put, "java.vm.vendor.url", projectUrl);
-
-    setProperty(propObj, put, "file.encoding", "UTF-8");
-    setProperty(propObj, put, "user.language", "en");
-    setProperty(propObj, put, "user.region", "US");
-
-    /*
-     * These are unique to Android/Dalvik.
-     */
-    setProperty(propObj, put, "android.vm.dexfile", "true");
-}
-
-/*
- * Add anything specified on the command line.
- */
-void dvmSetCommandLineProperties(Object* propObj)
-{
-    Method* put = getPut(propObj->clazz);
-    int i;
-
-    if (put == NULL)
-        return;
-
-    for (i = 0; i < gDvm.numProps; i++) {
-        const char* value;
-
-        /* value starts after the end of the key string */
-        for (value = gDvm.propList[i]; *value != '\0'; value++)
-            ;
-        setProperty(propObj, put, gDvm.propList[i], value+1);
-    }
-}
-
-/*
- * Get a property by calling System.getProperty(key).
- *
- * Returns a newly-allocated string, or NULL on failure or key not found.
- * (Unexpected failures will also raise an exception.)
- */
-char* dvmGetProperty(const char* key)
-{
-    Thread* self = dvmThreadSelf();
-    ClassObject* system;
-    Method* getProp;
-    StringObject* keyObj = NULL;
-    StringObject* valueObj;
-    char* result = NULL;
-
-    assert(key != NULL);
-
-    system = dvmFindSystemClass("Ljava/lang/System;");
-    if (system == NULL)
-        goto bail;
-
-    getProp = dvmFindDirectMethodByDescriptor(system, "getProperty",
-        "(Ljava/lang/String;)Ljava/lang/String;");
-    if (getProp == NULL) {
-        LOGW("Could not find getProperty(String) in java.lang.System\n");
-        goto bail;
-    }
-
-    keyObj = dvmCreateStringFromCstr(key);
-    if (keyObj == NULL)
-        goto bail;
-
-    JValue val;
-    dvmCallMethod(self, getProp, NULL, &val, keyObj);
-    valueObj = (StringObject*) val.l;
-    if (valueObj == NULL)
-        goto bail;
-
-    /* don't need to call dvmAddTrackedAlloc on result; conv to C string safe */
-
-    result = dvmCreateCstrFromString(valueObj);
-    /* fall through with result */
-
-bail:
-    dvmReleaseTrackedAlloc((Object*)keyObj, self);
-    return result;
+    arrayFree(gDvm.properties);
 }
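The Properties.c rewrite above strips the file down to storage: a growable cutils Array of heap-allocated "name=value" strings, freed element by element on shutdown. A self-contained sketch of that ownership pattern, using a plain pointer vector as a stand-in for the cutils Array (only arrayCreate/arrayGet/arraySize/arrayFree appear in the patch; the add step here is assumed):

    #include <stdlib.h>
    #include <string.h>

    typedef struct { char** items; size_t count, alloc; } StrArray;

    static int strArrayAdd(StrArray* a, const char* s)      /* assumed add step */
    {
        if (a->count == a->alloc) {
            size_t newAlloc = a->alloc ? a->alloc * 2 : 4;
            char** p = (char**) realloc(a->items, newAlloc * sizeof(char*));
            if (p == NULL)
                return -1;
            a->items = p;
            a->alloc = newAlloc;
        }
        a->items[a->count] = strdup(s);          /* the array owns its strings */
        return (a->items[a->count++] != NULL) ? 0 : -1;
    }

    static void strArrayFree(StrArray* a)        /* mirrors dvmPropertiesShutdown */
    {
        size_t i;
        for (i = 0; i < a->count; ++i)
            free(a->items[i]);                   /* free each element... */
        free(a->items);                          /* ...then the array itself */
    }

    int main(void)
    {
        StrArray props = { NULL, 0, 0 };
        strArrayAdd(&props, "java.vm.name=Dalvik");
        strArrayAdd(&props, "user.language=en");
        strArrayFree(&props);
        return 0;
    }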
diff --git a/vm/Properties.h b/vm/Properties.h
index f7f2f03..138be41 100644
--- a/vm/Properties.h
+++ b/vm/Properties.h
@@ -22,16 +22,7 @@
 /*
  * Initialization.
  */
-bool dvmPropertiesStartup(int maxProps);
+bool dvmPropertiesStartup(void);
 void dvmPropertiesShutdown(void);
 
-/* add "-D" option to list */
-bool dvmAddCommandLineProperty(const char* argStr);
-
-/* called during property initialization */
-void dvmCreateDefaultProperties(Object* propObj);
-void dvmSetCommandLineProperties(Object* propObj);
-
-char* dvmGetProperty(const char* key);
-
 #endif /*_DALVIK_PROPERTIES*/
diff --git a/vm/RawDexFile.c b/vm/RawDexFile.c
index 5da4907..2c73481 100644
--- a/vm/RawDexFile.c
+++ b/vm/RawDexFile.c
@@ -250,11 +250,20 @@
 }
 
 /* See documentation comment in header. */
-int dvmRawDexFileOpenArray(const u1* pBytes, u4 length,
-    RawDexFile** ppDexFile)
+int dvmRawDexFileOpenArray(u1* pBytes, u4 length, RawDexFile** ppRawDexFile)
 {
-    // TODO - should be very similar to what JarFile does.
-    return -1;
+    DvmDex* pDvmDex = NULL;
+
+    if (!dvmPrepareDexInMemory(pBytes, length, &pDvmDex)) {
+        LOGD("Unable to open raw DEX from array\n");
+        return -1;
+    }
+    assert(pDvmDex != NULL);
+
+    *ppRawDexFile = (RawDexFile*) calloc(1, sizeof(RawDexFile));
+    (*ppRawDexFile)->pDvmDex = pDvmDex;
+
+    return 0;
 }
 
 /*
diff --git a/vm/RawDexFile.h b/vm/RawDexFile.h
index f15aac2..cbcb3b6 100644
--- a/vm/RawDexFile.h
+++ b/vm/RawDexFile.h
@@ -49,8 +49,7 @@
  * On success, returns 0 and sets "*ppDexFile" to a newly-allocated DexFile.
  * On failure, returns a meaningful error code [currently just -1].
  */
-int dvmRawDexFileOpenArray(const u1* pBytes, u4 length,
-    RawDexFile** ppDexFile);
+int dvmRawDexFileOpenArray(u1* pBytes, u4 length, RawDexFile** ppDexFile);
 
 /*
  * Free a RawDexFile structure, along with any associated structures.
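dvmRawDexFileOpenArray() above finally gets a body: prepare the in-memory DEX, then wrap the resulting DvmDex in a calloc'ed RawDexFile and report 0/-1 through the out-parameter. A hedged, self-contained sketch of that shape (stub types and a stub prepare step stand in for the real DvmDex/dvmPrepareDexInMemory; the sketch also checks the calloc result, which the hunk does not):

    #include <stdlib.h>

    typedef struct { void* pDvmDex; } RawDexSketch;    /* stand-in for RawDexFile */

    /* Stand-in for dvmPrepareDexInMemory(): accept any non-empty buffer. */
    static int prepareDex(unsigned char* bytes, unsigned len, void** ppDex)
    {
        if (bytes == NULL || len == 0)
            return 0;                    /* "false" */
        *ppDex = bytes;                  /* opaque handle for the sketch */
        return 1;                        /* "true" */
    }

    /* Same calling convention as the new dvmRawDexFileOpenArray(). */
    static int openArray(unsigned char* bytes, unsigned len, RawDexSketch** ppRaw)
    {
        void* pDex = NULL;
        if (!prepareDex(bytes, len, &pDex))
            return -1;
        *ppRaw = (RawDexSketch*) calloc(1, sizeof(RawDexSketch));
        if (*ppRaw == NULL)
            return -1;                   /* extra check; the patch omits it */
        (*ppRaw)->pDvmDex = pDex;
        return 0;                        /* 0 = success, -1 = failure */
    }

    int main(void)
    {
        unsigned char buf[8] = {0};
        RawDexSketch* pRaw = NULL;
        int rc = openArray(buf, sizeof(buf), &pRaw);
        free(pRaw);
        return rc;
    }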
diff --git a/vm/SignalCatcher.c b/vm/SignalCatcher.c
index d270b6f..9381312 100644
--- a/vm/SignalCatcher.c
+++ b/vm/SignalCatcher.c
@@ -169,11 +169,6 @@
     dvmCompilerDumpStats();
 #endif
 
-    if (false) {
-        dvmLockMutex(&gDvm.jniGlobalRefLock);
-        dvmDumpReferenceTable(&gDvm.jniGlobalRefTable, "JNI global");
-        dvmUnlockMutex(&gDvm.jniGlobalRefLock);
-    }
     if (false) dvmDumpTrackedAllocations(true);
 
     dvmResumeAllThreads(SUSPEND_FOR_STACK_DUMP);
@@ -216,10 +211,18 @@
 static void handleSigUsr1(void)
 {
     LOGI("SIGUSR1 forcing GC (no HPROF)\n");
-    dvmCollectGarbage(false);
+    dvmCollectGarbage();
 }
 
 #if defined(WITH_JIT) && defined(WITH_JIT_TUNING)
+/* Sample callback function for dvmJitScanAllClassPointers */
+void printAllClass(void *ptr)
+{
+    ClassObject **classPP = (ClassObject **) ptr;
+    LOGE("class %s", (*classPP)->descriptor);
+
+}
+
 /*
  * Respond to a SIGUSR2 by dumping some JIT stats and possibly resetting
  * the code cache.
@@ -228,6 +231,8 @@
 {
     static int codeCacheResetCount = 0;
     if ((--codeCacheResetCount & 7) == 0) {
+        /* Dump all class pointers in the traces */
+        dvmJitScanAllClassPointers(printAllClass);
         gDvmJit.codeCacheFull = true;
     } else {
         dvmCompilerDumpStats();
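printAllClass() above is the callback handed to dvmJitScanAllClassPointers; note that it receives the address of each embedded ClassObject* slot rather than the class itself, so a scanner could in principle also rewrite the slot. A toy sketch of that pointer-to-slot callback convention (the scanner and class struct are stand-ins, not the JIT's real trace data):

    #include <stdio.h>

    typedef struct { const char* descriptor; } FakeClass;

    /* Callback receives a pointer to the slot holding the class pointer. */
    static void printClass(void* ptr)
    {
        FakeClass** slot = (FakeClass**) ptr;
        printf("class %s\n", (*slot)->descriptor);
    }

    /* Stand-in scanner: walk every slot and hand its address to the callback. */
    static void scanAll(FakeClass** slots, int count, void (*cb)(void*))
    {
        int i;
        for (i = 0; i < count; i++)
            cb(&slots[i]);
    }

    int main(void)
    {
        FakeClass a = { "Ljava/lang/Object;" };
        FakeClass b = { "Ljava/lang/String;" };
        FakeClass* slots[] = { &a, &b };
        scanAll(slots, 2, printClass);
        return 0;
    }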
diff --git a/vm/Sync.c b/vm/Sync.c
index 967a0d0..387cdd1 100644
--- a/vm/Sync.c
+++ b/vm/Sync.c
@@ -14,23 +14,6 @@
  * limitations under the License.
  */
 
-/*
- * Fundamental synchronization mechanisms.
- *
- * The top part of the file has operations on "monitor" structs; the
- * next part has the native calls on objects.
- *
- * The current implementation uses "thin locking" to avoid allocating
- * an Object's full Monitor struct until absolutely necessary (i.e.,
- * during contention or a call to wait()).
- *
- * TODO: make improvements to thin locking
- * We may be able to improve performance and reduce memory requirements by:
- *  - reverting to a thin lock once the Monitor is no longer necessary
- *  - using a pool of monitor objects, with some sort of recycling scheme
- *
- * TODO: recycle native-level monitors when objects are garbage collected.
- */
 #include "Dalvik.h"
 
 #include <fcntl.h>
@@ -38,36 +21,8 @@
 #include <unistd.h>
 #include <pthread.h>
 #include <time.h>
-#include <sys/time.h>
 #include <errno.h>
 
-#define LOG_THIN    LOGV
-
-#ifdef WITH_DEADLOCK_PREDICTION     /* fwd */
-static const char* kStartBanner =
-    "<-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#";
-static const char* kEndBanner =
-    "#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#->";
-
-/*
- * Unsorted, expanding list of objects.
- *
- * This is very similar to PointerSet (which came into existence after this),
- * but these are unsorted, uniqueness is not enforced by the "add" function,
- * and the base object isn't allocated on the heap.
- */
-typedef struct ExpandingObjectList {
-    u2          alloc;
-    u2          count;
-    Object**    list;
-} ExpandingObjectList;
-
-/* fwd */
-static void updateDeadlockPrediction(Thread* self, Object* obj);
-static void removeCollectedObject(Object* obj);
-static void expandObjClear(ExpandingObjectList* pList);
-#endif
-
 /*
  * Every Object has a monitor associated with it, but not every Object is
  * actually locked.  Even the ones that are locked do not need a
@@ -131,29 +86,6 @@
      */
     char*       ownerFileName;
     u4          ownerLineNumber;
-
-#ifdef WITH_DEADLOCK_PREDICTION
-    /*
-     * Objects that have been locked immediately after this one in the
-     * past.  We use an expanding flat array, allocated on first use, to
-     * minimize allocations.  Deletions from the list, expected to be
-     * infrequent, are crunched down.
-     */
-    ExpandingObjectList historyChildren;
-
-    /*
-     * We also track parents.  This isn't strictly necessary, but it makes
-     * the cleanup at GC time significantly faster.
-     */
-    ExpandingObjectList historyParents;
-
-    /* used during cycle detection */
-    bool        historyMark;
-
-    /* stack trace, established the first time we locked the object */
-    int         historyStackDepth;
-    int*        historyRawStackTrace;
-#endif
 };
 
 
@@ -196,43 +128,12 @@
     mon = gDvm.monitorList;
     while (mon != NULL) {
         nextMon = mon->next;
-
-#ifdef WITH_DEADLOCK_PREDICTION
-        expandObjClear(&mon->historyChildren);
-        expandObjClear(&mon->historyParents);
-        free(mon->historyRawStackTrace);
-#endif
         free(mon);
         mon = nextMon;
     }
 }
 
 /*
- * Log some info about our monitors.
- */
-void dvmDumpMonitorInfo(const char* msg)
-{
-    if (gDvm.zygote) {
-        return;
-    }
-
-    int totalCount;
-    int liveCount;
-
-    totalCount = liveCount = 0;
-    Monitor* mon = gDvm.monitorList;
-    while (mon != NULL) {
-        totalCount++;
-        if (mon->obj != NULL)
-            liveCount++;
-        mon = mon->next;
-    }
-
-    LOGD("%s: monitor list has %d entries (%d live)\n",
-        msg, totalCount, liveCount);
-}
-
-/*
  * Get the object that a monitor is part of.
  */
 Object* dvmGetMonitorObject(Monitor* mon)
@@ -297,19 +198,11 @@
  * Free the monitor associated with an object and make the object's lock
  * thin again.  This is called during garbage collection.
  */
-static void freeObjectMonitor(Object* obj)
+static void freeMonitor(Monitor *mon)
 {
-    Monitor *mon;
-
-    assert(LW_SHAPE(obj->lock) == LW_SHAPE_FAT);
-
-#ifdef WITH_DEADLOCK_PREDICTION
-    if (gDvm.deadlockPredictMode != kDPOff)
-        removeCollectedObject(obj);
-#endif
-
-    mon = LW_MONITOR(obj->lock);
-    obj->lock = DVM_LOCK_INITIAL_THIN_VALUE;
+    assert(mon != NULL);
+    assert(mon->obj != NULL);
+    assert(LW_SHAPE(mon->obj->lock) == LW_SHAPE_FAT);
 
     /* This lock is associated with an object
      * that's being swept.  The only possible way
@@ -321,11 +214,6 @@
     assert(pthread_mutex_trylock(&mon->lock) == 0);
     assert(pthread_mutex_unlock(&mon->lock) == 0);
     dvmDestroyMutex(&mon->lock);
-#ifdef WITH_DEADLOCK_PREDICTION
-    expandObjClear(&mon->historyChildren);
-    expandObjClear(&mon->historyParents);
-    free(mon->historyRawStackTrace);
-#endif
     free(mon);
 }
 
@@ -340,25 +228,20 @@
 
     assert(mon != NULL);
     assert(isUnmarkedObject != NULL);
-#ifdef WITH_DEADLOCK_PREDICTION
-    dvmDumpMonitorInfo("before monitor sweep");
-#endif
     prev = &handle;
     prev->next = curr = *mon;
     while (curr != NULL) {
         obj = curr->obj;
         if (obj != NULL && (*isUnmarkedObject)(obj) != 0) {
-            prev->next = curr = curr->next;
-            freeObjectMonitor(obj);
+            prev->next = curr->next;
+            freeMonitor(curr);
+            curr = prev->next;
         } else {
             prev = curr;
             curr = curr->next;
         }
     }
     *mon = handle.next;
-#ifdef WITH_DEADLOCK_PREDICTION
-    dvmDumpMonitorInfo("after monitor sweep");
-#endif
 }
 
 static char *logWriteInt(char *dst, int value)
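In the dvmSweepMonitorList() hunk above, freeing moves from the object's lock word to the Monitor node itself, so the loop must unlink curr, free it, and only then resume from prev->next. The same dummy-head sweep in isolation (a generic int list standing in for the monitor list):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Node { int value; struct Node* next; } Node;

    /* Unlink and free every node the predicate marks dead, mirroring the
     * dummy-head ("handle") iteration in dvmSweepMonitorList(). */
    static void sweep(Node** head, int (*isDead)(int))
    {
        Node handle;
        Node *prev, *curr;

        handle.next = *head;
        prev = &handle;
        curr = handle.next;
        while (curr != NULL) {
            if (isDead(curr->value)) {
                prev->next = curr->next;    /* unlink first... */
                free(curr);                 /* ...then free the node... */
                curr = prev->next;          /* ...then continue after it */
            } else {
                prev = curr;
                curr = curr->next;
            }
        }
        *head = handle.next;
    }

    static int isOdd(int v) { return (v & 1) != 0; }

    int main(void)
    {
        Node* head = NULL;
        Node* n;
        int i;

        for (i = 5; i >= 1; i--) {          /* build the list 1..5 */
            n = (Node*) malloc(sizeof(Node));
            n->value = i;
            n->next = head;
            head = n;
        }
        sweep(&head, isOdd);                /* removes 1, 3, 5 */
        for (n = head; n != NULL; n = n->next)
            printf("%d ", n->value);        /* prints: 2 4 */
        printf("\n");
        return 0;
    }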
@@ -572,8 +455,7 @@
          * The JNI spec says that we should throw IllegalMonitorStateException
          * in this case.
          */
-        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
-                          "unlock of unowned monitor");
+        dvmThrowIllegalMonitorStateException("unlock of unowned monitor");
         return false;
     }
     return true;
@@ -743,7 +625,7 @@
 
     /* Make sure that we hold the lock. */
     if (mon->owner != self) {
-        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
+        dvmThrowIllegalMonitorStateException(
             "object not locked by thread before wait()");
         return;
     }
@@ -752,8 +634,7 @@
      * Enforce the timeout range.
      */
     if (msec < 0 || nsec < 0 || nsec > 999999) {
-        dvmThrowException("Ljava/lang/IllegalArgumentException;",
-            "timeout arguments out of range");
+        dvmThrowIllegalArgumentException("timeout arguments out of range");
         return;
     }
 
@@ -871,8 +752,9 @@
          * cleared when this exception is thrown."
          */
         self->interrupted = false;
-        if (interruptShouldThrow)
-            dvmThrowException("Ljava/lang/InterruptedException;", NULL);
+        if (interruptShouldThrow) {
+            dvmThrowInterruptedException(NULL);
+        }
     }
 }
 
@@ -888,7 +770,7 @@
 
     /* Make sure that we hold the lock. */
     if (mon->owner != self) {
-        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
+        dvmThrowIllegalMonitorStateException(
             "object not locked by thread before notify()");
         return;
     }
@@ -920,7 +802,7 @@
 
     /* Make sure that we hold the lock. */
     if (mon->owner != self) {
-        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
+        dvmThrowIllegalMonitorStateException(
             "object not locked by thread before notifyAll()");
         return;
     }
@@ -973,8 +855,10 @@
 {
     volatile u4 *thinp;
     ThreadStatus oldStatus;
-    useconds_t sleepDelay;
-    const useconds_t maxSleepDelay = 1 << 20;
+    struct timespec tm;
+    long sleepDelayNs;
+    long minSleepDelayNs = 1000000;  /* 1 millisecond */
+    long maxSleepDelayNs = 1000000000;  /* 1 second */
     u4 thin, newThin, threadId;
 
     assert(self != NULL);
@@ -1018,8 +902,8 @@
                 goto retry;
             }
         } else {
-            LOG_THIN("(%d) spin on lock %p: %#x (%#x) %#x",
-                     threadId, &obj->lock, 0, *thinp, thin);
+            LOGV("(%d) spin on lock %p: %#x (%#x) %#x",
+                 threadId, &obj->lock, 0, *thinp, thin);
             /*
              * The lock is owned by another thread.  Notify the VM
              * that we are about to wait.
@@ -1028,7 +912,7 @@
             /*
              * Spin until the thin lock is released or inflated.
              */
-            sleepDelay = 0;
+            sleepDelayNs = 0;
             for (;;) {
                 thin = *thinp;
                 /*
@@ -1056,13 +940,21 @@
                          * The lock has not been released.  Yield so
                          * the owning thread can run.
                          */
-                        if (sleepDelay == 0) {
+                        if (sleepDelayNs == 0) {
                             sched_yield();
-                            sleepDelay = 1000;
+                            sleepDelayNs = minSleepDelayNs;
                         } else {
-                            usleep(sleepDelay);
-                            if (sleepDelay < maxSleepDelay / 2) {
-                                sleepDelay *= 2;
+                            tm.tv_sec = 0;
+                            tm.tv_nsec = sleepDelayNs;
+                            nanosleep(&tm, NULL);
+                            /*
+                             * Prepare the next delay value.  Wrap around
+                             * to avoid polling once a second forever.
+                             */
+                            if (sleepDelayNs < maxSleepDelayNs / 2) {
+                                sleepDelayNs *= 2;
+                            } else {
+                                sleepDelayNs = minSleepDelayNs;
                             }
                         }
                     }
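The spin-wait hunk above swaps usleep() for nanosleep() with an explicit nanosecond delay that doubles from 1 ms up to 512 ms and then wraps back to 1 ms, instead of settling into a roughly once-a-second poll forever. The backoff schedule on its own, as a runnable sketch (it sleeps through a dozen iterations and prints each delay; no lock is actually contended):

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        const long minSleepDelayNs = 1000000;       /* 1 ms */
        const long maxSleepDelayNs = 1000000000;    /* 1 s  */
        long sleepDelayNs = 0;
        struct timespec tm;
        int i;

        for (i = 0; i < 12; i++) {
            if (sleepDelayNs == 0) {
                /* first miss: the real code just calls sched_yield() */
                sleepDelayNs = minSleepDelayNs;
            } else {
                tm.tv_sec = 0;
                tm.tv_nsec = sleepDelayNs;
                nanosleep(&tm, NULL);               /* wait out the current delay */
                if (sleepDelayNs < maxSleepDelayNs / 2)
                    sleepDelayNs *= 2;              /* 1, 2, 4, ... 512 ms */
                else
                    sleepDelayNs = minSleepDelayNs; /* wrap instead of 1 s polls */
            }
            printf("iteration %2d: next delay %9ld ns\n", i, sleepDelayNs);
        }
        return 0;
    }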
@@ -1072,14 +964,14 @@
                      * Let the VM know we are no longer waiting and
                      * try again.
                      */
-                    LOG_THIN("(%d) lock %p surprise-fattened",
+                    LOGV("(%d) lock %p surprise-fattened",
                              threadId, &obj->lock);
                     dvmChangeStatus(self, oldStatus);
                     goto retry;
                 }
             }
-            LOG_THIN("(%d) spin on lock done %p: %#x (%#x) %#x",
-                     threadId, &obj->lock, 0, *thinp, thin);
+            LOGV("(%d) spin on lock done %p: %#x (%#x) %#x",
+                 threadId, &obj->lock, 0, *thinp, thin);
             /*
              * We have acquired the thin lock.  Let the VM know that
              * we are no longer waiting.
@@ -1089,7 +981,7 @@
              * Fatten the lock.
              */
             inflateMonitor(self, obj);
-            LOG_THIN("(%d) lock %p fattened", threadId, &obj->lock);
+            LOGV("(%d) lock %p fattened", threadId, &obj->lock);
         }
     } else {
         /*
@@ -1098,56 +990,6 @@
         assert(LW_MONITOR(obj->lock) != NULL);
         lockMonitor(self, LW_MONITOR(obj->lock));
     }
-#ifdef WITH_DEADLOCK_PREDICTION
-    /*
-     * See if we were allowed to grab the lock at this time.  We do it
-     * *after* acquiring the lock, rather than before, so that we can
-     * freely update the Monitor struct.  This seems counter-intuitive,
-     * but our goal is deadlock *prediction* not deadlock *prevention*.
-     * (If we actually deadlock, the situation is easy to diagnose from
-     * a thread dump, so there's no point making a special effort to do
-     * the checks before the lock is held.)
-     *
-     * This needs to happen before we add the object to the thread's
-     * monitor list, so we can tell the difference between first-lock and
-     * re-lock.
-     *
-     * It's also important that we do this while in THREAD_RUNNING, so
-     * that we don't interfere with cleanup operations in the GC.
-     */
-    if (gDvm.deadlockPredictMode != kDPOff) {
-        if (self->status != THREAD_RUNNING) {
-            LOGE("Bad thread status (%d) in DP\n", self->status);
-            dvmDumpThread(self, false);
-            dvmAbort();
-        }
-        assert(!dvmCheckException(self));
-        updateDeadlockPrediction(self, obj);
-        if (dvmCheckException(self)) {
-            /*
-             * If we're throwing an exception here, we need to free the
-             * lock.  We add the object to the thread's monitor list so the
-             * "unlock" code can remove it.
-             */
-            dvmAddToMonitorList(self, obj, false);
-            dvmUnlockObject(self, obj);
-            LOGV("--- unlocked, pending is '%s'\n",
-                dvmGetException(self)->clazz->descriptor);
-        }
-    }
-
-    /*
-     * Add the locked object, and the current stack trace, to the list
-     * held by the Thread object.  If deadlock prediction isn't on,
-     * don't capture the stack trace.
-     */
-    dvmAddToMonitorList(self, obj, gDvm.deadlockPredictMode != kDPOff);
-#elif defined(WITH_MONITOR_TRACKING)
-    /*
-     * Add the locked object to the list held by the Thread object.
-     */
-    dvmAddToMonitorList(self, obj, false);
-#endif
 }
 
 /*
@@ -1196,8 +1038,7 @@
              * We do not own the lock.  The JVM spec requires that we
              * throw an exception in this case.
              */
-            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
-                              "unlock of unowned monitor");
+            dvmThrowIllegalMonitorStateException("unlock of unowned monitor");
             return false;
         }
     } else {
@@ -1213,14 +1054,6 @@
             return false;
         }
     }
-
-#ifdef WITH_MONITOR_TRACKING
-    /*
-     * Remove the object from the Thread's list.
-     */
-    dvmRemoveFromMonitorList(self, obj);
-#endif
-
     return true;
 }
 
@@ -1239,7 +1072,7 @@
         /* Make sure that 'self' holds the lock.
          */
         if (LW_LOCK_OWNER(thin) != self->threadId) {
-            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
+            dvmThrowIllegalMonitorStateException(
                 "object not locked by thread before wait()");
             return;
         }
@@ -1250,7 +1083,7 @@
          * any other thread gets a chance.
          */
         inflateMonitor(self, obj);
-        LOG_THIN("(%d) lock %p fattened by wait()", self->threadId, &obj->lock);
+        LOGV("(%d) lock %p fattened by wait()", self->threadId, &obj->lock);
     }
     mon = LW_MONITOR(obj->lock);
     waitMonitor(self, mon, msec, nsec, interruptShouldThrow);
@@ -1270,7 +1103,7 @@
         /* Make sure that 'self' holds the lock.
          */
         if (LW_LOCK_OWNER(thin) != self->threadId) {
-            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
+            dvmThrowIllegalMonitorStateException(
                 "object not locked by thread before notify()");
             return;
         }
@@ -1298,7 +1131,7 @@
         /* Make sure that 'self' holds the lock.
          */
         if (LW_LOCK_OWNER(thin) != self->threadId) {
-            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
+            dvmThrowIllegalMonitorStateException(
                 "object not locked by thread before notifyAll()");
             return;
         }
@@ -1537,593 +1370,3 @@
     return 0;  /* Quiet the compiler. */
 }
 #endif  /* WITH_COPYING_GC */
-
-#ifdef WITH_DEADLOCK_PREDICTION
-/*
- * ===========================================================================
- *      Deadlock prediction
- * ===========================================================================
- */
-/*
-The idea is to predict the possibility of deadlock by recording the order
-in which monitors are acquired.  If we see an attempt to acquire a lock
-out of order, we can identify the locks and offending code.
-
-To make this work, we need to keep track of the locks held by each thread,
-and create history trees for each lock.  When a thread tries to acquire
-a new lock, we walk through the "history children" of the lock, looking
-for a match with locks the thread already holds.  If we find a match,
-it means the thread has made a request that could result in a deadlock.
-
-To support recursive locks, we always allow re-locking a currently-held
-lock, and maintain a recursion depth count.
-
-An ASCII-art example, where letters represent Objects:
-
-        A
-       /|\
-      / | \
-     B  |  D
-      \ |
-       \|
-        C
-
-The above is the tree we'd have after handling Object synchronization
-sequences "ABC", "AC", "AD".  A has three children, {B, C, D}.  C is also
-a child of B.  (The lines represent pointers between parent and child.
-Every node can have multiple parents and multiple children.)
-
-If we hold AC, and want to lock B, we recursively search through B's
-children to see if A or C appears.  It does, so we reject the attempt.
-(A straightforward way to implement it: add a link from C to B, then
-determine whether the graph starting at B contains a cycle.)
-
-If we hold AC and want to lock D, we would succeed, creating a new link
-from C to D.
-
-The lock history and a stack trace is attached to the Object's Monitor
-struct, which means we need to fatten every Object we lock (thin locking
-is effectively disabled).  If we don't need the stack trace we can
-avoid fattening the leaf nodes, only fattening objects that need to hold
-history trees.
-
-Updates to Monitor structs are only allowed for the thread that holds
-the Monitor, so we actually do most of our deadlock prediction work after
-the lock has been acquired.
-
-When an object with a monitor is GCed, we need to remove it from the
-history trees.  There are two basic approaches:
- (1) Go through the entire set of known monitors, searching all child
-     lists for the object in question.  This is rather slow, resulting
-     in GC passes that take upwards of 10 seconds to complete.
- (2) Maintain "parent" pointers in each node.  Remove the entries as
-     required.  This requires additional storage and maintenance for
-     every operation, but is significantly faster at GC time.
-For each GCed object, we merge all of the object's children into each of
-the object's parents.
-*/
-
-#if !defined(WITH_MONITOR_TRACKING)
-# error "WITH_DEADLOCK_PREDICTION requires WITH_MONITOR_TRACKING"
-#endif
-
-/*
- * Clear out the contents of an ExpandingObjectList, freeing any
- * dynamic allocations.
- */
-static void expandObjClear(ExpandingObjectList* pList)
-{
-    if (pList->list != NULL) {
-        free(pList->list);
-        pList->list = NULL;
-    }
-    pList->alloc = pList->count = 0;
-}
-
-/*
- * Get the number of objects currently stored in the list.
- */
-static inline int expandBufGetCount(const ExpandingObjectList* pList)
-{
-    return pList->count;
-}
-
-/*
- * Get the Nth entry from the list.
- */
-static inline Object* expandBufGetEntry(const ExpandingObjectList* pList,
-    int i)
-{
-    return pList->list[i];
-}
-
-/*
- * Add a new entry to the list.
- *
- * We don't check for or try to enforce uniqueness.  It's expected that
- * the higher-level code does this for us.
- */
-static void expandObjAddEntry(ExpandingObjectList* pList, Object* obj)
-{
-    if (pList->count == pList->alloc) {
-        /* time to expand */
-        Object** newList;
-
-        if (pList->alloc == 0)
-            pList->alloc = 4;
-        else
-            pList->alloc *= 2;
-        LOGVV("expanding %p to %d\n", pList, pList->alloc);
-        newList = realloc(pList->list, pList->alloc * sizeof(Object*));
-        if (newList == NULL) {
-            LOGE("Failed expanding DP object list (alloc=%d)\n", pList->alloc);
-            dvmAbort();
-        }
-        pList->list = newList;
-    }
-
-    pList->list[pList->count++] = obj;
-}
-
-/*
- * Returns "true" if the element was successfully removed.
- */
-static bool expandObjRemoveEntry(ExpandingObjectList* pList, Object* obj)
-{
-    int i;
-
-    for (i = pList->count-1; i >= 0; i--) {
-        if (pList->list[i] == obj)
-            break;
-    }
-    if (i < 0)
-        return false;
-
-    if (i != pList->count-1) {
-        /*
-         * The order of elements is not important, so we just copy the
-         * last entry into the new slot.
-         */
-        //memmove(&pList->list[i], &pList->list[i+1],
-        //    (pList->count-1 - i) * sizeof(pList->list[0]));
-        pList->list[i] = pList->list[pList->count-1];
-    }
-
-    pList->count--;
-    pList->list[pList->count] = (Object*) 0xdecadead;
-    return true;
-}
-
-/*
- * Returns "true" if "obj" appears in the list.
- */
-static bool expandObjHas(const ExpandingObjectList* pList, Object* obj)
-{
-    int i;
-
-    for (i = 0; i < pList->count; i++) {
-        if (pList->list[i] == obj)
-            return true;
-    }
-    return false;
-}
-
-/*
- * Print the list contents to stdout.  For debugging.
- */
-static void expandObjDump(const ExpandingObjectList* pList)
-{
-    int i;
-    for (i = 0; i < pList->count; i++)
-        printf(" %p", pList->list[i]);
-}
-
-/*
- * Check for duplicate entries.  Returns the index of the first instance
- * of the duplicated value, or -1 if no duplicates were found.
- */
-static int expandObjCheckForDuplicates(const ExpandingObjectList* pList)
-{
-    int i, j;
-    for (i = 0; i < pList->count-1; i++) {
-        for (j = i + 1; j < pList->count; j++) {
-            if (pList->list[i] == pList->list[j]) {
-                return i;
-            }
-        }
-    }
-
-    return -1;
-}
-
-
-/*
- * Determine whether "child" appears in the list of objects associated
- * with the Monitor in "parent".  If "parent" is a thin lock, we return
- * false immediately.
- */
-static bool objectInChildList(const Object* parent, Object* child)
-{
-    u4 lock = parent->lock;
-    if (!IS_LOCK_FAT(&lock)) {
-        //LOGI("on thin\n");
-        return false;
-    }
-
-    return expandObjHas(&LW_MONITOR(lock)->historyChildren, child);
-}
-
-/*
- * Print the child list.
- */
-static void dumpKids(Object* parent)
-{
-    Monitor* mon = LW_MONITOR(parent->lock);
-
-    printf("Children of %p:", parent);
-    expandObjDump(&mon->historyChildren);
-    printf("\n");
-}
-
-/*
- * Add "child" to the list of children in "parent", and add "parent" to
- * the list of parents in "child".
- */
-static void linkParentToChild(Object* parent, Object* child)
-{
-    //assert(LW_MONITOR(parent->lock)->owner == dvmThreadSelf());   // !owned for merge
-    assert(IS_LOCK_FAT(&parent->lock));
-    assert(IS_LOCK_FAT(&child->lock));
-    assert(parent != child);
-    Monitor* mon;
-
-    mon = LW_MONITOR(parent->lock);
-    assert(!expandObjHas(&mon->historyChildren, child));
-    expandObjAddEntry(&mon->historyChildren, child);
-
-    mon = LW_MONITOR(child->lock);
-    assert(!expandObjHas(&mon->historyParents, parent));
-    expandObjAddEntry(&mon->historyParents, parent);
-}
-
-
-/*
- * Remove "child" from the list of children in "parent".
- */
-static void unlinkParentFromChild(Object* parent, Object* child)
-{
-    //assert(LW_MONITOR(parent->lock)->owner == dvmThreadSelf());   // !owned for GC
-    assert(IS_LOCK_FAT(&parent->lock));
-    assert(IS_LOCK_FAT(&child->lock));
-    assert(parent != child);
-    Monitor* mon;
-
-    mon = LW_MONITOR(parent->lock);
-    if (!expandObjRemoveEntry(&mon->historyChildren, child)) {
-        LOGW("WARNING: child %p not found in parent %p\n", child, parent);
-    }
-    assert(!expandObjHas(&mon->historyChildren, child));
-    assert(expandObjCheckForDuplicates(&mon->historyChildren) < 0);
-
-    mon = LW_MONITOR(child->lock);
-    if (!expandObjRemoveEntry(&mon->historyParents, parent)) {
-        LOGW("WARNING: parent %p not found in child %p\n", parent, child);
-    }
-    assert(!expandObjHas(&mon->historyParents, parent));
-    assert(expandObjCheckForDuplicates(&mon->historyParents) < 0);
-}
-
-
-/*
- * Log the monitors held by the current thread.  This is done as part of
- * flagging an error.
- */
-static void logHeldMonitors(Thread* self)
-{
-    char* name = NULL;
-
-    name = dvmGetThreadName(self);
-    LOGW("Monitors currently held by thread (threadid=%d '%s')\n",
-        self->threadId, name);
-    LOGW("(most-recently-acquired on top):\n");
-    free(name);
-
-    LockedObjectData* lod = self->pLockedObjects;
-    while (lod != NULL) {
-        LOGW("--- object %p[%d] (%s)\n",
-            lod->obj, lod->recursionCount, lod->obj->clazz->descriptor);
-        dvmLogRawStackTrace(lod->rawStackTrace, lod->stackDepth);
-
-        lod = lod->next;
-    }
-}
-
-/*
- * Recursively traverse the object hierarchy starting at "obj".  We mark
- * ourselves on entry and clear the mark on exit.  If we ever encounter
- * a marked object, we have a cycle.
- *
- * Returns "true" if all is well, "false" if we found a cycle.
- */
-static bool traverseTree(Thread* self, const Object* obj)
-{
-    assert(IS_LOCK_FAT(&obj->lock));
-    Monitor* mon = LW_MONITOR(obj->lock);
-
-    /*
-     * Have we been here before?
-     */
-    if (mon->historyMark) {
-        int* rawStackTrace;
-        int stackDepth;
-
-        LOGW("%s\n", kStartBanner);
-        LOGW("Illegal lock attempt:\n");
-        LOGW("--- object %p (%s)\n", obj, obj->clazz->descriptor);
-
-        rawStackTrace = dvmFillInStackTraceRaw(self, &stackDepth);
-        dvmLogRawStackTrace(rawStackTrace, stackDepth);
-        free(rawStackTrace);
-
-        LOGW(" ");
-        logHeldMonitors(self);
-
-        LOGW(" ");
-        LOGW("Earlier, the following lock order (from last to first) was\n");
-        LOGW("established -- stack trace is from first successful lock):\n");
-        return false;
-    }
-    mon->historyMark = true;
-
-    /*
-     * Examine the children.  We do NOT hold these locks, so they might
-     * very well transition from thin to fat or change ownership while
-     * we work.
-     *
-     * NOTE: we rely on the fact that they cannot revert from fat to thin
-     * while we work.  This is currently a safe assumption.
-     *
-     * We can safely ignore thin-locked children, because by definition
-     * they have no history and are leaf nodes.  In the current
-     * implementation we always fatten the locks to provide a place to
-     * hang the stack trace.
-     */
-    ExpandingObjectList* pList = &mon->historyChildren;
-    int i;
-    for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
-        const Object* child = expandBufGetEntry(pList, i);
-        u4 lock = child->lock;
-        if (!IS_LOCK_FAT(&lock))
-            continue;
-        if (!traverseTree(self, child)) {
-            LOGW("--- object %p (%s)\n", obj, obj->clazz->descriptor);
-            dvmLogRawStackTrace(mon->historyRawStackTrace,
-                mon->historyStackDepth);
-            mon->historyMark = false;
-            return false;
-        }
-    }
-
-    mon->historyMark = false;
-
-    return true;
-}
-
-/*
- * Update the deadlock prediction tree, based on the current thread
- * acquiring "acqObj".  This must be called before the object is added to
- * the thread's list of held monitors.
- *
- * If the thread already holds the lock (recursion), or this is a known
- * lock configuration, we return without doing anything.  Otherwise, we add
- * a link from the most-recently-acquired lock in this thread to "acqObj"
- * after ensuring that the parent lock is "fat".
- *
- * This MUST NOT be called while a GC is in progress in another thread,
- * because we assume exclusive access to history trees in owned monitors.
- */
-static void updateDeadlockPrediction(Thread* self, Object* acqObj)
-{
-    LockedObjectData* lod;
-    LockedObjectData* mrl;
-
-    /*
-     * Quick check for recursive access.
-     */
-    lod = dvmFindInMonitorList(self, acqObj);
-    if (lod != NULL) {
-        LOGV("+++ DP: recursive %p\n", acqObj);
-        return;
-    }
-
-    /*
-     * Make the newly-acquired object's monitor "fat".  In some ways this
-     * isn't strictly necessary, but we need the GC to tell us when
-     * "interesting" objects go away, and right now the only way to make
-     * an object look interesting is to give it a monitor.
-     *
-     * This also gives us a place to hang a stack trace.
-     *
-     * Our thread holds the lock, so we're allowed to rewrite the lock
-     * without worrying that something will change out from under us.
-     */
-    if (!IS_LOCK_FAT(&acqObj->lock)) {
-        LOGVV("fattening lockee %p (recur=%d)\n",
-            acqObj, LW_LOCK_COUNT(acqObj->lock.thin));
-        inflateMonitor(self, acqObj);
-    }
-
-    /* if we don't have a stack trace for this monitor, establish one */
-    if (LW_MONITOR(acqObj->lock)->historyRawStackTrace == NULL) {
-        Monitor* mon = LW_MONITOR(acqObj->lock);
-        mon->historyRawStackTrace = dvmFillInStackTraceRaw(self,
-            &mon->historyStackDepth);
-    }
-
-    /*
-     * We need to examine and perhaps modify the most-recently-locked
-     * monitor.  We own that, so there's no risk of another thread
-     * stepping on us.
-     *
-     * Retrieve the most-recently-locked entry from our thread.
-     */
-    mrl = self->pLockedObjects;
-    if (mrl == NULL)
-        return;         /* no other locks held */
-
-    /*
-     * Do a quick check to see if "acqObj" is a direct descendant.  We can do
-     * this without holding the global lock because of our assertion that
-     * a GC is not running in parallel -- nobody except the GC can
-     * modify a history list in a Monitor they don't own, and we own "mrl".
-     * (There might be concurrent *reads*, but no concurrent *writes.)
-     *
-     * If we find it, this is a known good configuration, and we're done.
-     */
-    if (objectInChildList(mrl->obj, acqObj))
-        return;
-
-    /*
-     * "mrl" is going to need to have a history tree.  If it's currently
-     * a thin lock, we make it fat now.  The thin lock might have a
-     * nonzero recursive lock count, which we need to carry over.
-     *
-     * Our thread holds the lock, so we're allowed to rewrite the lock
-     * without worrying that something will change out from under us.
-     */
-    if (!IS_LOCK_FAT(&mrl->obj->lock)) {
-        LOGVV("fattening parent %p f/b/o child %p (recur=%d)\n",
-            mrl->obj, acqObj, LW_LOCK_COUNT(mrl->obj->lock));
-        inflateMonitor(self, mrl->obj);
-    }
-
-    /*
-     * We haven't seen this configuration before.  We need to scan down
-     * acqObj's tree to see if any of the monitors in self->pLockedObjects
-     * appear.  We grab a global lock before traversing or updating the
-     * history list.
-     *
-     * If we find a match for any of our held locks, we know that the lock
-     * has previously been acquired *after* acqObj, and we throw an error.
-     *
-     * The easiest way to do this is to create a link from "mrl" to "acqObj"
-     * and do a recursive traversal, marking nodes as we cross them.  If
-     * we cross one a second time, we have a cycle and can throw an error.
-     * (We do the flag-clearing traversal before adding the new link, so
-     * that we're guaranteed to terminate.)
-     *
-     * If "acqObj" is a thin lock, it has no history, and we can create a
-     * link to it without additional checks.  [ We now guarantee that it's
-     * always fat. ]
-     */
-    bool failed = false;
-    dvmLockMutex(&gDvm.deadlockHistoryLock);
-    linkParentToChild(mrl->obj, acqObj);
-    if (!traverseTree(self, acqObj)) {
-        LOGW("%s\n", kEndBanner);
-        failed = true;
-
-        /* remove the entry so we're still okay when in "warning" mode */
-        unlinkParentFromChild(mrl->obj, acqObj);
-    }
-    dvmUnlockMutex(&gDvm.deadlockHistoryLock);
-
-    if (failed) {
-        switch (gDvm.deadlockPredictMode) {
-        case kDPErr:
-            dvmThrowException("Ldalvik/system/PotentialDeadlockError;", NULL);
-            break;
-        case kDPAbort:
-            LOGE("Aborting due to potential deadlock\n");
-            dvmAbort();
-            break;
-        default:
-            /* warn only */
-            break;
-        }
-    }
-}
-
-/*
- * We're removing "child" from existence.  We want to pull all of
- * child's children into "parent", filtering out duplicates.  This is
- * called during the GC.
- *
- * This does not modify "child", which might have multiple parents.
- */
-static void mergeChildren(Object* parent, const Object* child)
-{
-    Monitor* mon;
-    int i;
-
-    assert(IS_LOCK_FAT(&child->lock));
-    mon = LW_MONITOR(child->lock);
-    ExpandingObjectList* pList = &mon->historyChildren;
-
-    for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
-        Object* grandChild = expandBufGetEntry(pList, i);
-
-        if (!objectInChildList(parent, grandChild)) {
-            LOGVV("+++  migrating %p link to %p\n", grandChild, parent);
-            linkParentToChild(parent, grandChild);
-        } else {
-            LOGVV("+++  parent %p already links to %p\n", parent, grandChild);
-        }
-    }
-}
-
-/*
- * An object with a fat lock is being collected during a GC pass.  We
- * want to remove it from any lock history trees that it is a part of.
- *
- * This may require updating the history trees in several monitors.  The
- * monitor semantics guarantee that no other thread will be accessing
- * the history trees at the same time.
- */
-static void removeCollectedObject(Object* obj)
-{
-    Monitor* mon;
-
-    LOGVV("+++ collecting %p\n", obj);
-
-    /*
-     * For every parent of this object:
-     *  - merge all of our children into the parent's child list (creates
-     *    a two-way link between parent and child)
-     *  - remove ourselves from the parent's child list
-     */
-    ExpandingObjectList* pList;
-    int i;
-
-    assert(IS_LOCK_FAT(&obj->lock));
-    mon = LW_MONITOR(obj->lock);
-    pList = &mon->historyParents;
-    for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
-        Object* parent = expandBufGetEntry(pList, i);
-        Monitor* parentMon = LW_MONITOR(parent->lock);
-
-        if (!expandObjRemoveEntry(&parentMon->historyChildren, obj)) {
-            LOGW("WARNING: child %p not found in parent %p\n", obj, parent);
-        }
-        assert(!expandObjHas(&parentMon->historyChildren, obj));
-
-        mergeChildren(parent, obj);
-    }
-
-    /*
-     * For every child of this object:
-     *  - remove ourselves from the child's parent list
-     */
-    pList = &mon->historyChildren;
-    for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
-        Object* child = expandBufGetEntry(pList, i);
-        Monitor* childMon = LW_MONITOR(child->lock);
-
-        if (!expandObjRemoveEntry(&childMon->historyParents, obj)) {
-            LOGW("WARNING: parent %p not found in child %p\n", obj, child);
-        }
-        assert(!expandObjHas(&childMon->historyParents, obj));
-    }
-}
-
-#endif /*WITH_DEADLOCK_PREDICTION*/
diff --git a/vm/Sync.h b/vm/Sync.h
index e63fb55..520366e 100644
--- a/vm/Sync.h
+++ b/vm/Sync.h
@@ -157,9 +157,4 @@
 int dvmRelativeCondWait(pthread_cond_t* cond, pthread_mutex_t* mutex,
                          s8 msec, s4 nsec);
 
-/*
- * Debug.
- */
-void dvmDumpMonitorInfo(const char* msg);
-
 #endif /*_DALVIK_SYNC*/
diff --git a/vm/Thread.c b/vm/Thread.c
index dd87483..eb026fd 100644
--- a/vm/Thread.c
+++ b/vm/Thread.c
@@ -243,14 +243,18 @@
 static int getThreadPriorityFromSystem(void);
 
 /*
- * The JIT needs to know if any thread is suspended.  We do this by
- * maintaining a global sum of all threads' suspend counts.  All suspendCount
- * updates should go through this after acquiring threadSuspendCountLock.
+ * If there is any thread suspend request outstanding,
+ * we need to mark it in interpState to signal the interpreter that
+ * something is pending.  We do this by maintaining a global sum of
+ * all threads' suspend counts.  All suspendCount updates should go
+ * through this after acquiring threadSuspendCountLock.
  */
-static inline void dvmAddToThreadSuspendCount(int *pSuspendCount, int delta)
+static void dvmAddToThreadSuspendCount(int *pSuspendCount, int delta)
 {
     *pSuspendCount += delta;
     gDvm.sumThreadSuspendCount += delta;
+    dvmUpdateInterpBreak(kSubModeSuspendRequest,
+                         (gDvm.sumThreadSuspendCount != 0));
 }
 
 /*
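With the hunk above, every suspendCount adjustment funnels through dvmAddToThreadSuspendCount(), so one global sum can drive the new kSubModeSuspendRequest bit. A reduced sketch of that invariant (a plain flag stands in for dvmUpdateInterpBreak, and the threadSuspendCountLock the real code requires is elided):

    #include <stdio.h>

    static int sumThreadSuspendCount;   /* ~ gDvm.sumThreadSuspendCount */
    static int suspendRequestPending;   /* ~ the kSubModeSuspendRequest bit */

    /* Every per-thread suspendCount change goes through here, so the global
     * sum and the "suspend pending" flag can never drift apart. */
    static void addToThreadSuspendCount(int* pSuspendCount, int delta)
    {
        *pSuspendCount += delta;
        sumThreadSuspendCount += delta;
        suspendRequestPending = (sumThreadSuspendCount != 0);
    }

    int main(void)
    {
        int threadA = 0, threadB = 0;

        addToThreadSuspendCount(&threadA, 1);   /* suspend A: pending becomes 1  */
        addToThreadSuspendCount(&threadB, 1);   /* suspend B: pending stays 1    */
        addToThreadSuspendCount(&threadA, -1);  /* resume A:  pending stays 1    */
        addToThreadSuspendCount(&threadB, -1);  /* resume B:  pending drops to 0 */
        printf("sum=%d pending=%d\n", sumThreadSuspendCount, suspendRequestPending);
        return 0;
    }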
@@ -280,9 +284,6 @@
     dvmInitMutex(&gDvm._threadSuspendLock);
     dvmInitMutex(&gDvm.threadSuspendCountLock);
     pthread_cond_init(&gDvm.threadSuspendCountCond, NULL);
-#ifdef WITH_DEADLOCK_PREDICTION
-    dvmInitMutex(&gDvm.deadlockHistoryLock);
-#endif
 
     /*
      * Dedicated monitor for Thread.sleep().
@@ -317,101 +318,6 @@
 }
 
 /*
- * We're a little farther up now, and can load some basic classes.
- *
- * We're far enough along that we can poke at java.lang.Thread and friends,
- * but should not assume that static initializers have run (or cause them
- * to do so).  That means no object allocations yet.
- */
-bool dvmThreadObjStartup(void)
-{
-    /*
-     * Cache the locations of these classes.  It's likely that we're the
-     * first to reference them, so they're being loaded now.
-     */
-    gDvm.classJavaLangThread =
-        dvmFindSystemClassNoInit("Ljava/lang/Thread;");
-    gDvm.classJavaLangVMThread =
-        dvmFindSystemClassNoInit("Ljava/lang/VMThread;");
-    gDvm.classJavaLangThreadGroup =
-        dvmFindSystemClassNoInit("Ljava/lang/ThreadGroup;");
-    if (gDvm.classJavaLangThread == NULL ||
-        gDvm.classJavaLangThreadGroup == NULL ||
-        gDvm.classJavaLangThreadGroup == NULL)
-    {
-        LOGE("Could not find one or more essential thread classes\n");
-        return false;
-    }
-
-    /*
-     * Cache field offsets.  This makes things a little faster, at the
-     * expense of hard-coding non-public field names into the VM.
-     */
-    gDvm.offJavaLangThread_vmThread =
-        dvmFindFieldOffset(gDvm.classJavaLangThread,
-            "vmThread", "Ljava/lang/VMThread;");
-    gDvm.offJavaLangThread_group =
-        dvmFindFieldOffset(gDvm.classJavaLangThread,
-            "group", "Ljava/lang/ThreadGroup;");
-    gDvm.offJavaLangThread_daemon =
-        dvmFindFieldOffset(gDvm.classJavaLangThread, "daemon", "Z");
-    gDvm.offJavaLangThread_name =
-        dvmFindFieldOffset(gDvm.classJavaLangThread,
-            "name", "Ljava/lang/String;");
-    gDvm.offJavaLangThread_priority =
-        dvmFindFieldOffset(gDvm.classJavaLangThread, "priority", "I");
-
-    if (gDvm.offJavaLangThread_vmThread < 0 ||
-        gDvm.offJavaLangThread_group < 0 ||
-        gDvm.offJavaLangThread_daemon < 0 ||
-        gDvm.offJavaLangThread_name < 0 ||
-        gDvm.offJavaLangThread_priority < 0)
-    {
-        LOGE("Unable to find all fields in java.lang.Thread\n");
-        return false;
-    }
-
-    gDvm.offJavaLangVMThread_thread =
-        dvmFindFieldOffset(gDvm.classJavaLangVMThread,
-            "thread", "Ljava/lang/Thread;");
-    gDvm.offJavaLangVMThread_vmData =
-        dvmFindFieldOffset(gDvm.classJavaLangVMThread, "vmData", "I");
-    if (gDvm.offJavaLangVMThread_thread < 0 ||
-        gDvm.offJavaLangVMThread_vmData < 0)
-    {
-        LOGE("Unable to find all fields in java.lang.VMThread\n");
-        return false;
-    }
-
-    /*
-     * Cache the vtable offset for "run()".
-     *
- * We don't want to keep the Method* because then we won't see
-     * methods defined in subclasses.
-     */
-    Method* meth;
-    meth = dvmFindVirtualMethodByDescriptor(gDvm.classJavaLangThread, "run", "()V");
-    if (meth == NULL) {
-        LOGE("Unable to find run() in java.lang.Thread\n");
-        return false;
-    }
-    gDvm.voffJavaLangThread_run = meth->methodIndex;
-
-    /*
-     * Cache vtable offsets for ThreadGroup methods.
-     */
-    meth = dvmFindVirtualMethodByDescriptor(gDvm.classJavaLangThreadGroup,
-        "removeThread", "(Ljava/lang/Thread;)V");
-    if (meth == NULL) {
-        LOGE("Unable to find removeThread(Thread) in java.lang.ThreadGroup\n");
-        return false;
-    }
-    gDvm.voffJavaLangThreadGroup_removeThread = meth->methodIndex;
-
-    return true;
-}
-
-/*
  * All threads should be stopped by now.  Clean up some thread globals.
  */
 void dvmThreadShutdown(void)
@@ -954,7 +860,7 @@
     }
     memset(stackBottom, 0xc5, interpStackSize);     // stop valgrind complaints
 #else
-    stackBottom = mmap(NULL, interpStackSize, PROT_READ | PROT_WRITE,
+    stackBottom = (u1*) mmap(NULL, interpStackSize, PROT_READ | PROT_WRITE,
         MAP_PRIVATE | MAP_ANON, -1, 0);
     if (stackBottom == MAP_FAILED) {
 #if defined(WITH_SELF_VERIFICATION)
@@ -970,9 +876,18 @@
     thread->interpStackStart = stackBottom + interpStackSize;
     thread->interpStackEnd = stackBottom + STACK_OVERFLOW_RESERVE;
 
+#ifndef DVM_NO_ASM_INTERP
+    thread->mainHandlerTable = dvmAsmInstructionStart;
+    thread->altHandlerTable = dvmAsmAltInstructionStart;
+    thread->curHandlerTable = thread->mainHandlerTable;
+#endif
+
     /* give the thread code a chance to set things up */
     dvmInitInterpStack(thread, interpStackSize);
 
+    /* One-time setup for interpreter/JIT state */
+    dvmInitInterpreterState(thread);
+
     return thread;
 }
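The createThread() hunk above seeds each new thread with mainHandlerTable/altHandlerTable pointers and starts curHandlerTable on the main one; flipping curHandlerTable to the alt table is how a thread gets diverted into the instrumented interpreter path. A toy table-dispatch sketch of that idea (two tiny handler tables; none of the real dvmAsm* symbols are involved):

    #include <stdio.h>

    typedef void (*Handler)(int operand);

    static void fastAdd(int v)  { printf("fast  add %d\n", v); }
    static void fastMul(int v)  { printf("fast  mul %d\n", v); }
    static void traceAdd(int v) { printf("TRACE add %d\n", v); }
    static void traceMul(int v) { printf("TRACE mul %d\n", v); }

    static Handler mainTable[] = { fastAdd,  fastMul  };   /* ~ mainHandlerTable */
    static Handler altTable[]  = { traceAdd, traceMul };   /* ~ altHandlerTable  */

    typedef struct { Handler* curHandlerTable; } ToyThread;

    static void run(ToyThread* t, const int* code, int len)
    {
        int pc;
        for (pc = 0; pc + 1 < len; pc += 2)
            t->curHandlerTable[code[pc]](code[pc + 1]);    /* dispatch via table */
    }

    int main(void)
    {
        const int program[] = { 0, 7, 1, 3 };   /* add 7; mul 3 */
        ToyThread t = { mainTable };            /* new thread: main table */

        run(&t, program, 4);
        t.curHandlerTable = altTable;           /* divert into the traced path */
        run(&t, program, 4);
        return 0;
    }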
 
@@ -1028,19 +943,9 @@
      * Most threads won't use jniMonitorRefTable, so we clear out the
      * structure but don't call the init function (which allocs storage).
      */
-#ifdef USE_INDIRECT_REF
     if (!dvmInitIndirectRefTable(&thread->jniLocalRefTable,
             kJniLocalRefMin, kJniLocalRefMax, kIndirectKindLocal))
         return false;
-#else
-    /*
-     * The JNI local ref table *must* be fixed-size because we keep pointers
-     * into the table in our stack frames.
-     */
-    if (!dvmInitReferenceTable(&thread->jniLocalRefTable,
-            kJniLocalRefMax, kJniLocalRefMax))
-        return false;
-#endif
     if (!dvmInitReferenceTable(&thread->internalLocalRefTable,
             kInternalRefDefault, kInternalRefMax))
         return false;
@@ -1097,11 +1002,7 @@
 #endif
     }
 
-#ifdef USE_INDIRECT_REF
     dvmClearIndirectRefTable(&thread->jniLocalRefTable);
-#else
-    dvmClearReferenceTable(&thread->jniLocalRefTable);
-#endif
     dvmClearReferenceTable(&thread->internalLocalRefTable);
     if (&thread->jniMonitorRefTable.table != NULL)
         dvmClearReferenceTable(&thread->jniMonitorRefTable);
@@ -1417,7 +1318,7 @@
         char* threadName = dvmCreateCstrFromString(nameStr);
         bool profilerThread = strcmp(threadName, "SamplingProfiler") == 0;
         if (!profilerThread) {
-            dvmThrowExceptionFmt("Ljava/lang/IllegalStateException;",
+            dvmThrowExceptionFmt(gDvm.exIllegalStateException,
                 "No new threads in -Xzygote mode. "
                 "Found thread named '%s'", threadName);
 
@@ -1466,7 +1367,7 @@
 
     if (dvmGetFieldObject(threadObj, gDvm.offJavaLangThread_vmThread) != NULL) {
         dvmUnlockThreadList();
-        dvmThrowException("Ljava/lang/IllegalThreadStateException;",
+        dvmThrowIllegalThreadStateException(
             "thread has already been started");
         goto fail;
     }
@@ -1502,8 +1403,7 @@
 
         dvmSetFieldObject(threadObj, gDvm.offJavaLangThread_vmThread, NULL);
 
-        dvmThrowException("Ljava/lang/OutOfMemoryError;",
-            "thread creation failed");
+        dvmThrowOutOfMemoryError("thread creation failed");
         goto fail;
     }
 
@@ -1797,12 +1697,10 @@
     }
 
 bail:
-#if defined(WITH_JIT)
     /* Remove this thread's suspendCount from global suspendCount sum */
     lockThreadSuspendCount();
     dvmAddToThreadSuspendCount(&self->suspendCount, -self->suspendCount);
     unlockThreadSuspendCount();
-#endif
     dvmReleaseTrackedAlloc(exception, self);
 }
 
@@ -2098,7 +1996,7 @@
      */
     if (dvmGetFieldObject(threadObj, gDvm.offJavaLangThread_vmThread) != NULL) {
         LOGW("WOW: thread start hijack\n");
-        dvmThrowException("Ljava/lang/IllegalThreadStateException;",
+        dvmThrowIllegalThreadStateException(
             "thread has already been started");
         /* We don't want to free anything associated with the thread
          * because someone is obviously interested in it.  Just let
@@ -2197,7 +2095,7 @@
 
         if (curDepth == 1) {
             /* not expecting a lingering break frame; just look at curFrame */
-            assert(!dvmIsBreakFrame(self->curFrame));
+            assert(!dvmIsBreakFrame((u4*)self->curFrame));
             StackSaveArea* ssa = SAVEAREA_FROM_FP(self->curFrame);
             if (dvmIsNativeMethod(ssa->method))
                 topIsNative = true;
@@ -3203,13 +3101,13 @@
         fieldName, "Ljava/lang/ThreadGroup;");
     if (groupField == NULL) {
         LOGE("java.lang.ThreadGroup does not have an '%s' field\n", fieldName);
-        dvmThrowException("Ljava/lang/IncompatibleClassChangeError;", NULL);
+        dvmThrowInternalError("bad definition for ThreadGroup");
         return NULL;
     }
     groupObj = dvmGetStaticFieldObject(groupField);
     if (groupObj == NULL) {
         LOGE("java.lang.ThreadGroup.%s not initialized\n", fieldName);
-        dvmThrowException("Ljava/lang/InternalError;", NULL);
+        dvmThrowInternalError(NULL);
         return NULL;
     }
 
@@ -3620,29 +3518,6 @@
         schedstatBuf, procStatData.utime, procStatData.stime,
         procStatData.processor);
 
-#ifdef WITH_MONITOR_TRACKING
-    if (!isRunning) {
-        LockedObjectData* lod = thread->pLockedObjects;
-        if (lod != NULL)
-            dvmPrintDebugMessage(target, "  | monitors held:\n");
-        else
-            dvmPrintDebugMessage(target, "  | monitors held: <none>\n");
-        while (lod != NULL) {
-            Object* obj = lod->obj;
-            if (obj->clazz == gDvm.classJavaLangClass) {
-                ClassObject* clazz = (ClassObject*) obj;
-                dvmPrintDebugMessage(target, "  >  %p[%d] (%s object for class %s)\n",
-                    obj, lod->recursionCount, obj->clazz->descriptor,
-                    clazz->descriptor);
-            } else {
-                dvmPrintDebugMessage(target, "  >  %p[%d] (%s)\n",
-                    obj, lod->recursionCount, obj->clazz->descriptor);
-            }
-            lod = lod->next;
-        }
-    }
-#endif
-
     if (isRunning)
         dvmDumpRunningThreadStack(target, thread);
     else
@@ -3784,130 +3659,3 @@
     signal(SIGSEGV, SIG_IGN);
     LOGD("Continuing\n");
 }
-
-#ifdef WITH_MONITOR_TRACKING
-/*
- * Count up the #of locked objects in the current thread.
- */
-static int getThreadObjectCount(const Thread* self)
-{
-    LockedObjectData* lod;
-    int count = 0;
-
-    lod = self->pLockedObjects;
-    while (lod != NULL) {
-        count++;
-        lod = lod->next;
-    }
-    return count;
-}
-
-/*
- * Add the object to the thread's locked object list if it doesn't already
- * exist.  The most recently added object is the most likely to be released
- * next, so we insert at the head of the list.
- *
- * If it already exists, we increase the recursive lock count.
- *
- * The object's lock may be thin or fat.
- */
-void dvmAddToMonitorList(Thread* self, Object* obj, bool withTrace)
-{
-    LockedObjectData* newLod;
-    LockedObjectData* lod;
-    int* trace;
-    int depth;
-
-    lod = self->pLockedObjects;
-    while (lod != NULL) {
-        if (lod->obj == obj) {
-            lod->recursionCount++;
-            LOGV("+++ +recursive lock %p -> %d\n", obj, lod->recursionCount);
-            return;
-        }
-        lod = lod->next;
-    }
-
-    newLod = (LockedObjectData*) calloc(1, sizeof(LockedObjectData));
-    if (newLod == NULL) {
-        LOGE("malloc failed on %d bytes\n", sizeof(LockedObjectData));
-        return;
-    }
-    newLod->obj = obj;
-    newLod->recursionCount = 0;
-
-    if (withTrace) {
-        trace = dvmFillInStackTraceRaw(self, &depth);
-        newLod->rawStackTrace = trace;
-        newLod->stackDepth = depth;
-    }
-
-    newLod->next = self->pLockedObjects;
-    self->pLockedObjects = newLod;
-
-    LOGV("+++ threadid=%d: added %p, now %d\n",
-        self->threadId, newLod, getThreadObjectCount(self));
-}
-
-/*
- * Remove the object from the thread's locked object list.  If the entry
- * has a nonzero recursion count, we just decrement the count instead.
- */
-void dvmRemoveFromMonitorList(Thread* self, Object* obj)
-{
-    LockedObjectData* lod;
-    LockedObjectData* prevLod;
-
-    lod = self->pLockedObjects;
-    prevLod = NULL;
-    while (lod != NULL) {
-        if (lod->obj == obj) {
-            if (lod->recursionCount > 0) {
-                lod->recursionCount--;
-                LOGV("+++ -recursive lock %p -> %d\n",
-                    obj, lod->recursionCount);
-                return;
-            } else {
-                break;
-            }
-        }
-        prevLod = lod;
-        lod = lod->next;
-    }
-
-    if (lod == NULL) {
-        LOGW("BUG: object %p not found in thread's lock list\n", obj);
-        return;
-    }
-    if (prevLod == NULL) {
-        /* first item in list */
-        assert(self->pLockedObjects == lod);
-        self->pLockedObjects = lod->next;
-    } else {
-        /* middle/end of list */
-        prevLod->next = lod->next;
-    }
-
-    LOGV("+++ threadid=%d: removed %p, now %d\n",
-        self->threadId, lod, getThreadObjectCount(self));
-    free(lod->rawStackTrace);
-    free(lod);
-}
-
-/*
- * If the specified object is already in the thread's locked object list,
- * return the LockedObjectData struct.  Otherwise return NULL.
- */
-LockedObjectData* dvmFindInMonitorList(const Thread* self, const Object* obj)
-{
-    LockedObjectData* lod;
-
-    lod = self->pLockedObjects;
-    while (lod != NULL) {
-        if (lod->obj == obj)
-            return lod;
-        lod = lod->next;
-    }
-    return NULL;
-}
-#endif /*WITH_MONITOR_TRACKING*/
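Illustrative sketch (not part of the patch): the Thread.c hunks above replace descriptor-string throws with typed helpers such as dvmThrowIllegalThreadStateException() and dvmThrowOutOfMemoryError(). One plausible shape for such a helper, reusing the ClassObject-based dvmThrowExceptionFmt() call that appears above; the gDvm.exIllegalThreadStateException cache field is an assumption, not shown in this patch.

    /* Sketch: typed throw wrapper; the gDvm.ex* field is assumed to be
     * cached at VM startup, like gDvm.exIllegalStateException above. */
    void dvmThrowIllegalThreadStateException(const char* msg)
    {
        dvmThrowExceptionFmt(gDvm.exIllegalThreadStateException, "%s", msg);
    }
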
diff --git a/vm/Thread.h b/vm/Thread.h
index 5fdb869..8b2b442 100644
--- a/vm/Thread.h
+++ b/vm/Thread.h
@@ -21,6 +21,7 @@
 #define _DALVIK_THREAD
 
 #include "jni.h"
+#include "interp/InterpState.h"
 
 #include <errno.h>
 #include <cutils/sched_policy.h>
@@ -32,10 +33,6 @@
 enum { PTHREAD_MUTEX_ERRORCHECK = PTHREAD_MUTEX_ERRORCHECK_NP };
 #endif
 
-#ifdef WITH_MONITOR_TRACKING
-struct LockedObjectData;
-#endif
-
 /*
  * Current status; these map to JDWP constants, so don't rearrange them.
  * (If you do alter this, update the strings in dvmDumpThread and the
@@ -70,7 +67,6 @@
 
 /* initialization */
 bool dvmThreadStartup(void);
-bool dvmThreadObjStartup(void);
 void dvmThreadShutdown(void);
 void dvmSlayDaemons(void);
 
@@ -90,15 +86,18 @@
  * These are allocated on the system heap.
  */
 typedef struct Thread {
-    /* small unique integer; useful for "thin" locks and debug messages */
-    u4          threadId;
-
     /*
-     * Thread's current status.  Can only be changed by the thread itself
-     * (i.e. don't mess with this from other threads).
+     * Interpreter state which must be preserved across nested
+     * interpreter invocations (via JNI callbacks).  Must be the first
+     * element in Thread.
      */
-    volatile ThreadStatus status;
-
+    InterpSaveState interpSave;
+    /*
+     * Begin interpreter state which does not need to be preserved, but should
+     * be located towards the beginning of the Thread structure for
+     * efficiency.
+     */
+    JValue      retval;
     /*
      * This is the number of times the thread has been suspended.  When the
      * count drops to zero, the thread resumes.
@@ -122,6 +121,80 @@
     int         suspendCount;
     int         dbgSuspendCount;
 
+    u1*         cardTable;
+
+    /* current limit of stack; flexes for StackOverflowError */
+    const u1*   interpStackEnd;
+
+    /* FP of bottom-most (currently executing) stack frame on interp stack */
+    void*       curFrame;
+    /* current exception, or NULL if nothing pending */
+    Object*     exception;
+
+    /* small unique integer; useful for "thin" locks and debug messages */
+    u4          threadId;
+
+    bool        debugIsMethodEntry;
+    /* interpreter stack size; our stacks are fixed-length */
+    int         interpStackSize;
+    bool        stackOverflowed;
+
+    InterpEntry entryPoint;      // What to do when we start the interpreter
+
+    /* Assembly interpreter handler tables */
+#ifndef DVM_NO_ASM_INTERP
+    void*       curHandlerTable;    // Either main or alt table
+    void*       mainHandlerTable;   // Table of actual instruction handlers
+    void*       altHandlerTable;    // Table of breakout handlers
+#else
+    void*       unused0;            // Consume space to keep offsets
+    void*       unused1;            //   the same between builds with
+    void*       unused2;            //   and without assembly interpreters
+#endif
+
+#ifdef WITH_JIT
+    struct JitToInterpEntries jitToInterpEntries;
+    /*
+     * Whether the current top VM frame is in the interpreter or JIT cache:
+     *   NULL    : in the interpreter
+     *   non-NULL: entry address of the JIT'ed code (the actual value doesn't
+     *             matter)
+     */
+    void*             inJitCodeCache;
+    unsigned char*    pJitProfTable;
+    unsigned char**   ppJitProfTable;   // Used to refresh pJitProfTable
+    int               jitThreshold;
+    const void*       jitResumeNPC;
+    const u2*         jitResumeDPC;
+    JitState    jitState;
+    int         icRechainCount;
+    const void* pProfileCountdown;
+#endif
+
+    /* JNI local reference tracking */
+    IndirectRefTable jniLocalRefTable;
+
+#if defined(WITH_JIT)
+#if defined(WITH_SELF_VERIFICATION)
+    /* Buffer for register state during self verification */
+    struct ShadowSpace* shadowSpace;
+#endif
+    int         currTraceRun;
+    int         totalTraceLen;  // Number of Dalvik insts in trace
+    const u2*   currTraceHead;  // Start of the trace we're building
+    const u2*   currRunHead;    // Start of run we're building
+    int         currRunLen;     // Length of run in 16-bit words
+    const u2*   lastPC;         // Stage the PC for the threaded interpreter
+    intptr_t    threshFilter[JIT_TRACE_THRESH_FILTER_SIZE];
+    JitTraceRun trace[MAX_JIT_RUN_LEN];
+#endif
+
+    /*
+     * Thread's current status.  Can only be changed by the thread itself
+     * (i.e. don't mess with this from other threads).
+     */
+    volatile ThreadStatus status;
+
     /* thread handle, as reported by pthread_self() */
     pthread_t   handle;
 
@@ -131,19 +204,6 @@
     /* start (high addr) of interp stack (subtract size to get malloc addr) */
     u1*         interpStackStart;
 
-    /* current limit of stack; flexes for StackOverflowError */
-    const u1*   interpStackEnd;
-
-    /* interpreter stack size; our stacks are fixed-length */
-    int         interpStackSize;
-    bool        stackOverflowed;
-
-    /* FP of bottom-most (currently executing) stack frame on interp stack */
-    void*       curFrame;
-
-    /* current exception, or NULL if nothing pending */
-    Object*     exception;
-
     /* the java/lang/Thread that we are associated with */
     Object*     threadObj;
 
@@ -153,26 +213,6 @@
     /* internal reference tracking */
     ReferenceTable  internalLocalRefTable;
 
-#if defined(WITH_JIT)
-    /*
-     * Whether the current top VM frame is in the interpreter or JIT cache:
-     *   NULL    : in the interpreter
-     *   non-NULL: entry address of the JIT'ed code (the actual value doesn't
-     *             matter)
-     */
-    void*       inJitCodeCache;
-#if defined(WITH_SELF_VERIFICATION)
-    /* Buffer for register state during self verification */
-    struct ShadowSpace* shadowSpace;
-#endif
-#endif
-
-    /* JNI local reference tracking */
-#ifdef USE_INDIRECT_REF
-    IndirectRefTable jniLocalRefTable;
-#else
-    ReferenceTable  jniLocalRefTable;
-#endif
 
     /* JNI native monitor reference tracking (initialized on first use) */
     ReferenceTable  jniMonitorRefTable;
@@ -214,10 +254,8 @@
     /* JDWP invoke-during-breakpoint support */
     DebugInvokeReq  invokeReq;
 
-#ifdef WITH_MONITOR_TRACKING
-    /* objects locked by this thread; most recent is at head of list */
-    struct LockedObjectData* pLockedObjects;
-#endif
+    /* Interpreter switching */
+    int         nextMode;
 
     /* base time for per-thread CPU timing (used by method profiling) */
     bool        cpuClockBaseSet;
@@ -527,35 +565,4 @@
  */
 void dvmNukeThread(Thread* thread);
 
-#ifdef WITH_MONITOR_TRACKING
-/*
- * Track locks held by the current thread, along with the stack trace at
- * the point the lock was acquired.
- *
- * At any given time the number of locks held across the VM should be
- * fairly small, so there's no reason not to generate and store the entire
- * stack trace.
- */
-typedef struct LockedObjectData {
-    /* the locked object */
-    struct Object*  obj;
-
-    /* number of times it has been locked recursively (zero-based ref count) */
-    int             recursionCount;
-
-    /* stack trace at point of initial acquire */
-    u4              stackDepth;
-    int*            rawStackTrace;
-
-    struct LockedObjectData* next;
-} LockedObjectData;
-
-/*
- * Add/remove/find objects from the thread's monitor list.
- */
-void dvmAddToMonitorList(Thread* self, Object* obj, bool withTrace);
-void dvmRemoveFromMonitorList(Thread* self, Object* obj);
-LockedObjectData* dvmFindInMonitorList(const Thread* self, const Object* obj);
-#endif
-
 #endif /*_DALVIK_THREAD*/
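Illustrative sketch (not part of the patch): the reordered Thread struct above requires interpSave to be the first element and keeps the handler-table pointers near the front so assembly code can reach them at small fixed offsets. A build-time guard for that layout assumption could look like the following; the DVM_STATIC_ASSERT macro is an illustration, not something the patch adds.

    /* Sketch: compile-time check of the "interpSave must be first" rule. */
    #include <stddef.h>
    #define DVM_STATIC_ASSERT(cond, name) typedef char name[(cond) ? 1 : -1]

    DVM_STATIC_ASSERT(offsetof(Thread, interpSave) == 0,
                      interpSave_must_be_first_in_Thread);
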
diff --git a/vm/UtfString.c b/vm/UtfString.c
index f560dac..58545cd 100644
--- a/vm/UtfString.c
+++ b/vm/UtfString.c
@@ -25,93 +25,52 @@
 #include <stdlib.h>
 
 /*
- * Initialize string globals.
- *
- * This isn't part of the VM init sequence because it's hard to get the
- * timing right -- we need it to happen after java/lang/String has been
- * loaded, but before anybody wants to use a string.  It's easiest to
- * just initialize it on first use.
- *
- * In some unusual circumstances (e.g. trying to throw an exception because
- * String implements java/lang/CharSequence, but CharSequence doesn't exist)
- * we can try to create an exception string internally before anything has
- * really tried to use String.  In that case we basically self-destruct.
- *
- * We're expecting to be essentially single-threaded at this point.
- * We employ atomics to ensure everything is observed correctly, and also
- * to guarantee that we do detect a problem if our assumption is wrong.
+ * Allocate a new instance of the class String, performing first-use
+ * initialization of the class if necessary. Upon success, the
+ * returned value will have all its fields except hashCode already
+ * filled in, including a reference to a newly-allocated char[] for
+ * the contents, sized as given. Additionally, a reference to the
+ * chars array is stored to the pChars pointer. Callers must
+ * subsequently call dvmReleaseTrackedAlloc() on the result pointer.
+ * This function returns NULL on failure.
  */
-static bool stringStartup()
+static StringObject* makeStringObject(u4 charsLength, ArrayObject** pChars)
 {
-    if (gDvm.javaLangStringReady < 0) {
-        LOGE("ERROR: reentrant string initialization\n");
-        assert(false);
-        return false;
+    /*
+     * The String class should have already gotten found (but not
+     * necessarily initialized) before making it here. We assert it
+     * explicitly, since historically speaking, we have had bugs with
+     * regard to when the class String gets set up. The assert helps
+     * make any regressions easier to diagnose.
+     */
+    assert(gDvm.classJavaLangString != NULL);
+
+    if (!dvmIsClassInitialized(gDvm.classJavaLangString)) {
+        /* Perform first-time use initialization of the class. */
+        if (!dvmInitClass(gDvm.classJavaLangString)) {
+            LOGE("FATAL: Could not initialize class String\n");
+            dvmAbort();
+        }
     }
 
-    if (android_atomic_acquire_cas(0, -1, &gDvm.javaLangStringReady) != 0) {
-        LOGE("ERROR: initial string-ready state not 0 (%d)\n",
-            gDvm.javaLangStringReady);
-        return false;
+    Object* result = dvmAllocObject(gDvm.classJavaLangString, ALLOC_DEFAULT);
+    if (result == NULL) {
+        return NULL;
     }
 
-    if (gDvm.classJavaLangString == NULL)
-        gDvm.classJavaLangString =
-            dvmFindSystemClassNoInit("Ljava/lang/String;");
-
-    gDvm.offJavaLangString_value =
-        dvmFindFieldOffset(gDvm.classJavaLangString, "value", "[C");
-    gDvm.offJavaLangString_count =
-        dvmFindFieldOffset(gDvm.classJavaLangString, "count", "I");
-    gDvm.offJavaLangString_offset =
-        dvmFindFieldOffset(gDvm.classJavaLangString, "offset", "I");
-    gDvm.offJavaLangString_hashCode =
-        dvmFindFieldOffset(gDvm.classJavaLangString, "hashCode", "I");
-
-    if (gDvm.offJavaLangString_value < 0 ||
-        gDvm.offJavaLangString_count < 0 ||
-        gDvm.offJavaLangString_offset < 0 ||
-        gDvm.offJavaLangString_hashCode < 0)
-    {
-        LOGE("VM-required field missing from java/lang/String\n");
-        return false;
+    ArrayObject* chars = dvmAllocPrimitiveArray('C', charsLength, ALLOC_DEFAULT);
+    if (chars == NULL) {
+        dvmReleaseTrackedAlloc(result, NULL);
+        return NULL;
     }
 
-    bool badValue = false;
-    if (gDvm.offJavaLangString_value != STRING_FIELDOFF_VALUE) {
-        LOGE("InlineNative: String.value offset = %d, expected %d\n",
-            gDvm.offJavaLangString_value, STRING_FIELDOFF_VALUE);
-        badValue = true;
-    }
-    if (gDvm.offJavaLangString_count != STRING_FIELDOFF_COUNT) {
-        LOGE("InlineNative: String.count offset = %d, expected %d\n",
-            gDvm.offJavaLangString_count, STRING_FIELDOFF_COUNT);
-        badValue = true;
-    }
-    if (gDvm.offJavaLangString_offset != STRING_FIELDOFF_OFFSET) {
-        LOGE("InlineNative: String.offset offset = %d, expected %d\n",
-            gDvm.offJavaLangString_offset, STRING_FIELDOFF_OFFSET);
-        badValue = true;
-    }
-    if (gDvm.offJavaLangString_hashCode != STRING_FIELDOFF_HASHCODE) {
-        LOGE("InlineNative: String.hashCode offset = %d, expected %d\n",
-            gDvm.offJavaLangString_hashCode, STRING_FIELDOFF_HASHCODE);
-        badValue = true;
-    }
-    if (badValue)
-        return false;
+    dvmSetFieldInt(result, STRING_FIELDOFF_COUNT, charsLength);
+    dvmSetFieldObject(result, STRING_FIELDOFF_VALUE, (Object*) chars);
+    dvmReleaseTrackedAlloc((Object*) chars, NULL);
+    /* Leave offset and hashCode set to zero. */
 
-    android_atomic_release_store(1, &gDvm.javaLangStringReady);
-
-    return true;
-}
-
-/*
- * Discard heap-allocated storage.
- */
-void dvmStringShutdown()
-{
-    // currently unused
+    *pChars = chars;
+    return (StringObject*) result;
 }
 
 /*
@@ -278,104 +237,41 @@
 StringObject* dvmCreateStringFromCstrAndLength(const char* utf8Str,
     u4 utf16Length)
 {
-    StringObject* newObj;
-    ArrayObject* chars;
-    u4 hashCode = 0;
-
-    //LOGV("Creating String from '%s'\n", utf8Str);
     assert(utf8Str != NULL);
 
-    if (gDvm.javaLangStringReady <= 0) {
-        if (!stringStartup())
-            return NULL;
-    }
-
-    /* init before alloc */
-    if (!dvmIsClassInitialized(gDvm.classJavaLangString) &&
-        !dvmInitClass(gDvm.classJavaLangString))
-    {
+    ArrayObject* chars;
+    StringObject* newObj = makeStringObject(utf16Length, &chars);
+    if (newObj == NULL) {
         return NULL;
     }
 
-    newObj = (StringObject*) dvmAllocObject(gDvm.classJavaLangString,
-                ALLOC_DEFAULT);
-    if (newObj == NULL)
-        return NULL;
+    dvmConvertUtf8ToUtf16((u2*) chars->contents, utf8Str);
 
-    chars = dvmAllocPrimitiveArray('C', utf16Length, ALLOC_DEFAULT);
-    if (chars == NULL) {
-        dvmReleaseTrackedAlloc((Object*) newObj, NULL);
-        return NULL;
-    }
-    dvmConvertUtf8ToUtf16((u2*)chars->contents, utf8Str);
-    hashCode = dvmComputeUtf16Hash((u2*) chars->contents, utf16Length);
+    u4 hashCode = dvmComputeUtf16Hash((u2*) chars->contents, utf16Length);
+    dvmSetFieldInt((Object*) newObj, STRING_FIELDOFF_HASHCODE, hashCode);
 
-    dvmSetFieldObject((Object*)newObj, STRING_FIELDOFF_VALUE,
-        (Object*)chars);
-    dvmReleaseTrackedAlloc((Object*) chars, NULL);
-    dvmSetFieldInt((Object*)newObj, STRING_FIELDOFF_COUNT, utf16Length);
-    dvmSetFieldInt((Object*)newObj, STRING_FIELDOFF_HASHCODE, hashCode);
-    /* leave offset set to zero */
-
-    /* debugging stuff */
-    //dvmDumpObject((Object*)newObj);
-    //printHexDumpEx(ANDROID_LOG_DEBUG, chars->contents, utf16Length * 2,
-    //    kHexDumpMem);
-
-    /* caller may need to dvmReleaseTrackedAlloc(newObj) */
     return newObj;
 }
 
 /*
- * Create a new java/lang/String object, using the Unicode data.
+ * Create a new java/lang/String object, using the given Unicode data.
  */
 StringObject* dvmCreateStringFromUnicode(const u2* unichars, int len)
 {
-    StringObject* newObj;
-    ArrayObject* chars;
-    u4 hashCode = 0;
-
-    /* we allow a null pointer if the length is zero */
+    /* We allow a NULL pointer if the length is zero. */
     assert(len == 0 || unichars != NULL);
 
-    if (gDvm.javaLangStringReady <= 0) {
-        if (!stringStartup())
-            return NULL;
-    }
-
-    /* init before alloc */
-    if (!dvmIsClassInitialized(gDvm.classJavaLangString) &&
-        !dvmInitClass(gDvm.classJavaLangString))
-    {
+    ArrayObject* chars;
+    StringObject* newObj = makeStringObject(len, &chars);
+    if (newObj == NULL) {
         return NULL;
     }
 
-    newObj = (StringObject*) dvmAllocObject(gDvm.classJavaLangString,
-        ALLOC_DEFAULT);
-    if (newObj == NULL)
-        return NULL;
+    if (len > 0) memcpy(chars->contents, unichars, len * sizeof(u2));
 
-    chars = dvmAllocPrimitiveArray('C', len, ALLOC_DEFAULT);
-    if (chars == NULL) {
-        dvmReleaseTrackedAlloc((Object*) newObj, NULL);
-        return NULL;
-    }
-    if (len > 0)
-        memcpy(chars->contents, unichars, len * sizeof(u2));
-    hashCode = dvmComputeUtf16Hash((u2*) chars->contents, len);
-
-    dvmSetFieldObject((Object*)newObj, STRING_FIELDOFF_VALUE,
-        (Object*)chars);
-    dvmReleaseTrackedAlloc((Object*) chars, NULL);
-    dvmSetFieldInt((Object*)newObj, STRING_FIELDOFF_COUNT, len);
+    u4 hashCode = dvmComputeUtf16Hash((u2*) chars->contents, len);
     dvmSetFieldInt((Object*)newObj, STRING_FIELDOFF_HASHCODE, hashCode);
-    /* leave offset set to zero */
 
-    /* debugging stuff */
-    //dvmDumpObject((Object*)newObj);
-    //printHexDumpEx(ANDROID_LOG_DEBUG, chars->contents, len*2, kHexDumpMem);
-
-    /* caller must dvmReleaseTrackedAlloc(newObj) */
     return newObj;
 }
 
@@ -391,7 +287,7 @@
     int len, byteLen, offset;
     const u2* data;
 
-    assert(gDvm.javaLangStringReady > 0);
+    assert(gDvm.classJavaLangString != NULL);
 
     if (jstr == NULL)
         return NULL;
@@ -436,7 +332,7 @@
     int len, offset;
     const u2* data;
 
-    assert(gDvm.javaLangStringReady > 0);
+    assert(gDvm.classJavaLangString != NULL);
 
     if (jstr == NULL)
         return 0;       // should we throw something?  assert?
@@ -498,7 +394,7 @@
     ArrayObject* chars2;
     int len1, len2, offset1, offset2;
 
-    assert(gDvm.javaLangStringReady > 0);
+    assert(gDvm.classJavaLangString != NULL);
 
     /* get offset and length into char array; all values are in 16-bit units */
     len1 = dvmGetFieldInt((Object*) strObj1, STRING_FIELDOFF_COUNT);
@@ -521,3 +417,40 @@
                   (const u2*) chars2->contents + offset2,
                   len1 * sizeof(u2));
 }
+
+ArrayObject* dvmCreateStringArray(char** strings, size_t count)
+{
+    Thread* self = dvmThreadSelf();
+
+    /*
+     * Allocate an array to hold the String objects.
+     */
+    ArrayObject* stringArray =
+        dvmAllocObjectArray(gDvm.classJavaLangString, count, ALLOC_DEFAULT);
+    if (stringArray == NULL) {
+        /* probably OOM */
+        LOGD("Failed allocating array of %d strings\n", count);
+        assert(dvmCheckException(self));
+        return NULL;
+    }
+
+    /*
+     * Create the individual String objects and add them to the array.
+     */
+    size_t i;
+    for (i = 0; i < count; i++) {
+        Object* str =
+            (Object*) dvmCreateStringFromCstr(strings[i]);
+        if (str == NULL) {
+            /* probably OOM; drop out now */
+            assert(dvmCheckException(self));
+            dvmReleaseTrackedAlloc((Object*) stringArray, self);
+            return NULL;
+        }
+        dvmSetObjectArrayElement(stringArray, i, str);
+        /* stored in tracked array, okay to release */
+        dvmReleaseTrackedAlloc(str, self);
+    }
+
+    return stringArray;
+}
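Illustrative sketch (not part of the patch): the rewritten string constructors above compute the hash with dvmComputeUtf16Hash() and store it via STRING_FIELDOFF_HASHCODE. Assuming that hash matches java.lang.String.hashCode(), i.e. s[0]*31^(n-1) + ... + s[n-1], the computation is:

    /* Sketch: UTF-16 hash as assumed to be produced by dvmComputeUtf16Hash(). */
    static u4 utf16Hash(const u2* chars, size_t len)
    {
        u4 hash = 0;
        size_t i;
        for (i = 0; i < len; i++)
            hash = hash * 31 + chars[i];    /* same recurrence as String.hashCode() */
        return hash;
    }
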
diff --git a/vm/UtfString.h b/vm/UtfString.h
index b291f5a..793d8bc 100644
--- a/vm/UtfString.h
+++ b/vm/UtfString.h
@@ -56,6 +56,16 @@
 u4 dvmComputeStringHash(const StringObject* strObj);
 
 /*
+ * Create a java.lang.String[] from an array of C strings.
+ *
+ * The caller must call dvmReleaseTrackedAlloc() on the returned array,
+ * but not on the individual elements.
+ *
+ * Returns NULL and throws an exception on failure.
+ */
+ArrayObject* dvmCreateStringArray(char** strings, size_t count);
+
+/*
  * Create a java/lang/String from a C string.
  *
  * The caller must call dvmReleaseTrackedAlloc() on the return value.
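Illustrative sketch (not part of the patch): the ownership rule stated above for dvmCreateStringArray(), release the array, not its elements, leads to a calling pattern like this; the wrapper function name is illustrative.

    /* Sketch: build a String[] from C strings and drop the tracked reference
     * once the array is (or is about to be) reachable from the caller. */
    static ArrayObject* makeArgsArray(char** argv, size_t argc)
    {
        ArrayObject* args = dvmCreateStringArray(argv, argc);
        if (args == NULL)
            return NULL;                    /* exception already thrown */
        dvmReleaseTrackedAlloc((Object*) args, dvmThreadSelf());
        return args;
    }
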
diff --git a/vm/alloc/Alloc.c b/vm/alloc/Alloc.c
index 630743b..5d814b3 100644
--- a/vm/alloc/Alloc.c
+++ b/vm/alloc/Alloc.c
@@ -21,11 +21,6 @@
 #include "alloc/HeapInternal.h"
 #include "alloc/HeapSource.h"
 
-#if WITH_HPROF_STACK
-#include "hprof/Hprof.h"
-#endif
-
-
 /*
  * Initialize the GC universe.
  *
@@ -176,18 +171,11 @@
 
     assert(dvmIsClassInitialized(clazz) || dvmIsClassInitializing(clazz));
 
-    if (IS_CLASS_FLAG_SET(clazz, CLASS_ISFINALIZABLE)) {
-        flags |= ALLOC_FINALIZABLE;
-    }
-
     /* allocate on GC heap; memory is zeroed out */
-    newObj = dvmMalloc(clazz->objectSize, flags);
+    newObj = (Object*)dvmMalloc(clazz->objectSize, flags);
     if (newObj != NULL) {
         DVM_OBJECT_INIT(newObj, clazz);
-#if WITH_HPROF_STACK
-        hprofFillInStackTrace(newObj);
-#endif
-        dvmTrackAllocation(clazz, clazz->objectSize);
+        dvmTrackAllocation(clazz, clazz->objectSize);   /* notify DDMS */
     }
 
     return newObj;
@@ -199,43 +187,43 @@
  * We use the size actually allocated, rather than obj->clazz->objectSize,
  * because the latter doesn't work for array objects.
  */
-Object* dvmCloneObject(Object* obj)
+Object* dvmCloneObject(Object* obj, int flags)
 {
+    ClassObject* clazz;
     Object* copy;
-    int size;
-    int flags;
+    size_t size;
 
     assert(dvmIsValidObject(obj));
+    clazz = obj->clazz;
 
     /* Class.java shouldn't let us get here (java.lang.Class is final
      * and does not implement Cloneable), but make extra sure.
      * A memcpy() clone will wreak havoc on a ClassObject's "innards".
      */
-    assert(obj->clazz != gDvm.classJavaLangClass);
+    assert(clazz != gDvm.classJavaLangClass);
 
-    if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE))
-        flags = ALLOC_DEFAULT | ALLOC_FINALIZABLE;
-    else
-        flags = ALLOC_DEFAULT;
-
-    if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISARRAY)) {
+    if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
         size = dvmArrayObjectSize((ArrayObject *)obj);
     } else {
-        size = obj->clazz->objectSize;
+        size = clazz->objectSize;
     }
 
-    copy = dvmMalloc(size, flags);
+    copy = (Object*)dvmMalloc(size, flags);
     if (copy == NULL)
         return NULL;
-#if WITH_HPROF_STACK
-    hprofFillInStackTrace(copy);
-    dvmTrackAllocation(obj->clazz, size);
-#endif
 
+    /* We assume that memcpy will copy obj by words. */
     memcpy(copy, obj, size);
     DVM_LOCK_INIT(&copy->lock);
     dvmWriteBarrierObject(copy);
 
+    /* Mark the clone as finalizable if appropriate. */
+    if (IS_CLASS_FLAG_SET(clazz, CLASS_ISFINALIZABLE)) {
+        dvmSetFinalizable(copy);
+    }
+
+    dvmTrackAllocation(clazz, size);    /* notify DDMS */
+
     return copy;
 }
 
@@ -295,7 +283,7 @@
 /*
  * Explicitly initiate garbage collection.
  */
-void dvmCollectGarbage(bool clearSoftReferences)
+void dvmCollectGarbage(void)
 {
     if (gDvm.disableExplicitGc) {
         return;
@@ -313,8 +301,8 @@
 
 static void countInstancesOfClassCallback(void *ptr, void *arg)
 {
-    CountContext *ctx = arg;
-    const Object *obj = ptr;
+    CountContext *ctx = (CountContext *)arg;
+    const Object *obj = (const Object *)ptr;
 
     assert(ctx != NULL);
     if (obj->clazz == ctx->clazz) {
@@ -334,11 +322,11 @@
 
 static void countAssignableInstancesOfClassCallback(void *ptr, void *arg)
 {
-    CountContext *ctx = arg;
-    const Object *obj = ptr;
+    CountContext *ctx = (CountContext *)arg;
+    const Object *obj = (const Object *)ptr;
 
     assert(ctx != NULL);
-    if (dvmInstanceof(obj->clazz, ctx->clazz)) {
+    if (obj->clazz != NULL && dvmInstanceof(obj->clazz, ctx->clazz)) {
         ctx->count += 1;
     }
 }
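Illustrative sketch (not part of the patch): dvmCloneObject() now takes the allocation flags from its caller and marks the copy finalizable itself. With ALLOC_DEFAULT the copy is placed on the tracked-allocation list by dvmMalloc(), so a caller pattern might look like this; the wrapper name is illustrative.

    /* Sketch: cloning with the new signature; release the tracked reference
     * once the copy is reachable from the caller's frame. */
    static Object* cloneForCaller(Object* obj)
    {
        Object* copy = dvmCloneObject(obj, ALLOC_DEFAULT);
        if (copy == NULL)
            return NULL;                     /* OOM already thrown */
        dvmReleaseTrackedAlloc(copy, NULL);
        return copy;
    }
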
diff --git a/vm/alloc/Alloc.h b/vm/alloc/Alloc.h
index eba53df..4222ee4 100644
--- a/vm/alloc/Alloc.h
+++ b/vm/alloc/Alloc.h
@@ -59,16 +59,9 @@
 enum {
     ALLOC_DEFAULT       = 0x00,
     ALLOC_DONT_TRACK    = 0x01,     /* don't add to internal tracking list */
-    ALLOC_FINALIZABLE   = 0x02,     /* call finalize() before freeing */
 };
 
 /*
- * Call when a request is so far off that we can't call dvmMalloc().  Throws
- * an exception with the specified message.
- */
-void dvmThrowBadAllocException(const char* msg);
-
-/*
  * Track an object reference that is currently only visible internally.
  * This is called automatically by dvmMalloc() unless ALLOC_DONT_TRACK
  * is set.
@@ -94,45 +87,14 @@
 /*
  * Create a copy of an object.
  *
- * The new object will be added to the "tracked alloc" table.
+ * Returns NULL and throws an exception on failure.
  */
-Object* dvmCloneObject(Object* obj);
+Object* dvmCloneObject(Object* obj, int flags);
 
 /*
- * Validate the object pointer.  Returns "false" and throws an exception if
- * "obj" is null or invalid.
- *
- * This may be used in performance critical areas as a null-pointer check;
- * anything else here should be for debug builds only.  In particular, for
- * "release" builds we want to skip the call to dvmIsValidObject() -- the
- * classfile validation will screen out code that puts invalid data into
- * object reference registers.
+ * Make the object finalizable.
  */
-INLINE int dvmValidateObject(Object* obj)
-{
-    if (obj == NULL) {
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
-        return false;
-    }
-#ifdef WITH_EXTRA_OBJECT_VALIDATION
-    if (!dvmIsValidObject(obj)) {
-        dvmAbort();
-        dvmThrowException("Ljava/lang/InternalError;",
-            "VM detected invalid object ptr");
-        return false;
-    }
-#endif
-#ifndef NDEBUG
-    /* check for heap corruption */
-    if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
-        dvmAbort();
-        dvmThrowException("Ljava/lang/InternalError;",
-            "VM detected invalid object class ptr");
-        return false;
-    }
-#endif
-    return true;
-}
+void dvmSetFinalizable(Object* obj);
 
 /*
  * Determine the exact number of GC heap bytes used by an object.  (Internal
@@ -156,11 +118,9 @@
  * Initiate garbage collection.
  *
  * This usually happens automatically, but can also be caused by
- * Runtime.gc().  If clearSoftReferences is true, the garbage
- * collector will not attempt to preserve any softly-reachable
- * SoftReference referents.
+ * Runtime.gc().
  */
-void dvmCollectGarbage(bool clearSoftReferences);
+void dvmCollectGarbage(void);
 
 /*
  * Returns a count of the direct instances of a class.
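Illustrative sketch (not part of the patch): dvmSetFinalizable() replaces the ALLOC_FINALIZABLE flag removed above. One plausible implementation, modeled on the finalizable-registration path removed from dvmMalloc() in the Heap.c hunk further below; locking and the exact table used are assumptions and the real function may differ.

    /* Sketch: register an object so finalize() runs before it is freed.
     * Heap-lock handling is omitted for brevity. */
    void dvmSetFinalizable(Object* obj)
    {
        GcHeap* gcHeap = gDvm.gcHeap;
        if (!dvmHeapAddRefToLargeTable(&gcHeap->finalizableRefs, obj)) {
            LOGE_HEAP("No room to register finalizable object\n");
            dvmAbort();
        }
    }
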
diff --git a/vm/alloc/CardTable.c b/vm/alloc/CardTable.c
index 02b6c47..2de586e 100644
--- a/vm/alloc/CardTable.c
+++ b/vm/alloc/CardTable.c
@@ -17,6 +17,8 @@
 #include <sys/mman.h>  /* for PROT_* */
 
 #include "Dalvik.h"
+#include "alloc/HeapBitmap.h"
+#include "alloc/HeapBitmapInlines.h"
 #include "alloc/HeapSource.h"
 #include "alloc/Visit.h"
 
@@ -42,11 +44,7 @@
  * byte is equal to GC_DIRTY_CARD. See dvmCardTableStartup for details.
  */
 
-/*
- * Initializes the card table; must be called before any other
- * dvmCardTable*() functions.
- */
-bool dvmCardTableStartup(size_t heapMaximumSize)
+static bool allocCardTable(size_t heapMaximumSize)
 {
     size_t length;
     void *allocBase;
@@ -58,13 +56,14 @@
 
     /* Set up the card table */
     length = heapMaximumSize / GC_CARD_SIZE;
+    assert(length * GC_CARD_SIZE == heapMaximumSize);
     /* Allocate an extra 256 bytes to allow fixed low-byte of base */
     allocBase = dvmAllocRegion(length + 0x100, PROT_READ | PROT_WRITE,
                             "dalvik-card-table");
     if (allocBase == NULL) {
         return false;
     }
-    gcHeap->cardTableBase = allocBase;
+    gcHeap->cardTableBase = (u1*)allocBase;
     gcHeap->cardTableLength = length;
     /* All zeros is the correct initial value; all clean. */
     assert(GC_CARD_CLEAN == 0);
@@ -81,19 +80,109 @@
     return true;
 }
 
+static bool allocModUnionTable(size_t heapMaximumSize)
+{
+    size_t length = heapMaximumSize / GC_CARD_SIZE / HB_BITS_PER_WORD;
+    assert(length * GC_CARD_SIZE * HB_BITS_PER_WORD == heapMaximumSize);
+    int prot = PROT_READ | PROT_WRITE;
+    void *allocBase = dvmAllocRegion(length, prot, "dalvik-modunion-table");
+    if (allocBase == NULL) {
+        return false;
+    }
+    GcHeap *gcHeap = gDvm.gcHeap;
+    gcHeap->modUnionTableBase = (u1*)allocBase;
+    gcHeap->modUnionTableLength = length;
+    return true;
+}
+
+/*
+ * Initializes the card table; must be called before any other
+ * dvmCardTable*() functions.
+ */
+bool dvmCardTableStartup(size_t heapMaximumSize)
+{
+    return allocCardTable(heapMaximumSize) && allocModUnionTable(heapMaximumSize);
+}
+
+/*
+ * Releases storage for the card table and clears its globals.
+ */
+static void freeCardTable()
+{
+    if (gDvm.biasedCardTableBase == NULL) {
+        return;
+    }
+    gDvm.biasedCardTableBase = NULL;
+    munmap(gDvm.gcHeap->cardTableBase, gDvm.gcHeap->cardTableLength);
+    gDvm.gcHeap->cardTableBase = NULL;
+    gDvm.gcHeap->cardTableLength = 0;
+}
+
+/*
+ * Releases storage for the mod union table and clears its globals.
+ */
+static void freeModUnionTable()
+{
+    if (gDvm.gcHeap->modUnionTableBase == NULL) {
+        return;
+    }
+    munmap(gDvm.gcHeap->modUnionTableBase, gDvm.gcHeap->modUnionTableLength);
+    gDvm.gcHeap->modUnionTableBase = NULL;
+    gDvm.gcHeap->modUnionTableLength = 0;
+}
+
 /*
  * Tears down the entire CardTable.
  */
 void dvmCardTableShutdown()
 {
-    gDvm.biasedCardTableBase = NULL;
-    munmap(gDvm.gcHeap->cardTableBase, gDvm.gcHeap->cardTableLength);
+    freeCardTable();
+    freeModUnionTable();
+}
+
+/*
+ * Set a bit in the mod union table for each dirty byte in the card
+ * table.  Clears the corresponding byte in the card table.
+ */
+static void moveCardsToModUnion(u1 *base, u1 *limit)
+{
+    GcHeap *h = gDvm.gcHeap;
+    u1 *baseCard = dvmCardFromAddr(base);
+    u1 *limitCard = dvmCardFromAddr(limit);
+    u4 *bits = (u4*)h->modUnionTableBase;
+    u1 *heapBase = (u1*)dvmHeapSourceGetBase();
+    u1 *card;
+    for (card = baseCard; card < limitCard; ++card) {
+        if (*card == GC_CARD_CLEAN) {
+            continue;
+        }
+        u1 *addr = (u1*)dvmAddrFromCard(card);
+        u1 *biased = (u1*)((uintptr_t)addr - (uintptr_t)heapBase);
+        size_t offset = (uintptr_t)biased / GC_CARD_SIZE / HB_BITS_PER_WORD;
+        u4 bit = 1 << (((uintptr_t)biased / GC_CARD_SIZE) % HB_BITS_PER_WORD);
+        assert((u1*)&bits[offset] >= h->modUnionTableBase);
+        assert((u1*)&bits[offset] < h->modUnionTableBase+h->modUnionTableLength);
+        bits[offset] |= bit;
+        *card = GC_CARD_CLEAN;
+    }
 }
 
 void dvmClearCardTable(void)
 {
-    assert(gDvm.gcHeap->cardTableBase != NULL);
-    memset(gDvm.gcHeap->cardTableBase, GC_CARD_CLEAN, gDvm.gcHeap->cardTableLength);
+    uintptr_t base[HEAP_SOURCE_MAX_HEAP_COUNT];
+    uintptr_t limit[HEAP_SOURCE_MAX_HEAP_COUNT];
+    size_t numHeaps = dvmHeapSourceGetNumHeaps();
+    dvmHeapSourceGetRegions(base, NULL, limit, numHeaps);
+    size_t i;
+    for (i = 0; i < numHeaps; ++i) {
+        if (i != 0) {
+            moveCardsToModUnion((u1*)base[i], (u1*)limit[i]);
+        } else {
+            u1 *baseCard = dvmCardFromAddr((u1*)base[i]);
+            size_t length = (limit[i] - base[i]) >> GC_CARD_SHIFT;
+            memset(baseCard, GC_CARD_CLEAN, length);
+        }
+    }
 }
 
 /*
@@ -171,7 +260,7 @@
         return;
     }
     assert(dvmIsValidObject(obj));
-    ctx = arg;
+    ctx = (WhiteReferenceCounter *)arg;
     if (dvmHeapBitmapIsObjectBitSet(ctx->markBits, obj)) {
         return;
     }
@@ -193,7 +282,7 @@
         return;
     }
     assert(dvmIsValidObject(obj));
-    ctx = arg;
+    ctx = (WhiteReferenceCounter*)arg;
     if (dvmHeapBitmapIsObjectBitSet(ctx->markBits, obj)) {
         return;
     }
@@ -214,14 +303,14 @@
 
 static void dumpReferencesCallback(void *ptr, void *arg)
 {
-    Object *obj = arg;
+    Object *obj = (Object *)arg;
     if (ptr == obj) {
         return;
     }
-    dvmVisitObject(dumpReferencesVisitor, ptr, &obj);
+    dvmVisitObject(dumpReferencesVisitor, (Object *)ptr, &obj);
     if (obj == NULL) {
         LOGD("Found %p in the heap @ %p", arg, ptr);
-        dvmDumpObject(ptr);
+        dvmDumpObject((Object *)ptr);
     }
 }
 
@@ -311,8 +400,8 @@
  */
 static void verifyCardTableCallback(void *ptr, void *arg)
 {
-    Object *obj = ptr;
-    WhiteReferenceCounter ctx = { arg, 0 };
+    Object *obj = (Object *)ptr;
+    WhiteReferenceCounter ctx = { (HeapBitmap *)arg, 0 };
 
     dvmVisitObject(countWhiteReferenceVisitor, obj, &ctx);
     if (ctx.whiteRefs == 0) {
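Illustrative sketch (not part of the patch): moveCardsToModUnion() above condenses dirty card bytes into mod-union bits. The address arithmetic, written out with assumed constants GC_CARD_SHIFT == 7 (128-byte cards) and 32-bit mod-union words, neither value is taken from this patch, is:

    /* Sketch: map a heap address to its card and to its mod-union bit. */
    static void exampleCardMapping(u1* heapBase, u1* addr, u4* modUnionBits)
    {
        size_t cardIndex = (size_t)(addr - heapBase) >> 7;  /* which 128-byte card */
        size_t wordIndex = cardIndex / 32;                  /* which u4 word */
        u4     bit       = 1u << (cardIndex % 32);          /* which bit in that word */
        modUnionBits[wordIndex] |= bit;                     /* remember the dirty card */
    }
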
diff --git a/vm/alloc/Copying.c b/vm/alloc/Copying.c
index 3c41057..f257ce3 100644
--- a/vm/alloc/Copying.c
+++ b/vm/alloc/Copying.c
@@ -24,7 +24,6 @@
 #include "alloc/HeapInternal.h"
 #include "alloc/HeapSource.h"
 #include "alloc/Verify.h"
-#include "alloc/clz.h"
 
 /*
  * A "mostly copying", generational, garbage collector.
@@ -2137,7 +2136,6 @@
     scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectMethodArray);
     scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectProxy);
     scavengeReference((Object **)(void *)&gDvm.classJavaLangExceptionInInitializerError);
-    scavengeReference((Object **)(void *)&gDvm.classJavaLangRefReference);
     scavengeReference((Object **)(void *)&gDvm.classJavaNioReadWriteDirectByteBuffer);
     scavengeReference((Object **)(void *)&gDvm.classJavaSecurityAccessController);
     scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationFactory);
diff --git a/vm/alloc/DdmHeap.c b/vm/alloc/DdmHeap.c
index 2d661ee..b377b1b 100644
--- a/vm/alloc/DdmHeap.c
+++ b/vm/alloc/DdmHeap.c
@@ -273,7 +273,7 @@
          */
         state = HPSG_STATE(SOLIDITY_FREE, 0);
     } else {
-        const Object *obj = userptr;
+        const Object *obj = (const Object *)userptr;
         /* If we're looking at the native heap, we'll just return
          * (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks
          */
diff --git a/vm/alloc/Heap.c b/vm/alloc/Heap.c
index 0e563c3..c0ef8fb 100644
--- a/vm/alloc/Heap.c
+++ b/vm/alloc/Heap.c
@@ -161,25 +161,6 @@
 }
 
 /*
- * We've been asked to allocate something we can't, e.g. an array so
- * large that (length * elementWidth) is larger than 2^31.
- *
- * _The Java Programming Language_, 4th edition, says, "you can be sure
- * that all SoftReferences to softly reachable objects will be cleared
- * before an OutOfMemoryError is thrown."
- *
- * It's unclear whether that holds for all situations where an OOM can
- * be thrown, or just in the context of an allocation that fails due
- * to lack of heap space.  For simplicity we just throw the exception.
- *
- * (OOM due to actually running out of space is handled elsewhere.)
- */
-void dvmThrowBadAllocException(const char* msg)
-{
-    dvmThrowException("Ljava/lang/OutOfMemoryError;", msg);
-}
-
-/*
  * Grab the lock, but put ourselves into THREAD_VMWAIT if it looks like
  * we're going to have to wait on the mutex.
  */
@@ -388,7 +369,7 @@
             /* Don't include a description string;
              * one fewer allocation.
              */
-            dvmThrowException("Ljava/lang/OutOfMemoryError;", NULL);
+            dvmThrowOutOfMemoryError(NULL);
         } else {
             /*
              * This thread has already tried to throw an OutOfMemoryError,
@@ -435,7 +416,6 @@
  */
 void* dvmMalloc(size_t size, int flags)
 {
-    GcHeap *gcHeap = gDvm.gcHeap;
     void *ptr;
 
     dvmLockHeap();
@@ -446,19 +426,6 @@
     if (ptr != NULL) {
         /* We've got the memory.
          */
-        if ((flags & ALLOC_FINALIZABLE) != 0) {
-            /* This object is an instance of a class that
-             * overrides finalize().  Add it to the finalizable list.
-             */
-            if (!dvmHeapAddRefToLargeTable(&gcHeap->finalizableRefs,
-                                    (Object *)ptr))
-            {
-                LOGE_HEAP("dvmMalloc(): no room for any more "
-                        "finalizable objects\n");
-                dvmAbort();
-            }
-        }
-
         if (gDvm.allocProf.enabled) {
             Thread* self = dvmThreadSelf();
             gDvm.allocProf.allocCount++;
@@ -491,7 +458,7 @@
          * internal tracking list.
          */
         if ((flags & ALLOC_DONT_TRACK) == 0) {
-            dvmAddTrackedAlloc(ptr, NULL);
+            dvmAddTrackedAlloc((Object*)ptr, NULL);
         }
     } else {
         /*
@@ -680,6 +647,7 @@
     /* Mark the set of objects that are strongly reachable from the roots.
      */
     LOGD_HEAP("Marking...");
+    dvmClearCardTable();
     dvmHeapMarkRootSet();
 
     /* dvmHeapScanMarkedObjects() will build the lists of known
@@ -695,7 +663,6 @@
          * heap to allow mutator threads to allocate from free space.
          */
         rootEnd = dvmGetRelativeTimeMsec();
-        dvmClearCardTable();
         dvmUnlockHeap();
         dvmResumeAllThreads(SUSPEND_FOR_GC);
     }
@@ -705,7 +672,7 @@
      * objects will also be marked.
      */
     LOGD_HEAP("Recursing...");
-    dvmHeapScanMarkedObjects();
+    dvmHeapScanMarkedObjects(spec->isPartial);
 
     if (spec->isConcurrent) {
         /*
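Illustrative sketch (not part of the patch): the Heap.c hunk above moves dvmClearCardTable() ahead of root marking, so cards dirtied by the write barrier while mutators run concurrently are preserved for a later re-scan. A simplified outline of that ordering, using only calls visible in the patch (the GcSpec type name for the spec parameter is an assumption):

    /* Sketch: concurrent-mark ordering implied by the hunk above; timing,
     * locking details, and error handling are omitted. */
    static void concurrentMarkSketch(const GcSpec* spec)
    {
        dvmClearCardTable();                        /* start with clean cards */
        dvmHeapMarkRootSet();                       /* roots, mutators suspended */
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);        /* barrier now dirties cards */
        dvmHeapScanMarkedObjects(spec->isPartial);  /* recursive mark, concurrent */
        /* ... later: re-suspend, re-scan objects on dirty cards, then sweep ... */
    }
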
diff --git a/vm/alloc/HeapBitmap.c b/vm/alloc/HeapBitmap.c
index b3a2bb1..ffeff62 100644
--- a/vm/alloc/HeapBitmap.c
+++ b/vm/alloc/HeapBitmap.c
@@ -16,7 +16,6 @@
 
 #include "Dalvik.h"
 #include "HeapBitmap.h"
-#include "clz.h"
 #include <sys/mman.h>   /* for PROT_* */
 
 /*
@@ -39,7 +38,7 @@
         LOGE("Could not mmap %zd-byte ashmem region '%s'", bitsLen, name);
         return false;
     }
-    hb->bits = bits;
+    hb->bits = (unsigned long *)bits;
     hb->bitsLen = hb->allocLen = bitsLen;
     hb->base = (uintptr_t)base;
     hb->max = hb->base - 1;
@@ -79,6 +78,92 @@
 }
 
 /*
+ * Return true iff <obj> is within the range of pointers that this
+ * bitmap could potentially cover, even if a bit has not been set
+ * for it.
+ */
+bool dvmHeapBitmapCoversAddress(const HeapBitmap *hb, const void *obj)
+{
+    assert(hb != NULL);
+    if (obj != NULL) {
+        const uintptr_t offset = (uintptr_t)obj - hb->base;
+        const size_t index = HB_OFFSET_TO_INDEX(offset);
+        return index < hb->bitsLen / sizeof(*hb->bits);
+    }
+    return false;
+}
+
+/*
+ * Visits set bits in address order.  The callback is not permitted to
+ * change the bitmap bits or max during the traversal.
+ */
+void dvmHeapBitmapWalk(const HeapBitmap *bitmap, BitmapCallback *callback,
+                       void *arg)
+{
+    uintptr_t end;
+    uintptr_t i;
+
+    assert(bitmap != NULL);
+    assert(bitmap->bits != NULL);
+    assert(callback != NULL);
+    end = HB_OFFSET_TO_INDEX(bitmap->max - bitmap->base);
+    for (i = 0; i <= end; ++i) {
+        unsigned long word = bitmap->bits[i];
+        if (UNLIKELY(word != 0)) {
+            unsigned long highBit = 1 << (HB_BITS_PER_WORD - 1);
+            uintptr_t ptrBase = HB_INDEX_TO_OFFSET(i) + bitmap->base;
+            while (word != 0) {
+                const int shift = CLZ(word);
+                void *addr = (void *)(ptrBase + shift * HB_OBJECT_ALIGNMENT);
+                (*callback)(addr, arg);
+                word &= ~(highBit >> shift);
+            }
+        }
+    }
+}
+
+/*
+ * Similar to dvmHeapBitmapWalk but the callback routine is permitted
+ * to change the bitmap bits and max during traversal.  Used by
+ * the root marking scan exclusively.
+ *
+ * The callback is invoked with a finger argument.  The finger is a
+ * pointer to an address not yet visited by the traversal.  If the
+ * callback sets a bit for an address at or above the finger, this
+ * address will be visited by the traversal.  If the callback sets a
+ * bit for an address below the finger, this address will not be
+ * visited.
+ */
+void dvmHeapBitmapScanWalk(HeapBitmap *bitmap,
+                           uintptr_t base, uintptr_t max,
+                           BitmapScanCallback *callback, void *arg)
+{
+    assert(bitmap != NULL);
+    assert(bitmap->bits != NULL);
+    assert(callback != NULL);
+    assert(base <= max);
+    assert(base >= bitmap->base);
+    assert(max <= bitmap->max);
+    uintptr_t end = HB_OFFSET_TO_INDEX(max - base);
+    uintptr_t i;
+    for (i = 0; i <= end; ++i) {
+        unsigned long word = bitmap->bits[i];
+        if (UNLIKELY(word != 0)) {
+            unsigned long highBit = 1 << (HB_BITS_PER_WORD - 1);
+            uintptr_t ptrBase = HB_INDEX_TO_OFFSET(i) + bitmap->base;
+            void *finger = (void *)(HB_INDEX_TO_OFFSET(i + 1) + bitmap->base);
+            while (word != 0) {
+                const int shift = CLZ(word);
+                void *addr = (void *)(ptrBase + shift * HB_OBJECT_ALIGNMENT);
+                (*callback)(addr, finger, arg);
+                word &= ~(highBit >> shift);
+            }
+            end = HB_OFFSET_TO_INDEX(bitmap->max - bitmap->base);
+        }
+    }
+}
+
+/*
  * Walk through the bitmaps in increasing address order, and find the
  * object pointers that correspond to garbage objects.  Call
  * <callback> zero or more times with lists of these object pointers.
@@ -86,15 +171,14 @@
  * The callback is not permitted to increase the max of either bitmap.
  */
 void dvmHeapBitmapSweepWalk(const HeapBitmap *liveHb, const HeapBitmap *markHb,
+                            uintptr_t base, uintptr_t max,
                             BitmapSweepCallback *callback, void *callbackArg)
 {
-    static const size_t kPointerBufSize = 128;
-    void *pointerBuf[kPointerBufSize];
+    void *pointerBuf[4 * HB_BITS_PER_WORD];
     void **pb = pointerBuf;
-    size_t index;
     size_t i;
+    size_t start, end;
     unsigned long *live, *mark;
-    uintptr_t offset;
 
     assert(liveHb != NULL);
     assert(liveHb->bits != NULL);
@@ -103,16 +187,19 @@
     assert(liveHb->base == markHb->base);
     assert(liveHb->bitsLen == markHb->bitsLen);
     assert(callback != NULL);
+    assert(base <= max);
+    assert(base >= liveHb->base);
+    assert(max <= liveHb->max);
     if (liveHb->max < liveHb->base) {
         /* Easy case; both are obviously empty.
          */
         return;
     }
-    offset = liveHb->max - liveHb->base;
-    index = HB_OFFSET_TO_INDEX(offset);
+    start = HB_OFFSET_TO_INDEX(base - liveHb->base);
+    end = HB_OFFSET_TO_INDEX(max - liveHb->base);
     live = liveHb->bits;
     mark = markHb->bits;
-    for (i = 0; i <= index; i++) {
+    for (i = start; i <= end; i++) {
         unsigned long garbage = live[i] & ~mark[i];
         if (UNLIKELY(garbage != 0)) {
             unsigned long highBit = 1 << (HB_BITS_PER_WORD - 1);
@@ -124,7 +211,7 @@
             }
             /* Make sure that there are always enough slots available */
             /* for an entire word of 1s. */
-            if (kPointerBufSize - (pb - pointerBuf) < HB_BITS_PER_WORD) {
+            if (pb >= &pointerBuf[NELEM(pointerBuf) - HB_BITS_PER_WORD]) {
                 (*callback)(pb - pointerBuf, pointerBuf, callbackArg);
                 pb = pointerBuf;
             }
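Illustrative sketch (not part of the patch): the walk routines moved into HeapBitmap.c above decode each nonzero bitmap word with CLZ. Assuming 32-bit words and HB_OBJECT_ALIGNMENT == 8, a word value of 0x80000001 yields shifts 0 and 31, i.e. objects at ptrBase and ptrBase + 31*8:

    /* Sketch: decode one bitmap word, most-significant bit first. */
    static void decodeWordExample(uintptr_t ptrBase, unsigned long word,
                                  BitmapCallback *callback, void *arg)
    {
        unsigned long highBit = 1ul << (HB_BITS_PER_WORD - 1);
        while (word != 0) {
            int shift = CLZ(word);                      /* leading-zero count */
            void *addr = (void *)(ptrBase + shift * HB_OBJECT_ALIGNMENT);
            (*callback)(addr, arg);
            word &= ~(highBit >> shift);                /* clear the handled bit */
        }
    }
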
diff --git a/vm/alloc/HeapBitmap.h b/vm/alloc/HeapBitmap.h
index 7995f19..fecc2a9 100644
--- a/vm/alloc/HeapBitmap.h
+++ b/vm/alloc/HeapBitmap.h
@@ -18,7 +18,6 @@
 
 #include <limits.h>
 #include <stdint.h>
-#include "clz.h"
 
 #define HB_OBJECT_ALIGNMENT 8
 #define HB_BITS_PER_WORD (sizeof(unsigned long) * CHAR_BIT)
@@ -42,15 +41,6 @@
     (1 << \
         (31-(((uintptr_t)(offset_) / HB_OBJECT_ALIGNMENT) % HB_BITS_PER_WORD)))
 
-/* Return the maximum offset (exclusive) that <hb> can represent.
- */
-#define HB_MAX_OFFSET(hb_) \
-    HB_INDEX_TO_OFFSET((hb_)->bitsLen / sizeof(*(hb_)->bits))
-
-#define HB_INLINE_PROTO(p) \
-    static inline p __attribute__((always_inline)); \
-    static inline p
-
 typedef struct {
     /* The bitmap data, which points to an mmap()ed area of zeroed
      * anonymous memory.
@@ -80,6 +70,9 @@
     uintptr_t max;
 } HeapBitmap;
 
+/*
+ * Callback types used by the walking routines.
+ */
 typedef void BitmapCallback(void *addr, void *arg);
 typedef void BitmapScanCallback(void *addr, void *finger, void *arg);
 typedef void BitmapSweepCallback(size_t numPtrs, void **ptrs, void *arg);
@@ -104,74 +97,24 @@
 void dvmHeapBitmapZero(HeapBitmap *hb);
 
 /*
- * Visits set bits in address order.  The callback is not permitted to
- * change the bitmap bits or max during the traversal.
+ * Returns true if the address range of the bitmap covers the object
+ * address.
  */
-HB_INLINE_PROTO(
-    void
-    dvmHeapBitmapWalk(const HeapBitmap *bitmap,
-                      BitmapCallback *callback, void *arg)
-)
-{
-    assert(bitmap != NULL);
-    assert(bitmap->bits != NULL);
-    assert(callback != NULL);
-    uintptr_t end = HB_OFFSET_TO_INDEX(bitmap->max - bitmap->base);
-    uintptr_t i;
-    for (i = 0; i <= end; ++i) {
-        unsigned long word = bitmap->bits[i];
-        if (UNLIKELY(word != 0)) {
-            unsigned long highBit = 1 << (HB_BITS_PER_WORD - 1);
-            uintptr_t ptrBase = HB_INDEX_TO_OFFSET(i) + bitmap->base;
-            while (word != 0) {
-                const int shift = CLZ(word);
-                void *addr = (void *)(ptrBase + shift * HB_OBJECT_ALIGNMENT);
-                (*callback)(addr, arg);
-                word &= ~(highBit >> shift);
-            }
-        }
-    }
-}
+bool dvmHeapBitmapCoversAddress(const HeapBitmap *hb, const void *obj);
 
 /*
- * Similar to dvmHeapBitmapWalk but the callback routine is permitted
- * to change the bitmap bits and max during traversal.  Used by the
- * the root marking scan exclusively.
- *
- * The callback is invoked with a finger argument.  The finger is a
- * pointer to an address not yet visited by the traversal.  If the
- * callback sets a bit for an address at or above the finger, this
- * address will be visited by the traversal.  If the callback sets a
- * bit for an address below the finger, this address will not be
- * visited.
+ * Applies the callback function to each set address in the bitmap.
  */
-HB_INLINE_PROTO(
-    void
-    dvmHeapBitmapScanWalk(HeapBitmap *bitmap,
-                          BitmapScanCallback *callback, void *arg)
-)
-{
-    assert(bitmap != NULL);
-    assert(bitmap->bits != NULL);
-    assert(callback != NULL);
-    uintptr_t end = HB_OFFSET_TO_INDEX(bitmap->max - bitmap->base);
-    uintptr_t i;
-    for (i = 0; i <= end; ++i) {
-        unsigned long word = bitmap->bits[i];
-        if (UNLIKELY(word != 0)) {
-            unsigned long highBit = 1 << (HB_BITS_PER_WORD - 1);
-            uintptr_t ptrBase = HB_INDEX_TO_OFFSET(i) + bitmap->base;
-            void *finger = (void *)(HB_INDEX_TO_OFFSET(i + 1) + bitmap->base);
-            while (word != 0) {
-                const int shift = CLZ(word);
-                void *addr = (void *)(ptrBase + shift * HB_OBJECT_ALIGNMENT);
-                (*callback)(addr, finger, arg);
-                word &= ~(highBit >> shift);
-            }
-            end = HB_OFFSET_TO_INDEX(bitmap->max - bitmap->base);
-        }
-    }
-}
+void dvmHeapBitmapWalk(const HeapBitmap *bitmap,
+                       BitmapCallback *callback, void *callbackArg);
+
+/*
+ * Like dvmHeapBitmapWalk but takes a callback function with a finger
+ * address.
+ */
+void dvmHeapBitmapScanWalk(HeapBitmap *bitmap,
+                           uintptr_t base, uintptr_t max,
+                           BitmapScanCallback *callback, void *arg);
 
 /*
  * Walk through the bitmaps in increasing address order, and find the
@@ -181,126 +124,7 @@
  * The callback is not permitted to increase the max of either bitmap.
  */
 void dvmHeapBitmapSweepWalk(const HeapBitmap *liveHb, const HeapBitmap *markHb,
+                            uintptr_t base, uintptr_t max,
                             BitmapSweepCallback *callback, void *callbackArg);
 
-/*
- * Return true iff <obj> is within the range of pointers that this
- * bitmap could potentially cover, even if a bit has not been set
- * for it.
- */
-HB_INLINE_PROTO(
-    bool
-    dvmHeapBitmapCoversAddress(const HeapBitmap *hb, const void *obj)
-)
-{
-    assert(hb != NULL);
-
-    if (obj != NULL) {
-        const uintptr_t offset = (uintptr_t)obj - hb->base;
-        const size_t index = HB_OFFSET_TO_INDEX(offset);
-        return index < hb->bitsLen / sizeof(*hb->bits);
-    }
-    return false;
-}
-
-/*
- * Internal function; do not call directly.
- */
-HB_INLINE_PROTO(
-    unsigned long
-    _heapBitmapModifyObjectBit(HeapBitmap *hb, const void *obj,
-            bool setBit, bool returnOld)
-)
-{
-    const uintptr_t offset = (uintptr_t)obj - hb->base;
-    const size_t index = HB_OFFSET_TO_INDEX(offset);
-    const unsigned long mask = HB_OFFSET_TO_MASK(offset);
-
-    assert(hb->bits != NULL);
-    assert((uintptr_t)obj >= hb->base);
-    assert(index < hb->bitsLen / sizeof(*hb->bits));
-
-    if (setBit) {
-        if ((uintptr_t)obj > hb->max) {
-            hb->max = (uintptr_t)obj;
-        }
-        if (returnOld) {
-            unsigned long *p = hb->bits + index;
-            const unsigned long word = *p;
-            *p |= mask;
-            return word & mask;
-        } else {
-            hb->bits[index] |= mask;
-        }
-    } else {
-        hb->bits[index] &= ~mask;
-    }
-    return false;
-}
-
-/*
- * Sets the bit corresponding to <obj>, and returns the previous value
- * of that bit (as zero or non-zero). Does no range checking to see if
- * <obj> is outside of the coverage of the bitmap.
- *
- * NOTE: casting this value to a bool is dangerous, because higher
- * set bits will be lost.
- */
-HB_INLINE_PROTO(
-    unsigned long
-    dvmHeapBitmapSetAndReturnObjectBit(HeapBitmap *hb, const void *obj)
-)
-{
-    return _heapBitmapModifyObjectBit(hb, obj, true, true);
-}
-
-/*
- * Sets the bit corresponding to <obj>, and widens the range of seen
- * pointers if necessary.  Does no range checking.
- */
-HB_INLINE_PROTO(
-    void
-    dvmHeapBitmapSetObjectBit(HeapBitmap *hb, const void *obj)
-)
-{
-    (void)_heapBitmapModifyObjectBit(hb, obj, true, false);
-}
-
-/*
- * Clears the bit corresponding to <obj>.  Does no range checking.
- */
-HB_INLINE_PROTO(
-    void
-    dvmHeapBitmapClearObjectBit(HeapBitmap *hb, const void *obj)
-)
-{
-    (void)_heapBitmapModifyObjectBit(hb, obj, false, false);
-}
-
-/*
- * Returns the current value of the bit corresponding to <obj>,
- * as zero or non-zero.  Does no range checking.
- *
- * NOTE: casting this value to a bool is dangerous, because higher
- * set bits will be lost.
- */
-HB_INLINE_PROTO(
-    unsigned long
-    dvmHeapBitmapIsObjectBitSet(const HeapBitmap *hb, const void *obj)
-)
-{
-    assert(dvmHeapBitmapCoversAddress(hb, obj));
-    assert(hb->bits != NULL);
-    assert((uintptr_t)obj >= hb->base);
-
-    if ((uintptr_t)obj <= hb->max) {
-        const uintptr_t offset = (uintptr_t)obj - hb->base;
-        return hb->bits[HB_OFFSET_TO_INDEX(offset)] & HB_OFFSET_TO_MASK(offset);
-    } else {
-        return 0;
-    }
-}
-
-#undef HB_INLINE_PROTO
-
-#endif  // _DALVIK_HEAP_BITMAP
+#endif /* _DALVIK_HEAP_BITMAP */
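
For orientation, the walk entry points above take a per-object callback. A minimal sketch of a caller, assuming BitmapCallback has the void (void *addr, void *arg) shape used by verifyImmuneObjectsCallback in MarkSweep.c below; countObjectCallback/countLiveObjects are hypothetical names:

    #include "alloc/HeapBitmap.h"

    /* Illustrative only: count marked objects by walking a bitmap. */
    static void countObjectCallback(void *addr, void *arg)
    {
        (void)addr;                 /* addr is the start of a marked object */
        ++*(size_t *)arg;
    }

    static size_t countLiveObjects(const HeapBitmap *liveBits)
    {
        size_t count = 0;
        dvmHeapBitmapWalk(liveBits, countObjectCallback, &count);
        return count;
    }
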
diff --git a/vm/alloc/HeapBitmapInlines.h b/vm/alloc/HeapBitmapInlines.h
new file mode 100644
index 0000000..859ffa3
--- /dev/null
+++ b/vm/alloc/HeapBitmapInlines.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _DALVIK_HEAP_BITMAPINLINES
+#define _DALVIK_HEAP_BITMAPINLINES
+
+static unsigned long dvmHeapBitmapSetAndReturnObjectBit(HeapBitmap *hb, const void *obj) __attribute__((used));
+static void dvmHeapBitmapSetObjectBit(HeapBitmap *hb, const void *obj) __attribute__((used));
+static void dvmHeapBitmapClearObjectBit(HeapBitmap *hb, const void *obj) __attribute__((used));
+
+/*
+ * Internal function; do not call directly.
+ */
+static unsigned long _heapBitmapModifyObjectBit(HeapBitmap *hb, const void *obj,
+                                                bool setBit, bool returnOld)
+{
+    const uintptr_t offset = (uintptr_t)obj - hb->base;
+    const size_t index = HB_OFFSET_TO_INDEX(offset);
+    const unsigned long mask = HB_OFFSET_TO_MASK(offset);
+
+    assert(hb->bits != NULL);
+    assert((uintptr_t)obj >= hb->base);
+    assert(index < hb->bitsLen / sizeof(*hb->bits));
+    if (setBit) {
+        if ((uintptr_t)obj > hb->max) {
+            hb->max = (uintptr_t)obj;
+        }
+        if (returnOld) {
+            unsigned long *p = hb->bits + index;
+            const unsigned long word = *p;
+            *p |= mask;
+            return word & mask;
+        } else {
+            hb->bits[index] |= mask;
+        }
+    } else {
+        hb->bits[index] &= ~mask;
+    }
+    return false;
+}
+
+/*
+ * Sets the bit corresponding to <obj>, and returns the previous value
+ * of that bit (as zero or non-zero). Does no range checking to see if
+ * <obj> is outside of the coverage of the bitmap.
+ *
+ * NOTE: casting this value to a bool is dangerous, because higher
+ * set bits will be lost.
+ */
+static unsigned long dvmHeapBitmapSetAndReturnObjectBit(HeapBitmap *hb,
+                                                        const void *obj)
+{
+    return _heapBitmapModifyObjectBit(hb, obj, true, true);
+}
+
+/*
+ * Sets the bit corresponding to <obj>, and widens the range of seen
+ * pointers if necessary.  Does no range checking.
+ */
+static void dvmHeapBitmapSetObjectBit(HeapBitmap *hb, const void *obj)
+{
+    _heapBitmapModifyObjectBit(hb, obj, true, false);
+}
+
+/*
+ * Clears the bit corresponding to <obj>.  Does no range checking.
+ */
+static void dvmHeapBitmapClearObjectBit(HeapBitmap *hb, const void *obj)
+{
+    _heapBitmapModifyObjectBit(hb, obj, false, false);
+}
+
+/*
+ * Returns the current value of the bit corresponding to <obj>,
+ * as zero or non-zero.  Does no range checking.
+ *
+ * NOTE: casting this value to a bool is dangerous, because higher
+ * set bits will be lost.
+ */
+static unsigned long dvmHeapBitmapIsObjectBitSet(const HeapBitmap *hb,
+                                                 const void *obj)
+{
+    assert(dvmHeapBitmapCoversAddress(hb, obj));
+    assert(hb->bits != NULL);
+    assert((uintptr_t)obj >= hb->base);
+    if ((uintptr_t)obj <= hb->max) {
+        const uintptr_t offset = (uintptr_t)obj - hb->base;
+        return hb->bits[HB_OFFSET_TO_INDEX(offset)] & HB_OFFSET_TO_MASK(offset);
+    } else {
+        return 0;
+    }
+}
+
+#endif /* _DALVIK_HEAP_BITMAPINLINES */
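
A rough usage sketch of the inlines above, stringing the calls together; exampleMarkAndCheck is a hypothetical helper, everything else is declared in HeapBitmap.h or this header:

    /* Mark an object and sanity-check the bit afterwards. */
    static void exampleMarkAndCheck(HeapBitmap *hb, const void *obj)
    {
        assert(dvmHeapBitmapCoversAddress(hb, obj));
        if (!dvmHeapBitmapIsObjectBitSet(hb, obj)) {
            dvmHeapBitmapSetObjectBit(hb, obj);   /* widens hb->max if needed */
        }
        assert(dvmHeapBitmapIsObjectBitSet(hb, obj));
    }
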
diff --git a/vm/alloc/HeapInternal.h b/vm/alloc/HeapInternal.h
index 119c417..d10a417 100644
--- a/vm/alloc/HeapInternal.h
+++ b/vm/alloc/HeapInternal.h
@@ -99,6 +99,10 @@
     u1*             cardTableBase;
     size_t          cardTableLength;
 
+    /* GC's modified union table. */
+    u1*             modUnionTableBase;
+    size_t          modUnionTableLength;
+
     /* Is the GC running?  Used to avoid recursive calls to GC.
      */
     bool            gcRunning;
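
The mod union table introduced here appears, judging from visitModUnionTable in MarkSweep.c below, to hold one bit per card. A sizing sketch under that assumption (the actual allocation is not shown in this hunk; CHAR_BIT comes from <limits.h>):

    /* Assumed: one mod union bit per GC_CARD_SIZE bytes of heap. */
    static size_t modUnionTableSize(size_t heapLength)
    {
        return heapLength / GC_CARD_SIZE / CHAR_BIT;
    }
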
diff --git a/vm/alloc/HeapSource.c b/vm/alloc/HeapSource.c
index 764b2a3..197fb6c 100644
--- a/vm/alloc/HeapSource.c
+++ b/vm/alloc/HeapSource.c
@@ -24,6 +24,7 @@
 #include "alloc/HeapInternal.h"
 #include "alloc/HeapSource.h"
 #include "alloc/HeapBitmap.h"
+#include "alloc/HeapBitmapInlines.h"
 
 // TODO: find a real header file for these.
 extern int dlmalloc_trim(size_t);
@@ -33,11 +34,6 @@
 static void setIdealFootprint(size_t max);
 static size_t getMaximumSize(const HeapSource *hs);
 
-#define ALIGN_UP_TO_PAGE_SIZE(p) \
-    (((size_t)(p) + (SYSTEM_PAGE_SIZE - 1)) & ~(SYSTEM_PAGE_SIZE - 1))
-#define ALIGN_DOWN_TO_PAGE_SIZE(p) \
-    ((size_t)(p) & ~(SYSTEM_PAGE_SIZE - 1))
-
 #define HEAP_UTILIZATION_MAX        1024
 #define DEFAULT_HEAP_UTILIZATION    512     // Range 1..HEAP_UTILIZATION_MAX
 #define HEAP_IDEAL_FREE             (2 * 1024 * 1024)
@@ -331,11 +327,35 @@
     return msp;
 }
 
-static bool
-addNewHeap(HeapSource *hs, mspace msp, size_t maximumSize)
+/*
+ * Add the initial heap.  Returns false if the initial heap was
+ * already added to the heap source.
+ */
+static bool addInitialHeap(HeapSource *hs, mspace msp, size_t maximumSize)
+{
+    assert(hs != NULL);
+    assert(msp != NULL);
+    if (hs->numHeaps != 0) {
+        return false;
+    }
+    hs->heaps[0].msp = msp;
+    hs->heaps[0].maximumSize = maximumSize;
+    hs->heaps[0].concurrentStartBytes = SIZE_MAX;
+    hs->heaps[0].base = hs->heapBase;
+    hs->heaps[0].limit = hs->heapBase + hs->heaps[0].maximumSize;
+    hs->numHeaps = 1;
+    return true;
+}
+
+/*
+ * Adds an additional heap to the heap source.  Returns false if there
+ * are too many heaps or insufficient free space to add another heap.
+ */
+static bool addNewHeap(HeapSource *hs)
 {
     Heap heap;
 
+    assert(hs != NULL);
     if (hs->numHeaps >= HEAP_SOURCE_MAX_HEAP_COUNT) {
         LOGE("Attempt to create too many heaps (%zd >= %zd)\n",
                 hs->numHeaps, HEAP_SOURCE_MAX_HEAP_COUNT);
@@ -345,42 +365,37 @@
 
     memset(&heap, 0, sizeof(heap));
 
-    if (msp != NULL) {
-        heap.msp = msp;
-        heap.maximumSize = maximumSize;
-        heap.concurrentStartBytes = SIZE_MAX;
-        heap.base = hs->heapBase;
-        heap.limit = hs->heapBase + heap.maximumSize;
-    } else {
-        void *sbrk0 = contiguous_mspace_sbrk0(hs->heaps[0].msp);
-        char *base = (char *)ALIGN_UP_TO_PAGE_SIZE(sbrk0);
-        size_t overhead = base - hs->heaps[0].base;
+    /*
+     * Heap storage comes from a common virtual memory reservation.
+     * The new heap will start on the page after the old heap.
+     */
+    void *sbrk0 = contiguous_mspace_sbrk0(hs->heaps[0].msp);
+    char *base = (char *)ALIGN_UP_TO_PAGE_SIZE(sbrk0);
+    size_t overhead = base - hs->heaps[0].base;
+    assert(((size_t)hs->heaps[0].base & (SYSTEM_PAGE_SIZE - 1)) == 0);
 
-        assert(((size_t)hs->heaps[0].base & (SYSTEM_PAGE_SIZE - 1)) == 0);
-        if (overhead + HEAP_MIN_FREE >= hs->maximumSize) {
-            LOGE_HEAP("No room to create any more heaps "
-                    "(%zd overhead, %zd max)\n",
-                    overhead, hs->maximumSize);
-            return false;
-        }
-        hs->heaps[0].maximumSize = overhead;
-        hs->heaps[0].limit = base;
-        heap.maximumSize = hs->growthLimit - overhead;
-        heap.msp = createMspace(base, HEAP_MIN_FREE, hs->maximumSize - overhead);
-        heap.concurrentStartBytes = HEAP_MIN_FREE - CONCURRENT_START;
-        heap.base = base;
-        heap.limit = heap.base + heap.maximumSize;
-        if (heap.msp == NULL) {
-            return false;
-        }
+    if (overhead + HEAP_MIN_FREE >= hs->maximumSize) {
+        LOGE_HEAP("No room to create any more heaps "
+                  "(%zd overhead, %zd max)",
+                  overhead, hs->maximumSize);
+        return false;
+    }
+
+    heap.maximumSize = hs->growthLimit - overhead;
+    heap.concurrentStartBytes = HEAP_MIN_FREE - CONCURRENT_START;
+    heap.base = base;
+    heap.limit = heap.base + heap.maximumSize;
+    heap.msp = createMspace(base, HEAP_MIN_FREE, hs->maximumSize - overhead);
+    if (heap.msp == NULL) {
+        return false;
     }
 
     /* Don't let the soon-to-be-old heap grow any further.
      */
-    if (hs->numHeaps > 0) {
-        mspace msp = hs->heaps[0].msp;
-        mspace_set_max_allowed_footprint(msp, mspace_footprint(msp));
-    }
+    hs->heaps[0].maximumSize = overhead;
+    hs->heaps[0].limit = base;
+    mspace msp = hs->heaps[0].msp;
+    mspace_set_max_allowed_footprint(msp, mspace_footprint(msp));
 
     /* Put the new heap in the list, at heaps[0].
      * Shift existing heaps down.
@@ -507,14 +522,14 @@
 
     /* Allocate a descriptor from the heap we just created.
      */
-    gcHeap = mspace_malloc(msp, sizeof(*gcHeap));
+    gcHeap = (GcHeap *)mspace_malloc(msp, sizeof(*gcHeap));
     if (gcHeap == NULL) {
         LOGE_HEAP("Can't allocate heap descriptor\n");
         goto fail;
     }
     memset(gcHeap, 0, sizeof(*gcHeap));
 
-    hs = mspace_malloc(msp, sizeof(*hs));
+    hs = (HeapSource *)mspace_malloc(msp, sizeof(*hs));
     if (hs == NULL) {
         LOGE_HEAP("Can't allocate heap source\n");
         goto fail;
@@ -530,9 +545,9 @@
     hs->numHeaps = 0;
     hs->sawZygote = gDvm.zygote;
     hs->hasGcThread = false;
-    hs->heapBase = base;
+    hs->heapBase = (char *)base;
     hs->heapLength = length;
-    if (!addNewHeap(hs, msp, growthLimit)) {
+    if (!addInitialHeap(hs, msp, growthLimit)) {
         LOGE_HEAP("Can't add initial heap\n");
         goto fail;
     }
@@ -592,7 +607,8 @@
          */
         LOGV("Splitting out new zygote heap\n");
         gDvm.newZygoteHeapAllocated = true;
-        return addNewHeap(hs, NULL, 0);
+        dvmClearCardTable();
+        return addNewHeap(hs);
     }
     return true;
 }
@@ -679,46 +695,23 @@
     return total;
 }
 
-static void aliasBitmap(HeapBitmap *dst, HeapBitmap *src,
-                        uintptr_t base, uintptr_t max) {
-    size_t offset;
-
-    dst->base = base;
-    dst->max = max;
-    dst->bitsLen = HB_OFFSET_TO_BYTE_INDEX(max - base) + sizeof(dst->bits);
-    /* The exclusive limit from bitsLen is greater than the inclusive max. */
-    assert(base + HB_MAX_OFFSET(dst) > max);
-    /* The exclusive limit is at most one word of bits beyond max. */
-    assert((base + HB_MAX_OFFSET(dst)) - max <=
-           HB_OBJECT_ALIGNMENT * HB_BITS_PER_WORD);
-    dst->allocLen = dst->bitsLen;
-    offset = base - src->base;
-    assert(HB_OFFSET_TO_MASK(offset) == 1 << 31);
-    dst->bits = &src->bits[HB_OFFSET_TO_INDEX(offset)];
-}
-
-/*
- * Initializes a vector of object and mark bits to the object and mark
- * bits of each heap.  The bits are aliased to the heapsource
- * object and mark bitmaps.  This routine is used by the sweep code
- * which needs to free each object in the correct heap.
- */
-void dvmHeapSourceGetObjectBitmaps(HeapBitmap liveBits[], HeapBitmap markBits[],
-                                   size_t numHeaps)
+void dvmHeapSourceGetRegions(uintptr_t *base, uintptr_t *max, uintptr_t *limit,
+                             size_t numHeaps)
 {
     HeapSource *hs = gHs;
-    uintptr_t base, max;
     size_t i;
 
     HS_BOILERPLATE();
 
-    assert(numHeaps == hs->numHeaps);
-    for (i = 0; i < hs->numHeaps; ++i) {
-        base = (uintptr_t)hs->heaps[i].base;
-        /* -1 because limit is exclusive but max is inclusive. */
-        max = MIN((uintptr_t)hs->heaps[i].limit - 1, hs->markBits.max);
-        aliasBitmap(&liveBits[i], &hs->liveBits, base, max);
-        aliasBitmap(&markBits[i], &hs->markBits, base, max);
+    assert(numHeaps <= hs->numHeaps);
+    for (i = 0; i < numHeaps; ++i) {
+        base[i] = (uintptr_t)hs->heaps[i].base;
+        if (max != NULL) {
+            max[i] = MIN((uintptr_t)hs->heaps[i].limit - 1, hs->markBits.max);
+        }
+        if (limit != NULL) {
+            limit[i] = (uintptr_t)hs->heaps[i].limit;
+        }
     }
 }
 
@@ -732,6 +725,16 @@
     return &gHs->liveBits;
 }
 
+/*
+ * Get the bitmap representing all marked objects.
+ */
+HeapBitmap *dvmHeapSourceGetMarkBits(void)
+{
+    HS_BOILERPLATE();
+
+    return &gHs->markBits;
+}
+
 void dvmHeapSourceSwapBitmaps(void)
 {
     HeapBitmap tmp;
@@ -936,7 +939,7 @@
     heap = ptr2heap(gHs, *ptrs);
     numBytes = 0;
     if (heap != NULL) {
-        mspace *msp = heap->msp;
+        mspace msp = heap->msp;
         // Calling mspace_free on shared heaps disrupts sharing too
         // much. For heap[0] -- the 'active heap' -- we call
         // mspace_free, but on the other heaps we only do some
diff --git a/vm/alloc/HeapSource.h b/vm/alloc/HeapSource.h
index 22d8dab..be6d2e5 100644
--- a/vm/alloc/HeapSource.h
+++ b/vm/alloc/HeapSource.h
@@ -64,11 +64,12 @@
 void dvmHeapSourceShutdown(GcHeap **gcHeap);
 
 /*
- * Initializes a vector of object and mark bits to the object and mark
- * bits of each heap.
+ * Returns the base and inclusive max addresses of the heap source
+ * heaps.  The base and max values are suitable for passing directly
+ * to the bitmap sweeping routine.
  */
-void dvmHeapSourceGetObjectBitmaps(HeapBitmap liveBits[], HeapBitmap markBits[],
-                                   size_t numHeaps);
+void dvmHeapSourceGetRegions(uintptr_t *base, uintptr_t *max, uintptr_t *limit,
+                             size_t numHeaps);
 
 /*
  * Get the bitmap representing all live objects.
@@ -76,6 +77,11 @@
 HeapBitmap *dvmHeapSourceGetLiveBits(void);
 
 /*
+ * Get the bitmap representing all marked objects.
+ */
+HeapBitmap *dvmHeapSourceGetMarkBits(void);
+
+/*
  * Gets the beginning of the allocation for the HeapSource.
  */
 void *dvmHeapSourceGetBase(void);
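
A sketch of the intended calling pattern for dvmHeapSourceGetRegions, mirroring the sweep loop added to MarkSweep.c later in this change; sweepBitmapCallback and ctx stand in for whatever the caller passes to dvmHeapBitmapSweepWalk, and max/limit may be NULL when not needed, per the HeapSource.c implementation:

    uintptr_t base[HEAP_SOURCE_MAX_HEAP_COUNT];
    uintptr_t max[HEAP_SOURCE_MAX_HEAP_COUNT];
    size_t numHeaps = dvmHeapSourceGetNumHeaps();
    size_t i;

    dvmHeapSourceGetRegions(base, max, NULL, numHeaps);   /* limit[] unused here */
    for (i = 0; i < numHeaps; ++i) {
        dvmHeapBitmapSweepWalk(dvmHeapSourceGetMarkBits(), dvmHeapSourceGetLiveBits(),
                               base[i], max[i], sweepBitmapCallback, &ctx);
    }
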
diff --git a/vm/alloc/HeapTable.c b/vm/alloc/HeapTable.c
index 72cf791..6c9034a 100644
--- a/vm/alloc/HeapTable.c
+++ b/vm/alloc/HeapTable.c
@@ -23,27 +23,11 @@
 static const int kLargeHeapRefTableNElems = 1024;
 static const int  kFinalizableRefDefault = 128;
 
-void dvmHeapHeapTableFree(void *ptr)
+bool dvmHeapInitHeapRefTable(ReferenceTable *refs)
 {
-    free(ptr);
-}
-
-#define heapRefTableIsFull(refs) \
-    dvmIsReferenceTableFull(refs)
-
-bool dvmHeapInitHeapRefTable(HeapRefTable *refs)
-{
-    memset(refs, 0, sizeof(*refs));
     return dvmInitReferenceTable(refs, kFinalizableRefDefault, INT_MAX);
 }
 
-/* Frees the array inside the HeapRefTable, not the HeapRefTable itself.
- */
-void dvmHeapFreeHeapRefTable(HeapRefTable *refs)
-{
-    dvmClearReferenceTable(refs);
-}
-
 /*
  * Large, non-contiguous reference tables
  */
@@ -65,7 +49,7 @@
         /* Find an empty slot for this reference.
          */
         prevTable = NULL;
-        while (table != NULL && heapRefTableIsFull(&table->refs)) {
+        while (table != NULL && dvmIsReferenceTableFull(&table->refs)) {
             prevTable = table;
             table = table->next;
         }
@@ -88,7 +72,7 @@
 
     /* Allocate a new table.
      */
-    table = calloc(1, sizeof(LargeHeapRefTable));
+    table = (LargeHeapRefTable *)calloc(1, sizeof(LargeHeapRefTable));
     if (table == NULL) {
         LOGE_HEAP("Can't allocate a new large ref table\n");
         return false;
@@ -97,7 +81,7 @@
                                kLargeHeapRefTableNElems,
                                INT_MAX)) {
         LOGE_HEAP("Can't initialize a new large ref table\n");
-        dvmHeapHeapTableFree(table);
+        free(table);
         return false;
     }
 
@@ -111,19 +95,19 @@
      */
     assert(table == *tableP);
     assert(table != NULL);
-    assert(!heapRefTableIsFull(&table->refs));
+    assert(!dvmIsReferenceTableFull(&table->refs));
     *table->refs.nextEntry++ = ref;
 
     return true;
 }
 
-bool dvmHeapAddTableToLargeTable(LargeHeapRefTable **tableP, HeapRefTable *refs)
+bool dvmHeapAddTableToLargeTable(LargeHeapRefTable **tableP, ReferenceTable *refs)
 {
     LargeHeapRefTable *table;
 
     /* Allocate a node.
      */
-    table = calloc(1, sizeof(LargeHeapRefTable));
+    table = (LargeHeapRefTable *)calloc(1, sizeof(LargeHeapRefTable));
     if (table == NULL) {
         LOGE_HEAP("Can't allocate a new large ref table\n");
         return false;
@@ -144,8 +128,8 @@
 {
     while (table != NULL) {
         LargeHeapRefTable *next = table->next;
-        dvmHeapFreeHeapRefTable(&table->refs);
-        dvmHeapHeapTableFree(table);
+        dvmClearReferenceTable(&table->refs);
+        free(table);
         table = next;
     }
 }
@@ -160,7 +144,7 @@
     obj = NULL;
     table = *pTable;
     if (table != NULL) {
-        HeapRefTable *refs = &table->refs;
+        ReferenceTable *refs = &table->refs;
 
         /* We should never have an empty table node in the list.
          */
@@ -176,7 +160,7 @@
         if (refs->nextEntry == refs->table) {
             *pTable = table->next;
             dvmClearReferenceTable(refs);
-            dvmHeapHeapTableFree(table);
+            free(table);
         }
     }
 
diff --git a/vm/alloc/HeapTable.h b/vm/alloc/HeapTable.h
index 4f6034c..175111d 100644
--- a/vm/alloc/HeapTable.h
+++ b/vm/alloc/HeapTable.h
@@ -18,31 +18,19 @@
 
 #include "ReferenceTable.h"
 
-typedef ReferenceTable HeapRefTable;
-typedef struct LargeHeapRefTable LargeHeapRefTable;
-
 struct LargeHeapRefTable {
-    LargeHeapRefTable *next;
-    HeapRefTable refs;
+    struct LargeHeapRefTable *next;
+    ReferenceTable refs;
 };
 
-bool dvmHeapInitHeapRefTable(HeapRefTable *refs);
-void dvmHeapFreeHeapRefTable(HeapRefTable *refs);
+typedef struct LargeHeapRefTable LargeHeapRefTable;
+
+bool dvmHeapInitHeapRefTable(ReferenceTable *refs);
 void dvmHeapFreeLargeTable(LargeHeapRefTable *table);
-void dvmHeapHeapTableFree(void *ptr);
 bool dvmHeapAddRefToLargeTable(LargeHeapRefTable **tableP, Object *ref);
 void dvmHeapMarkLargeTableRefs(LargeHeapRefTable *table);
 bool dvmHeapAddTableToLargeTable(LargeHeapRefTable **tableP,
-        HeapRefTable *refs);
+        ReferenceTable *refs);
 Object *dvmHeapGetNextObjectFromLargeTable(LargeHeapRefTable **pTable);
 
-#define dvmHeapAddToHeapRefTable(refs, ptr) \
-            dvmAddToReferenceTable((refs), (ptr))
-
-#define dvmHeapNumHeapRefTableEntries(refs) \
-            dvmReferenceTableEntries(refs)
-
-#define dvmHeapRemoveFromHeapRefTable(refs, ptr) \
-            dvmRemoveFromReferenceTable((refs), (refs)->table, (ptr))
-
 #endif  // _DALVIK_ALLOC_HEAP_TABLE
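
With the HeapRefTable alias removed, a LargeHeapRefTable is simply a linked list of ReferenceTable chunks. A small illustrative walk over every reference in such a list; forEachLargeTableRef is a hypothetical helper, and table/nextEntry are the ReferenceTable fields already used by HeapTable.c above:

    static void forEachLargeTableRef(LargeHeapRefTable *table,
                                     void (*fn)(Object *, void *), void *arg)
    {
        for (; table != NULL; table = table->next) {
            Object **ref;
            for (ref = table->refs.table; ref < table->refs.nextEntry; ++ref) {
                (*fn)(*ref, arg);
            }
        }
    }
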
diff --git a/vm/alloc/HeapWorker.c b/vm/alloc/HeapWorker.c
index 770025e..57089f4 100644
--- a/vm/alloc/HeapWorker.c
+++ b/vm/alloc/HeapWorker.c
@@ -128,7 +128,7 @@
         u8 delta = now - heapWorkerInterpStartTime;
 
         if (delta > HEAP_WORKER_WATCHDOG_TIMEOUT &&
-            (gDvm.debuggerActive || gDvm.nativeDebuggerActive))
+            (DEBUGGER_ACTIVE || gDvm.nativeDebuggerActive))
         {
             /*
              * Debugger suspension can block the thread indefinitely.  For
diff --git a/vm/alloc/MarkSweep.c b/vm/alloc/MarkSweep.c
index 10394f1..bd872df 100644
--- a/vm/alloc/MarkSweep.c
+++ b/vm/alloc/MarkSweep.c
@@ -15,38 +15,25 @@
  */
 
 #include "Dalvik.h"
-#include "alloc/clz.h"
 #include "alloc/CardTable.h"
 #include "alloc/HeapBitmap.h"
+#include "alloc/HeapBitmapInlines.h"
 #include "alloc/HeapInternal.h"
 #include "alloc/HeapSource.h"
 #include "alloc/MarkSweep.h"
 #include "alloc/Visit.h"
+#include "alloc/VisitInlines.h"
 #include <limits.h>     // for ULONG_MAX
 #include <sys/mman.h>   // for madvise(), mmap()
 #include <errno.h>
 
-#define GC_LOG_TAG      LOG_TAG "-gc"
-
-#if LOG_NDEBUG
-#define LOGD_GC(...)    ((void)0)
-#else
-#define LOGD_GC(...)    LOG(LOG_DEBUG, GC_LOG_TAG, __VA_ARGS__)
-#endif
-
-#define LOGE_GC(...)    LOG(LOG_ERROR, GC_LOG_TAG, __VA_ARGS__)
-
-#define ALIGN_DOWN(x, n) ((size_t)(x) & -(n))
-#define ALIGN_UP(x, n) (((size_t)(x) + (n) - 1) & ~((n) - 1))
-#define ALIGN_UP_TO_PAGE_SIZE(p) ALIGN_UP(p, SYSTEM_PAGE_SIZE)
-
 typedef unsigned long Word;
 const size_t kWordSize = sizeof(Word);
 
-/* Do not cast the result of this to a boolean; the only set bit
- * may be > 1<<8.
+/*
+ * Returns true if the given object is marked.
  */
-static long isMarked(const void *obj, const GcMarkContext *ctx)
+static bool isMarked(const Object *obj, const GcMarkContext *ctx)
 {
     return dvmHeapBitmapIsObjectBitSet(ctx->bitmap, obj);
 }
@@ -108,7 +95,7 @@
         return false;
     }
     ctx->finger = NULL;
-    ctx->immuneLimit = dvmHeapSourceGetImmuneLimit(isPartial);
+    ctx->immuneLimit = (char*)dvmHeapSourceGetImmuneLimit(isPartial);
     return true;
 }
 
@@ -154,21 +141,188 @@
 
 /*
  * Callback applied to root references during the initial root
- * marking.  Visited roots are always marked but are only pushed on
- * the mark stack if their address is below the finger.
+ * marking.  Marks white objects but does not push them on the mark
+ * stack.
  */
-static void rootMarkObjectVisitor(void *addr, RootType type, u4 thread, void *arg)
+static void rootMarkObjectVisitor(void *addr, RootType type, u4 thread,
+                                  void *arg)
 {
     Object *obj;
+    GcMarkContext *ctx;
 
     assert(addr != NULL);
     assert(arg != NULL);
     obj = *(Object **)addr;
+    ctx = (GcMarkContext *)arg;
     if (obj != NULL) {
-        markObjectNonNull(obj, arg, false);
+        markObjectNonNull(obj, ctx, false);
     }
 }
 
+/*
+ * Visits all objects that start on the given card.
+ */
+static void visitCard(Visitor *visitor, u1 *card, void *arg)
+{
+    assert(visitor != NULL);
+    assert(card != NULL);
+    assert(dvmIsValidCard(card));
+    u1 *addr = (u1*)dvmAddrFromCard(card);
+    u1 *limit = addr + GC_CARD_SIZE;
+    for (; addr < limit; addr += HB_OBJECT_ALIGNMENT) {
+        Object *obj = (Object *)addr;
+        GcMarkContext *ctx = &gDvm.gcHeap->markContext;
+        if (isMarked(obj, ctx)) {
+            (*visitor)(obj, arg);
+        }
+    }
+}
+
+/*
+ * Visits objects on dirty cards marked in the mod union table.
+ */
+static void visitModUnionTable(Visitor *visitor, u1 *base, u1 *limit, void *arg)
+{
+    assert(visitor != NULL);
+    assert(base != NULL);
+    assert(limit != NULL);
+    assert(base <= limit);
+    u1 *heapBase = (u1*)dvmHeapSourceGetBase();
+    /* compute the start address in the bit table */
+    assert(base >= heapBase);
+    u4 *bits = (u4*)gDvm.gcHeap->modUnionTableBase;
+    /* compute the end address in the bit table */
+    size_t length = (limit - base) / GC_CARD_SIZE;
+    assert(length % sizeof(*bits) == 0);
+    length /= 4;
+    size_t i;
+    for (i = 0; i < length; ++i) {
+        if (bits[i] == 0) {
+            continue;
+        }
+        u4 word = bits[i];
+        bits[i] = 0;
+        size_t j = 0;
+        for (j = 0; j < sizeof(u4)*CHAR_BIT; ++j) {
+            if (word & (1 << j)) {
+                /* compute the base of the card */
+                size_t offset = (i*sizeof(u4)*CHAR_BIT + j) * GC_CARD_SIZE;
+                u1* addr = heapBase + offset;
+                u1* card = dvmCardFromAddr(addr);
+                /* visit all objects on the card */
+                visitCard(visitor, card, arg);
+            }
+        }
+    }
+}
+
+/*
+ * Visits objects on dirty cards marked in the card table.
+ */
+static void visitCardTable(Visitor *visitor, u1 *base, u1 *limit, void *arg)
+{
+    assert(visitor != NULL);
+    assert(base != NULL);
+    assert(limit != NULL);
+    u1 *start = dvmCardFromAddr(base);
+    u1 *end = dvmCardFromAddr(limit);
+    while (start < end) {
+        u1 *dirty = (u1 *)memchr(start, GC_CARD_DIRTY, end - start);
+        if (dirty == NULL) {
+            break;
+        }
+        assert(dirty >= start);
+        assert(dirty <= end);
+        assert(dvmIsValidCard(dirty));
+        visitCard(visitor, dirty, arg);
+        start = dirty + 1;
+    }
+}
+
+typedef struct {
+    Object *threatenBoundary;
+    Object *currObject;
+} ScanImmuneObjectContext;
+
+/*
+ * Marks the referent of an immune object if it is threatened.
+ */
+static void scanImmuneObjectReferent(void *addr, void *arg)
+{
+    assert(addr != NULL);
+    assert(arg != NULL);
+    Object *obj = *(Object **)addr;
+    ScanImmuneObjectContext *ctx = (ScanImmuneObjectContext *)arg;
+    if (obj == NULL) {
+        return;
+    }
+    if (obj >= ctx->threatenBoundary) {
+        /* TODO: set a bit in the mod union table instead. */
+        dvmMarkCard(ctx->currObject);
+        markObjectNonNull(obj, &gDvm.gcHeap->markContext, false);
+   }
+}
+
+/*
+ * Visits each reference held by an immune object, marking any referent
+ * at or beyond the threaten boundary.
+ */
+static void scanImmuneObject(void *addr, void *arg)
+{
+    ScanImmuneObjectContext *ctx = (ScanImmuneObjectContext *)arg;
+    Object *obj = (Object *)addr;
+    ctx->currObject = obj;
+    visitObject(scanImmuneObjectReferent, obj, arg);
+}
+
+/*
+ * Verifies that immune objects have their referents marked.
+ */
+static void verifyImmuneObjectsVisitor(void *addr, void *arg)
+{
+    assert(addr != NULL);
+    assert(arg != NULL);
+    Object *obj = *(Object **)addr;
+    GcMarkContext *ctx = (GcMarkContext *)arg;
+    if (obj == NULL || obj < (Object *)ctx->immuneLimit) {
+        return;
+    }
+    assert(dvmIsValidObject(obj));
+    if (!isMarked(obj, ctx)) {
+        LOGE("Immune reference %p points to a white threatened object %p",
+             addr, obj);
+        dvmAbort();
+    }
+}
+
+/*
+ * Visitor that searches for immune objects and verifies that all
+ * threatened referents are marked.
+ */
+static void verifyImmuneObjectsCallback(void *addr, void *arg)
+{
+    assert(addr != NULL);
+    assert(arg != NULL);
+    Object *obj = (Object *)addr;
+    GcMarkContext *ctx = (GcMarkContext *)arg;
+    if (obj->clazz == NULL) {
+        LOGI("uninitialized object @ %p (has null clazz pointer)", obj);
+        return;
+    }
+    if (obj < (Object *)ctx->immuneLimit) {
+        visitObject(verifyImmuneObjectsVisitor, obj, ctx);
+    }
+}
+
+/*
+ * Verify that immune objects refer to marked objects.
+ */
+static void verifyImmuneObjects()
+{
+    const HeapBitmap *bitmap = dvmHeapSourceGetLiveBits();
+    GcMarkContext *ctx = &gDvm.gcHeap->markContext;
+    dvmHeapBitmapWalk(bitmap, verifyImmuneObjectsCallback, ctx);
+}
+
 /* Mark the set of root objects.
  *
  * Things we need to scan:
@@ -195,6 +349,10 @@
  * - Native stack (for in-progress stuff in the VM)
  *   - The TrackedAlloc stuff watches all native VM references.
  */
+
+/*
+ * Blackens the root set.
+ */
 void dvmHeapMarkRootSet()
 {
     GcHeap *gcHeap = gDvm.gcHeap;
@@ -203,19 +361,21 @@
 }
 
 /*
- * Callback applied to root references during root remarking.  If the
- * root location contains a white reference it is pushed on the mark
- * stack and grayed.
+ * Callback applied to root references during root remarking.  Marks
+ * white objects and pushes them on the mark stack.
  */
-static void markObjectVisitor(void *addr, RootType type, u4 thread, void *arg)
+static void rootReMarkObjectVisitor(void *addr, RootType type, u4 thread,
+                                    void *arg)
 {
     Object *obj;
+    GcMarkContext *ctx;
 
     assert(addr != NULL);
     assert(arg != NULL);
     obj = *(Object **)addr;
+    ctx = (GcMarkContext *)arg;
     if (obj != NULL) {
-        markObjectNonNull(obj, arg, true);
+        markObjectNonNull(obj, ctx, true);
     }
 }
 
@@ -226,7 +386,7 @@
 {
     GcMarkContext *ctx = &gDvm.gcHeap->markContext;
     assert(ctx->finger == (void *)ULONG_MAX);
-    dvmVisitRoots(markObjectVisitor, ctx);
+    dvmVisitRoots(rootReMarkObjectVisitor, ctx);
 }
 
 /*
@@ -254,7 +414,8 @@
             int i;
             for (i = 0; i < clazz->ifieldRefCount; ++i, ++field) {
                 void *addr = BYTE_OFFSET((Object *)obj, field->byteOffset);
-                markObject(((JValue *)addr)->l, ctx);
+                Object *ref = (Object *)((JValue *)addr)->l;
+                markObject(ref, ctx);
             }
         }
     }
@@ -272,7 +433,8 @@
     for (i = 0; i < clazz->sfieldCount; ++i) {
         char ch = clazz->sfields[i].field.signature[0];
         if (ch == '[' || ch == 'L') {
-            markObject(clazz->sfields[i].value.l, ctx);
+            Object *obj = (Object *)clazz->sfields[i].value.l;
+            markObject(obj, ctx);
         }
     }
 }
@@ -475,7 +637,9 @@
 {
     assert(obj != NULL);
     assert(ctx != NULL);
+    assert(isMarked(obj, ctx));
     assert(obj->clazz != NULL);
+    assert(isMarked(obj, ctx));
     if (obj->clazz == gDvm.classJavaLangClass) {
         scanClassObject(obj, ctx);
     } else if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISARRAY)) {
@@ -503,176 +667,31 @@
     }
 }
 
-static size_t objectSize(const Object *obj)
-{
-    assert(dvmIsValidObject(obj));
-    assert(dvmIsValidObject((Object *)obj->clazz));
-    if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISARRAY)) {
-        return dvmArrayObjectSize((ArrayObject *)obj);
-    } else if (obj->clazz == gDvm.classJavaLangClass) {
-        return dvmClassObjectSize((ClassObject *)obj);
-    } else {
-        return obj->clazz->objectSize;
-    }
-}
-
-/*
- * Scans forward to the header of the next marked object between start
- * and limit.  Returns NULL if no marked objects are in that region.
- */
-static Object *nextGrayObject(const u1 *base, const u1 *limit,
-                              const HeapBitmap *markBits)
-{
-    const u1 *ptr;
-
-    assert(base < limit);
-    assert(limit - base <= GC_CARD_SIZE);
-    for (ptr = base; ptr < limit; ptr += HB_OBJECT_ALIGNMENT) {
-        if (dvmHeapBitmapIsObjectBitSet(markBits, ptr))
-            return (Object *)ptr;
-    }
-    return NULL;
-}
-
-/*
- * Scans each byte from start below end returning the address of the
- * first dirty card.  Returns NULL if no dirty card is found.
- */
-static const u1 *scanBytesForDirtyCard(const u1 *start, const u1 *end)
-{
-    const u1 *ptr;
-
-    assert(start <= end);
-    for (ptr = start; ptr < end; ++ptr) {
-        if (*ptr == GC_CARD_DIRTY) {
-            return ptr;
-        }
-    }
-    return NULL;
-}
-
-/*
- * Like scanBytesForDirtyCard but scans the range from start below end
- * by words.  Assumes start and end are word aligned.
- */
-static const u1 *scanWordsForDirtyCard(const u1 *start, const u1 *end)
-{
-    const u1 *ptr;
-
-    assert((uintptr_t)start % kWordSize == 0);
-    assert((uintptr_t)end % kWordSize == 0);
-    assert(start <= end);
-    for (ptr = start; ptr < end; ptr += kWordSize) {
-        if (*(const Word *)ptr != 0) {
-            const u1 *dirty = scanBytesForDirtyCard(ptr, ptr + kWordSize);
-            if (dirty != NULL) {
-                return dirty;
-            }
-        }
-    }
-    return NULL;
-}
-
-/*
- * Scans the card table as quickly as possible looking for a dirty
- * card.  Returns the address of the first dirty card found or NULL if
- * no dirty cards were found.
- */
-static const u1 *nextDirtyCard(const u1 *start, const u1 *end)
-{
-    const u1 *wstart = (u1 *)ALIGN_UP(start, kWordSize);
-    const u1 *wend = (u1 *)ALIGN_DOWN(end, kWordSize);
-    const u1 *ptr, *dirty;
-
-    assert(start <= end);
-    assert(start <= wstart);
-    assert(end >= wend);
-    ptr = start;
-    if (wstart < end) {
-        /* Scan the leading unaligned bytes. */
-        dirty = scanBytesForDirtyCard(ptr, wstart);
-        if (dirty != NULL) {
-            return dirty;
-        }
-        /* Scan the range of aligned words. */
-        dirty = scanWordsForDirtyCard(wstart, wend);
-        if (dirty != NULL) {
-            return dirty;
-        }
-        ptr = wend;
-    }
-    /* Scan trailing unaligned bytes. */
-    dirty = scanBytesForDirtyCard(ptr, end);
-    if (dirty != NULL) {
-        return dirty;
-    }
-    return NULL;
-}
-
-/*
- * Scans range of dirty cards between start and end.  A range of dirty
- * cards is composed consecutively dirty cards or dirty cards spanned
- * by a gray object.  Returns the address of a clean card if the scan
- * reached a clean card or NULL if the scan reached the end.
- */
-const u1 *scanDirtyCards(const u1 *start, const u1 *end,
-                         GcMarkContext *ctx)
-{
-    const HeapBitmap *markBits = ctx->bitmap;
-    const u1 *card = start, *prevAddr = NULL;
-    while (card < end) {
-        if (*card != GC_CARD_DIRTY) {
-            return card;
-        }
-        const u1 *ptr = prevAddr ? prevAddr : dvmAddrFromCard(card);
-        const u1 *limit = ptr + GC_CARD_SIZE;
-        while (ptr < limit) {
-            Object *obj = nextGrayObject(ptr, limit, markBits);
-            if (obj == NULL) {
-                break;
-            }
-            scanObject(obj, ctx);
-            ptr = (u1*)obj + ALIGN_UP(objectSize(obj), HB_OBJECT_ALIGNMENT);
-        }
-        if (ptr < limit) {
-            /* Ended within the current card, advance to the next card. */
-            ++card;
-            prevAddr = NULL;
-        } else {
-            /* Ended past the current card, skip ahead. */
-            card = dvmCardFromAddr(ptr);
-            prevAddr = ptr;
-        }
-    }
-    return NULL;
-}
-
 /*
  * Blackens gray objects found on dirty cards.
  */
 static void scanGrayObjects(GcMarkContext *ctx)
 {
-    GcHeap *h = gDvm.gcHeap;
-    const u1 *base, *limit, *ptr, *dirty;
-    size_t footprint;
+    HeapBitmap *bitmap = ctx->bitmap;
+    u1 *base = (u1 *)bitmap->base;
+    u1 *limit = (u1 *)ALIGN_UP(bitmap->max, GC_CARD_SIZE);
+    visitCardTable((Visitor *)scanObject, base, limit, ctx);
+}
 
-    footprint = dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0);
-    base = &h->cardTableBase[0];
-    limit = dvmCardFromAddr((u1 *)dvmHeapSourceGetBase() + footprint);
-    assert(limit <= &h->cardTableBase[h->cardTableLength]);
-
-    ptr = base;
-    for (;;) {
-        dirty = nextDirtyCard(ptr, limit);
-        if (dirty == NULL) {
-            break;
-        }
-        assert((dirty > ptr) && (dirty < limit));
-        ptr = scanDirtyCards(dirty, limit, ctx);
-        if (ptr == NULL) {
-            break;
-        }
-        assert((ptr > dirty) && (ptr < limit));
+/*
+ * Iterate through the immune objects and mark their referents.  Uses
+ * the mod union table to save scanning time.
+ */
+void dvmHeapScanImmuneObjects(const GcMarkContext *ctx)
+{
+    ScanImmuneObjectContext ctx2;
+    memset(&ctx2, 0, sizeof(ctx2));
+    ctx2.threatenBoundary = (Object*)ctx->immuneLimit;
+    visitModUnionTable(scanImmuneObject,
+                       (u1*)ctx->bitmap->base, (u1*)ctx->immuneLimit,
+                       (void *)&ctx2);
+    if (gDvm.verifyCardTable) {
+        verifyImmuneObjects();
     }
 }
 
@@ -683,31 +702,41 @@
  */
 static void scanBitmapCallback(void *addr, void *finger, void *arg)
 {
-    GcMarkContext *ctx = arg;
+    GcMarkContext *ctx = (GcMarkContext *)arg;
     ctx->finger = (void *)finger;
-    scanObject(addr, ctx);
+    scanObject((Object *)addr, ctx);
 }
 
 /* Given bitmaps with the root set marked, find and mark all
  * reachable objects.  When this returns, the entire set of
  * live objects will be marked and the mark stack will be empty.
  */
-void dvmHeapScanMarkedObjects(void)
+void dvmHeapScanMarkedObjects(bool isPartial)
 {
     GcMarkContext *ctx = &gDvm.gcHeap->markContext;
 
+    assert(ctx != NULL);
     assert(ctx->finger == NULL);
 
-    /* The bitmaps currently have bits set for the root set.
-     * Walk across the bitmaps and scan each object.
+    u1 *start;
+    if (isPartial && dvmHeapSourceGetNumHeaps() > 1) {
+        dvmHeapScanImmuneObjects(ctx);
+        start = (u1 *)ctx->immuneLimit;
+    } else {
+        start = (u1*)ctx->bitmap->base;
+    }
+    /*
+     * All objects reachable from the root set have a bit set in the
+     * mark bitmap.  Walk the mark bitmap and blacken these objects.
      */
-    dvmHeapBitmapScanWalk(ctx->bitmap, scanBitmapCallback, ctx);
+    dvmHeapBitmapScanWalk(ctx->bitmap,
+                          (uintptr_t)start, ctx->bitmap->max,
+                          scanBitmapCallback,
+                          ctx);
 
     ctx->finger = (void *)ULONG_MAX;
 
-    /* We've walked the mark bitmaps.  Scan anything that's
-     * left on the mark stack.
-     */
+    /* Process gray objects until the mark stack is empty. */
     processMarkStack(ctx);
 }
 
@@ -848,7 +877,7 @@
  */
 static void scheduleFinalizations(void)
 {
-    HeapRefTable newPendingRefs;
+    ReferenceTable newPendingRefs;
     LargeHeapRefTable *finRefs = gDvm.gcHeap->finalizableRefs;
     Object **ref;
     Object **lastRef;
@@ -868,8 +897,7 @@
         //      we can schedule them next time.  Watch out,
         //      because we may be expecting to free up space
         //      by calling finalizers.
-        LOGE_GC("scheduleFinalizations(): no room for "
-                "pending finalizations");
+        LOGE("scheduleFinalizations(): no room for pending finalizations");
         dvmAbort();
     }
 
@@ -885,12 +913,12 @@
         lastRef = finRefs->refs.nextEntry;
         while (ref < lastRef) {
             if (!isMarked(*ref, ctx)) {
-                if (!dvmHeapAddToHeapRefTable(&newPendingRefs, *ref)) {
+                if (!dvmAddToReferenceTable(&newPendingRefs, *ref)) {
                     //TODO: add the current table and allocate
                     //      a new, smaller one.
-                    LOGE_GC("scheduleFinalizations(): "
-                            "no room for any more pending finalizations: %zd",
-                            dvmHeapNumHeapRefTableEntries(&newPendingRefs));
+                    LOGE("scheduleFinalizations(): "
+                         "no room for any more pending finalizations: %zd",
+                         dvmReferenceTableEntries(&newPendingRefs));
                     dvmAbort();
                 }
                 newPendCount++;
@@ -914,8 +942,7 @@
         totalPendCount += newPendCount;
         finRefs = finRefs->next;
     }
-    LOGD_GC("scheduleFinalizations(): %zd finalizers triggered.",
-            totalPendCount);
+    LOGV("scheduleFinalizations(): %zd finalizers triggered.", totalPendCount);
     if (totalPendCount == 0) {
         /* No objects required finalization.
          * Free the empty temporary table.
@@ -929,8 +956,7 @@
     if (!dvmHeapAddTableToLargeTable(&gDvm.gcHeap->pendingFinalizationRefs,
                 &newPendingRefs))
     {
-        LOGE_GC("scheduleFinalizations(): can't insert new "
-                "pending finalizations");
+        LOGE("scheduleFinalizations(): can't insert new pending finalizations");
         dvmAbort();
     }
 
@@ -952,6 +978,24 @@
 }
 
 /*
+ * This object is an instance of a class that overrides finalize().  Mark
+ * it as finalizable.
+ *
+ * This is called when Object.<init> completes normally.  It's also
+ * called for clones of finalizable objects.
+ */
+void dvmSetFinalizable(Object* obj)
+{
+    dvmLockHeap();
+    GcHeap* gcHeap = gDvm.gcHeap;
+    if (!dvmHeapAddRefToLargeTable(&gcHeap->finalizableRefs, obj)) {
+        LOGE_HEAP("No room for any more finalizable objects");
+        dvmAbort();
+    }
+    dvmUnlockHeap();
+}
+
+/*
  * Process reference class instances and schedule finalizations.
  */
 void dvmHeapProcessReferences(Object **softReferences, bool clearSoftRefs,
@@ -962,10 +1006,11 @@
     assert(weakReferences != NULL);
     assert(phantomReferences != NULL);
     /*
-     * Unless we are required to clear soft references with white
-     * references, preserve some white referents.
+     * Unless we are in the zygote or required to clear soft
+     * references with white references, preserve some white
+     * referents.
      */
-    if (!clearSoftRefs) {
+    if (!gDvm.zygote && !clearSoftRefs) {
         preserveSomeSoftReferences(softReferences);
     }
     /*
@@ -1022,7 +1067,7 @@
 
 static void sweepBitmapCallback(size_t numPtrs, void **ptrs, void *arg)
 {
-    SweepContext *ctx = arg;
+    SweepContext *ctx = (SweepContext *)arg;
 
     if (ctx->isConcurrent) {
         dvmLockHeap();
@@ -1038,10 +1083,23 @@
  * Returns true if the given object is unmarked.  This assumes that
  * the bitmaps have not yet been swapped.
  */
-static int isUnmarkedObject(void *object)
+static int isUnmarkedObject(void *obj)
 {
-    return !isMarked((void *)((uintptr_t)object & ~(HB_OBJECT_ALIGNMENT-1)),
-            &gDvm.gcHeap->markContext);
+    return !isMarked((Object *)obj, &gDvm.gcHeap->markContext);
+}
+
+void sweepWeakJniGlobals(void)
+{
+    IndirectRefTable *table = &gDvm.jniWeakGlobalRefTable;
+    Object **entry = table->table;
+    GcMarkContext *ctx = &gDvm.gcHeap->markContext;
+    int numEntries = dvmIndirectRefTableEntries(table);
+    int i;
+    for (i = 0; i < numEntries; ++i) {
+        if (entry[i] != NULL && !isMarked(entry[i], ctx)) {
+            entry[i] = NULL;
+        }
+    }
 }
 
 /*
@@ -1052,6 +1110,7 @@
 {
     dvmGcDetachDeadInternedStrings(isUnmarkedObject);
     dvmSweepMonitorList(&gDvm.monitorList, isUnmarkedObject);
+    sweepWeakJniGlobals();
 }
 
 /*
@@ -1061,26 +1120,28 @@
 void dvmHeapSweepUnmarkedObjects(bool isPartial, bool isConcurrent,
                                  size_t *numObjects, size_t *numBytes)
 {
-    HeapBitmap currMark[HEAP_SOURCE_MAX_HEAP_COUNT];
-    HeapBitmap currLive[HEAP_SOURCE_MAX_HEAP_COUNT];
+    uintptr_t base[HEAP_SOURCE_MAX_HEAP_COUNT];
+    uintptr_t max[HEAP_SOURCE_MAX_HEAP_COUNT];
     SweepContext ctx;
-    size_t numBitmaps, numSweepBitmaps;
+    HeapBitmap *prevLive, *prevMark;
+    size_t numHeaps, numSweepHeaps;
     size_t i;
 
-    numBitmaps = dvmHeapSourceGetNumHeaps();
-    dvmHeapSourceGetObjectBitmaps(currLive, currMark, numBitmaps);
+    numHeaps = dvmHeapSourceGetNumHeaps();
+    dvmHeapSourceGetRegions(base, max, NULL, numHeaps);
     if (isPartial) {
-        numSweepBitmaps = 1;
-        assert((uintptr_t)gDvm.gcHeap->markContext.immuneLimit == currLive[0].base);
+        assert((uintptr_t)gDvm.gcHeap->markContext.immuneLimit == base[0]);
+        numSweepHeaps = 1;
     } else {
-        numSweepBitmaps = numBitmaps;
+        numSweepHeaps = numHeaps;
     }
     ctx.numObjects = ctx.numBytes = 0;
     ctx.isConcurrent = isConcurrent;
-    for (i = 0; i < numSweepBitmaps; i++) {
-        HeapBitmap* prevLive = &currMark[i];
-        HeapBitmap* prevMark = &currLive[i];
-        dvmHeapBitmapSweepWalk(prevLive, prevMark, sweepBitmapCallback, &ctx);
+    prevLive = dvmHeapSourceGetMarkBits();
+    prevMark = dvmHeapSourceGetLiveBits();
+    for (i = 0; i < numSweepHeaps; ++i) {
+        dvmHeapBitmapSweepWalk(prevLive, prevMark, base[i], max[i],
+                               sweepBitmapCallback, &ctx);
     }
     *numObjects = ctx.numObjects;
     *numBytes = ctx.numBytes;
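
The bit-to-card arithmetic in visitModUnionTable above is easier to follow with a concrete case; a sketch, assuming the usual 128-byte GC_CARD_SIZE (the real value comes from CardTable.h):

    /* Word i = 2 with bit j = 5 set marks the card covering heap addresses
     * starting at heapBase + (2*32 + 5) * GC_CARD_SIZE; with 128-byte cards
     * that is heapBase + 69 * 128 = heapBase + 8832.
     */
    size_t offset = (i * sizeof(u4) * CHAR_BIT + j) * GC_CARD_SIZE;
    u1 *card = dvmCardFromAddr(heapBase + offset);
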
diff --git a/vm/alloc/MarkSweep.h b/vm/alloc/MarkSweep.h
index 9b57f45..0672aa8 100644
--- a/vm/alloc/MarkSweep.h
+++ b/vm/alloc/MarkSweep.h
@@ -49,7 +49,7 @@
 bool dvmHeapBeginMarkStep(bool isPartial);
 void dvmHeapMarkRootSet(void);
 void dvmHeapReMarkRootSet(void);
-void dvmHeapScanMarkedObjects(void);
+void dvmHeapScanMarkedObjects(bool isPartial);
 void dvmHeapReScanMarkedObjects(void);
 void dvmHeapProcessReferences(Object **softReferences, bool clearSoftRefs,
                               Object **weakReferences,
diff --git a/vm/alloc/TEST/HeapBitmapTest/Makefile b/vm/alloc/TEST/HeapBitmapTest/Makefile
index fe31b24..969eb63 100644
--- a/vm/alloc/TEST/HeapBitmapTest/Makefile
+++ b/vm/alloc/TEST/HeapBitmapTest/Makefile
@@ -10,12 +10,9 @@
 out/main.o: main.c ../../HeapBitmap.h
 	$(CC) $(CFLAGS) -c $< -o $@ -I ../..
 
-out/HeapBitmap.o: ../../HeapBitmap.c ../../HeapBitmap.h ../../clz.h include/cutils/ashmem.h include/Dalvik.h
+out/HeapBitmap.o: ../../HeapBitmap.c ../../HeapBitmap.h include/cutils/ashmem.h include/Dalvik.h
 	$(CC) $(CFLAGS) -c $< -o $@ -I ../.. -I include
 
-out/clz.o: ../../clz.c ../../clz.h
-	$(CC) $(CFLAGS) -c $< -o $@ -I ../..
-
 out/hbtest: out/main.o out/HeapBitmap.o out/clz.o
 	$(CC) $^ -o $@
 
diff --git a/vm/alloc/Verify.c b/vm/alloc/Verify.c
index 5ce692c..f36f44b 100644
--- a/vm/alloc/Verify.c
+++ b/vm/alloc/Verify.c
@@ -20,6 +20,11 @@
 #include "alloc/Verify.h"
 #include "alloc/Visit.h"
 
+/*
+ * Visitor applied to each reference field when searching for things
+ * that point to an object.  Sets the argument to NULL when a match is
+ * found.
+ */
 static void dumpReferencesVisitor(void *pObj, void *arg)
 {
     Object *obj = *(Object **)pObj;
@@ -29,19 +34,27 @@
     }
 }
 
+/*
+ * Visitor applied to each bitmap element to search for things that
+ * point to an object.  Logs a message when a match is found.
+ */
 static void dumpReferencesCallback(void *ptr, void *arg)
 {
-    Object *obj = arg;
+    Object *obj = (Object *)arg;
     if (ptr == obj) {
         return;
     }
-    dvmVisitObject(dumpReferencesVisitor, ptr, &obj);
+    dvmVisitObject(dumpReferencesVisitor, (Object *)ptr, &obj);
     if (obj == NULL) {
         LOGD("Found %p in the heap @ %p", arg, ptr);
-        dvmDumpObject(ptr);
+        dvmDumpObject((Object *)ptr);
     }
 }
 
+/*
+ * Visitor applied to each root to search for things that point to an
+ * object.  Logs a message when a match is found.
+ */
 static void dumpReferencesRootVisitor(void *ptr, u4 threadId,
                                       RootType type, void *arg)
 {
@@ -79,7 +92,7 @@
         isValid = dvmIsValidObject(obj);
     }
     if (!isValid) {
-        Object **parent = arg;
+        Object **parent = (Object **)arg;
         if (*parent != NULL) {
             LOGE("Verify of object %p failed", *parent);
             dvmDumpObject(*parent);
@@ -108,7 +121,7 @@
  */
 static void verifyBitmapCallback(void *ptr, void *arg)
 {
-    dvmVerifyObject(ptr);
+    dvmVerifyObject((Object *)ptr);
 }
 
 /*
diff --git a/vm/alloc/Visit.c b/vm/alloc/Visit.c
index a317955..7e44198 100644
--- a/vm/alloc/Visit.c
+++ b/vm/alloc/Visit.c
@@ -15,7 +15,6 @@
  */
 
 #include "Dalvik.h"
-#include "alloc/clz.h"
 #include "alloc/HeapInternal.h"
 #include "alloc/Visit.h"
 #include "alloc/VisitInlines.h"
@@ -83,6 +82,22 @@
 }
 
 /*
+ * Visits all entries in the indirect reference table.
+ */
+static void visitIndirectRefTable(RootVisitor *visitor, IndirectRefTable *table,
+                                  u4 threadId, RootType type, void *arg)
+{
+    assert(visitor != NULL);
+    assert(table != NULL);
+    Object **entry = table->table;
+    int numEntries = dvmIndirectRefTableEntries(table);
+    int i;
+    for (i = 0; i < numEntries; ++i) {
+        (*visitor)(&entry[i], threadId, type, arg);
+    }
+}
+
+/*
  * Visits a large heap reference table.  These objects are list heads.
  * As such, it is valid for table to be NULL.
  */
@@ -110,7 +125,7 @@
     assert(thread != NULL);
     threadId = thread->threadId;
     fp = (u4 *)thread->curFrame;
-    for (; fp != NULL; fp = saveArea->prevFrame) {
+    for (; fp != NULL; fp = (u4 *)saveArea->prevFrame) {
         Method *method;
         saveArea = SAVEAREA_FROM_FP(fp);
         method = (Method *)saveArea->method;
@@ -193,7 +208,7 @@
     (*visitor)(&thread->threadObj, threadId, ROOT_THREAD_OBJECT, arg);
     (*visitor)(&thread->exception, threadId, ROOT_NATIVE_STACK, arg);
     visitReferenceTable(visitor, &thread->internalLocalRefTable, threadId, ROOT_NATIVE_STACK, arg);
-    visitReferenceTable(visitor, &thread->jniLocalRefTable, threadId, ROOT_JNI_LOCAL, arg);
+    visitIndirectRefTable(visitor, &thread->jniLocalRefTable, threadId, ROOT_JNI_LOCAL, arg);
     if (thread->jniMonitorRefTable.table != NULL) {
         visitReferenceTable(visitor, &thread->jniMonitorRefTable, threadId, ROOT_JNI_MONITOR, arg);
     }
@@ -232,7 +247,7 @@
         visitHashTable(visitor, gDvm.literalStrings, ROOT_INTERNED_STRING, arg);
     }
     dvmLockMutex(&gDvm.jniGlobalRefLock);
-    visitReferenceTable(visitor, &gDvm.jniGlobalRefTable, 0, ROOT_JNI_GLOBAL, arg);
+    visitIndirectRefTable(visitor, &gDvm.jniGlobalRefTable, 0, ROOT_JNI_GLOBAL, arg);
     dvmUnlockMutex(&gDvm.jniGlobalRefLock);
     dvmLockMutex(&gDvm.jniPinRefLock);
     visitReferenceTable(visitor, &gDvm.jniPinRefTable, 0, ROOT_VM_INTERNAL, arg);
diff --git a/vm/alloc/VisitInlines.h b/vm/alloc/VisitInlines.h
index 959567e..7f90678 100644
--- a/vm/alloc/VisitInlines.h
+++ b/vm/alloc/VisitInlines.h
@@ -30,7 +30,7 @@
         while (refOffsets != 0) {
             size_t rshift = CLZ(refOffsets);
             size_t offset = CLASS_OFFSET_FROM_CLZ(rshift);
-            Object **ref = BYTE_OFFSET(obj, offset);
+            Object **ref = (Object **)BYTE_OFFSET(obj, offset);
             (*visitor)(ref, arg);
             refOffsets &= ~(CLASS_HIGH_BIT >> rshift);
         }
@@ -41,7 +41,7 @@
             int i;
             for (i = 0; i < clazz->ifieldRefCount; ++i, ++field) {
                 size_t offset = field->byteOffset;
-                Object **ref = BYTE_OFFSET(obj, offset);
+                Object **ref = (Object **)BYTE_OFFSET(obj, offset);
                 (*visitor)(ref, arg);
             }
         }
@@ -152,7 +152,7 @@
     assert(obj->clazz != NULL);
     visitDataObject(visitor, obj, arg);
     size_t offset = gDvm.offJavaLangRefReference_referent;
-    Object **ref = BYTE_OFFSET(obj, offset);
+    Object **ref = (Object **)BYTE_OFFSET(obj, offset);
     (*visitor)(ref, arg);
 }
 
diff --git a/vm/alloc/clz.c b/vm/alloc/clz.c
deleted file mode 100644
index 3488975..0000000
--- a/vm/alloc/clz.c
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2007 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "clz.h"
-
-/*
- * Implementation of CLZ; intended to mimic gcc's __builtin_clz.
- *
- * Returns the number of leading zero bits, starting at the most
- * significant bit position.  If the argument is zero, the result is
- * undefined.
- *
- * (For best results, this file should always be compiled for ARM, not THUMB.)
- */
-int dvmClzImpl(unsigned int x)
-{
-#ifdef HAVE_BUILTIN_CLZ
-    /*
-     * This file was compiled with flags that allow it to use the built-in
-     * CLZ (e.g. ARM mode for ARMv5 or later).
-     */
-    return __builtin_clz(x);
-#else
-    /*
-     * Built-in version not available.
-     */
-    if (!x) return 32;
-    int e = 31;
-    if (x&0xFFFF0000)   { e -=16; x >>=16; }
-    if (x&0x0000FF00)   { e -= 8; x >>= 8; }
-    if (x&0x000000F0)   { e -= 4; x >>= 4; }
-    if (x&0x0000000C)   { e -= 2; x >>= 2; }
-    if (x&0x00000002)   { e -= 1; }
-    return e;
-#endif
-}
diff --git a/vm/alloc/clz.h b/vm/alloc/clz.h
deleted file mode 100644
index 77fa6d4..0000000
--- a/vm/alloc/clz.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (C) 2007 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- * Implementation of clz(), which returns the number of leading zero bits,
- * starting at the most significant bit position.  If the argument is zero,
- * the result is undefined.
- *
- * On some platforms, gcc provides a __builtin_clz() function that uses
- * an optimized implementation (e.g. the CLZ instruction on ARM).
- *
- * This gets a little tricky for ARM, because it's only available in ARMv5
- * and above, and even on ARMv5 it's not available for THUMB code.  So we
- * need to tailor this for every source file.
- */
-#ifndef _DALVIK_CLZ
-
-#if defined(__arm__) && !defined(__thumb__)
-# include <machine/cpu-features.h>
-# if defined(__ARM_HAVE_CLZ)
-#  define CLZ(x) __builtin_clz(x)
-#  define HAVE_BUILTIN_CLZ
-# endif
-#endif
-
-#ifndef HAVE_BUILTIN_CLZ
-# define CLZ(x) dvmClzImpl(x)
-int dvmClzImpl(unsigned int x);
-#endif
-
-#endif // _DALVIK_CLZ
diff --git a/vm/analysis/CodeVerify.c b/vm/analysis/CodeVerify.c
index 8bb0da0..37a8951 100644
--- a/vm/analysis/CodeVerify.c
+++ b/vm/analysis/CodeVerify.c
@@ -22,6 +22,7 @@
  * some string-peeling and wouldn't need to compute hashes.
  */
 #include "Dalvik.h"
+#include "analysis/Liveness.h"
 #include "analysis/CodeVerify.h"
 #include "analysis/Optimize.h"
 #include "analysis/RegisterMap.h"
@@ -55,30 +56,10 @@
 # define DEAD_CODE_SCAN  false
 #endif
 
-static bool gDebugVerbose = false;      // TODO: remove this
+static bool gDebugVerbose = false;
 
-#if 0
-int gDvm__totalInstr = 0;
-int gDvm__gcInstr = 0;
-int gDvm__gcData = 0;
-int gDvm__gcSimpleData = 0;
-#endif
-
-/*
- * Selectively enable verbose debug logging -- use this to activate
- * dumpRegTypes() calls for all instructions in the specified method.
- */
-static inline bool doVerboseLogging(const Method* meth) {
-    return false;       /* COMMENT OUT to enable verbose debugging */
-
-    const char* cd = "Lcom/android/bluetooth/opp/BluetoothOppService;";
-    const char* mn = "scanFile";
-    const char* sg = "(Landroid/database/Cursor;I)Z";
-    return (strcmp(meth->clazz->descriptor, cd) == 0 &&
-            dvmCompareNameDescriptorAndMethod(mn, sg, meth) == 0);
-}
-
-#define SHOW_REG_DETAILS    (0 /*| DRT_SHOW_REF_TYPES | DRT_SHOW_LOCALS*/)
+#define SHOW_REG_DETAILS \
+    (0 | DRT_SHOW_LIVENESS /*| DRT_SHOW_REF_TYPES | DRT_SHOW_LOCALS*/)
 
 /*
  * We need an extra "pseudo register" to hold the return type briefly.  It
@@ -131,13 +112,12 @@
     const DecodedInstruction* pDecInsn, VerifyError* pFailure);
 static void verifyRegisterType(const RegisterLine* registerLine, \
     u4 vsrc, RegType checkType, VerifyError* pFailure);
-static bool doCodeVerification(const Method* meth, InsnFlags* insnFlags,\
-    RegisterTable* regTable, UninitInstanceMap* uninitMap);
+static bool doCodeVerification(VerifierData* vdata, RegisterTable* regTable);
 static bool verifyInstruction(const Method* meth, InsnFlags* insnFlags,\
     RegisterTable* regTable, int insnIdx, UninitInstanceMap* uninitMap,
     int* pStartGuess);
 static ClassObject* findCommonSuperclass(ClassObject* c1, ClassObject* c2);
-static void dumpRegTypes(const Method* meth, const InsnFlags* insnFlags,\
+static void dumpRegTypes(const VerifierData* vdata, \
     const RegisterLine* registerLine, int addr, const char* addrName,
     const UninitInstanceMap* uninitMap, int displayFlags);
 
@@ -146,6 +126,7 @@
     DRT_SIMPLE          = 0,
     DRT_SHOW_REF_TYPES  = 0x01,
     DRT_SHOW_LOCALS     = 0x02,
+    DRT_SHOW_LIVENESS   = 0x04,
 };
 
 
@@ -427,7 +408,7 @@
      */
     int size = offsetof(UninitInstanceMap, map) +
                 newInstanceCount * sizeof(uninitMap->map[0]);
-    uninitMap = calloc(1, size);
+    uninitMap = (UninitInstanceMap*)calloc(1, size);
     if (uninitMap == NULL)
         return NULL;
     uninitMap->numEntries = newInstanceCount;
@@ -443,7 +424,8 @@
     for (addr = 0; addr < insnsSize; /**/) {
         int width = dvmInsnGetWidth(insnFlags, addr);
 
-        if ((*insns & 0xff) == OP_NEW_INSTANCE)
+        Opcode opcode = dexOpcodeFromCodeUnit(*insns);
+        if (opcode == OP_NEW_INSTANCE || opcode == OP_NEW_INSTANCE_JUMBO)
             uninitMap->map[idx++].addr = addr;
 
         addr += width;
@@ -2560,8 +2542,8 @@
     } else {
         if (gDebugVerbose) {
             LOGVV("MERGE into 0x%04x\n", nextInsn);
-            //dumpRegTypes(meth, insnFlags, targetRegs, 0, "targ", NULL, 0);
-            //dumpRegTypes(meth, insnFlags, workRegs, 0, "work", NULL, 0);
+            //dumpRegTypes(vdata, targetRegs, 0, "targ", NULL, 0);
+            //dumpRegTypes(vdata, workRegs, 0, "work", NULL, 0);
         }
         /* merge registers, set Changed only if different */
         RegisterLine* targetLine = getRegisterLine(regTable, nextInsn);
@@ -2605,7 +2587,7 @@
 
         if (gDebugVerbose) {
             //LOGI(" RESULT (changed=%d)\n", changed);
-            //dumpRegTypes(meth, insnFlags, targetRegs, 0, "rslt", NULL, 0);
+            //dumpRegTypes(vdata, targetRegs, 0, "rslt", NULL, 0);
         }
 #ifdef VERIFIER_STATS
         gDvm.verifierStats.mergeRegCount++;
@@ -2905,7 +2887,7 @@
                 foundPossibleHandler = true;
 
                 if (handler->typeIdx == kDexNoIndex)
-                    clazz = gDvm.classJavaLangThrowable;
+                    clazz = gDvm.exThrowable;
                 else
                     clazz = dvmOptResolveClass(meth->clazz, handler->typeIdx,
                                 &localFailure);
@@ -2972,8 +2954,8 @@
  * what's in which register, but for verification purposes we only need to
  * store it at branch target addresses (because we merge into that).
  *
- * By zeroing out the storage we are effectively initializing the register
- * information to kRegTypeUnknown.
+ * By zeroing out the regType storage we are effectively initializing the
+ * register information to kRegTypeUnknown.
  *
  * We jump through some hoops here to minimize the total number of
  * allocations we have to perform per method verified.
@@ -3067,8 +3049,11 @@
 
     /*
      * Populate the sparse register line table.
+     *
+     * There is a RegisterLine associated with every address, but not
+     * every RegisterLine has non-NULL pointers to storage for its fields.
      */
-    u1* storage = regTable->lineAlloc;
+    u1* storage = (u1*)regTable->lineAlloc;
     for (i = 0; i < insnsSize; i++) {
         bool interesting;
 
@@ -3112,6 +3097,24 @@
 }
 
 /*
+ * Free up any "hairy" structures associated with register lines.
+ */
+static void freeRegisterLineInnards(VerifierData* vdata)
+{
+    unsigned int idx;
+
+    if (vdata->registerLines == NULL)
+        return;
+
+    for (idx = 0; idx < vdata->insnsSize; idx++) {
+        BitVector* liveRegs = vdata->registerLines[idx].liveRegs;
+        if (liveRegs != NULL)
+            dvmFreeBitVector(liveRegs);
+    }
+}
+
+
+/*
  * Verify that the arguments in a filled-new-array instruction are valid.
  *
  * "resClass" is the class refered to by pDecInsn->vB.
@@ -3167,7 +3170,10 @@
  * The throw-verification-error instruction requires two code units.  Some
  * of the replaced instructions require three; the third code unit will
  * receive a "nop".  The instruction's length will be left unchanged
- * in "insnFlags".
+ * in "insnFlags".  If the erroring instruction is a jumbo instruction,
+ * the throw-verification-error-jumbo instruction requires four code units.
+ * Some jumbo instructions require five, and the fifth code unit will become
+ * a "nop".
  *
  * The VM postpones setting of debugger breakpoints in unverified classes,
  * so there should be no clashes with the debugger.
@@ -3178,22 +3184,19 @@
     int insnIdx, VerifyError failure)
 {
     VerifyErrorRefType refType;
-    const u2* oldInsns = meth->insns + insnIdx;
-    u2 oldInsn = *oldInsns;
+    u2* oldInsns = (u2*) meth->insns + insnIdx;
     bool result = false;
 
     if (gDvm.optimizing)
         LOGD("Weird: RFI during dexopt?");
 
-    //LOGD("  was 0x%04x\n", oldInsn);
-    u2* newInsns = (u2*) meth->insns + insnIdx;
-
     /*
      * Generate the new instruction out of the old.
      *
      * First, make sure this is an instruction we're expecting to stomp on.
      */
-    switch (oldInsn & 0xff) {
+    Opcode opcode = dexOpcodeFromCodeUnit(*oldInsns);
+    switch (opcode) {
     case OP_CONST_CLASS:                // insn[1] == class ref, 2 bytes
     case OP_CHECK_CAST:
     case OP_INSTANCE_OF:
@@ -3201,6 +3204,12 @@
     case OP_NEW_ARRAY:
     case OP_FILLED_NEW_ARRAY:           // insn[1] == class ref, 3 bytes
     case OP_FILLED_NEW_ARRAY_RANGE:
+    case OP_CONST_CLASS_JUMBO:          // insn[1/2] == class ref, 4 bytes
+    case OP_CHECK_CAST_JUMBO:
+    case OP_NEW_INSTANCE_JUMBO:
+    case OP_INSTANCE_OF_JUMBO:          // insn[1/2] == class ref, 5 bytes
+    case OP_NEW_ARRAY_JUMBO:
+    case OP_FILLED_NEW_ARRAY_JUMBO:
         refType = VERIFY_ERROR_REF_CLASS;
         break;
 
@@ -3232,6 +3241,34 @@
     case OP_SPUT_SHORT:
     case OP_SPUT_WIDE:
     case OP_SPUT_OBJECT:
+    case OP_SGET_JUMBO:                 // insn[1/2] == field ref, 4 bytes
+    case OP_SGET_BOOLEAN_JUMBO:
+    case OP_SGET_BYTE_JUMBO:
+    case OP_SGET_CHAR_JUMBO:
+    case OP_SGET_SHORT_JUMBO:
+    case OP_SGET_WIDE_JUMBO:
+    case OP_SGET_OBJECT_JUMBO:
+    case OP_SPUT_JUMBO:
+    case OP_SPUT_BOOLEAN_JUMBO:
+    case OP_SPUT_BYTE_JUMBO:
+    case OP_SPUT_CHAR_JUMBO:
+    case OP_SPUT_SHORT_JUMBO:
+    case OP_SPUT_WIDE_JUMBO:
+    case OP_SPUT_OBJECT_JUMBO:
+    case OP_IGET_JUMBO:                 // insn[1/2] == field ref, 5 bytes
+    case OP_IGET_BOOLEAN_JUMBO:
+    case OP_IGET_BYTE_JUMBO:
+    case OP_IGET_CHAR_JUMBO:
+    case OP_IGET_SHORT_JUMBO:
+    case OP_IGET_WIDE_JUMBO:
+    case OP_IGET_OBJECT_JUMBO:
+    case OP_IPUT_JUMBO:
+    case OP_IPUT_BOOLEAN_JUMBO:
+    case OP_IPUT_BYTE_JUMBO:
+    case OP_IPUT_CHAR_JUMBO:
+    case OP_IPUT_SHORT_JUMBO:
+    case OP_IPUT_WIDE_JUMBO:
+    case OP_IPUT_OBJECT_JUMBO:
         refType = VERIFY_ERROR_REF_FIELD;
         break;
 
@@ -3245,25 +3282,34 @@
     case OP_INVOKE_STATIC_RANGE:
     case OP_INVOKE_INTERFACE:
     case OP_INVOKE_INTERFACE_RANGE:
+    case OP_INVOKE_VIRTUAL_JUMBO:       // insn[1/2] == method ref, 5 bytes
+    case OP_INVOKE_SUPER_JUMBO:
+    case OP_INVOKE_DIRECT_JUMBO:
+    case OP_INVOKE_STATIC_JUMBO:
+    case OP_INVOKE_INTERFACE_JUMBO:
         refType = VERIFY_ERROR_REF_METHOD;
         break;
 
     default:
         /* could handle this in a generic way, but this is probably safer */
-        LOG_VFY("GLITCH: verifier asked to replace opcode 0x%02x\n",
-            oldInsn & 0xff);
+        LOG_VFY("GLITCH: verifier asked to replace opcode 0x%02x\n", opcode);
         goto bail;
     }
 
+    assert((dexGetFlagsFromOpcode(opcode) & kInstrCanThrow) != 0);
+
     /* write a NOP over the third code unit, if necessary */
     int width = dvmInsnGetWidth(insnFlags, insnIdx);
     switch (width) {
     case 2:
+    case 4:
         /* nothing to do */
         break;
     case 3:
-        dvmDexChangeDex2(meth->clazz->pDvmDex, newInsns+2, OP_NOP);
-        //newInsns[2] = OP_NOP;
+        dvmUpdateCodeUnit(meth, oldInsns+2, OP_NOP);
+        break;
+    case 5:
+        dvmUpdateCodeUnit(meth, oldInsns+4, OP_NOP);
         break;
     default:
         /* whoops */
@@ -3272,11 +3318,22 @@
         dvmAbort();
     }
 
-    /* encode the opcode, with the failure code in the high byte */
-    u2 newVal = OP_THROW_VERIFICATION_ERROR |
-        (failure << 8) | (refType << (8 + kVerifyErrorRefTypeShift));
-    //newInsns[0] = newVal;
-    dvmDexChangeDex2(meth->clazz->pDvmDex, newInsns, newVal);
+    /* check for jumbo opcodes */
+    if (opcode > OP_DISPATCH_FF) {
+        /* replace opcode and failure code */
+        assert(width == 4 || width == 5);
+        u2 newVal = (u2) ((OP_THROW_VERIFICATION_ERROR_JUMBO << 8) |
+                           OP_DISPATCH_FF);
+        dvmUpdateCodeUnit(meth, oldInsns, newVal);
+        newVal = failure | (refType << kVerifyErrorRefTypeShift);
+        dvmUpdateCodeUnit(meth, oldInsns+3, newVal);
+    } else {
+        /* encode the opcode, with the failure code in the high byte */
+        assert(width == 2 || width == 3);
+        u2 newVal = OP_THROW_VERIFICATION_ERROR |
+            (failure << 8) | (refType << (8 + kVerifyErrorRefTypeShift));
+        dvmUpdateCodeUnit(meth, oldInsns, newVal);
+    }
 
     result = true;
 
@@ -3381,27 +3438,6 @@
     /* only need to do this if the table was updated */
     checkMergeTab();
 #endif
-
-    /*
-     * We rely on these for verification of const-class, const-string,
-     * and throw instructions.  Make sure we have them loaded.
-     */
-    if (gDvm.classJavaLangClass == NULL)
-        gDvm.classJavaLangClass =
-            dvmFindSystemClassNoInit("Ljava/lang/Class;");
-    if (gDvm.classJavaLangString == NULL)
-        gDvm.classJavaLangString =
-            dvmFindSystemClassNoInit("Ljava/lang/String;");
-    if (gDvm.classJavaLangThrowable == NULL) {
-        gDvm.classJavaLangThrowable =
-            dvmFindSystemClassNoInit("Ljava/lang/Throwable;");
-        gDvm.offJavaLangThrowable_cause =
-            dvmFindFieldOffset(gDvm.classJavaLangThrowable,
-                "cause", "Ljava/lang/Throwable;");
-    }
-    if (gDvm.classJavaLangObject == NULL)
-        gDvm.classJavaLangObject =
-            dvmFindSystemClassNoInit("Ljava/lang/Object;");
 }
 
 /*
@@ -3442,7 +3478,34 @@
             generateRegisterMap ? kTrackRegsGcPoints : kTrackRegsBranches))
         goto bail;
 
-    vdata->registerLines = NULL;     /* don't set this until we need it */
+    vdata->registerLines = regTable.registerLines;
+
+    /*
+     * Perform liveness analysis.
+     *
+     * We can do this before or after the main verifier pass.  The choice
+     * affects whether or not we see the effects of verifier instruction
+     * changes, i.e. substitution of throw-verification-error.
+     *
+     * In practice the ordering doesn't really matter, because T-V-E
+     * just prunes "can continue", creating regions of dead code (with
+     * corresponding register map data that will never be used).
+     */
+    if (generateRegisterMap &&
+        gDvm.registerMapMode == kRegisterMapModeLivePrecise)
+    {
+        /*
+         * Compute basic blocks and predecessor lists.
+         */
+        if (!dvmComputeVfyBasicBlocks(vdata))
+            goto bail;
+
+        /*
+         * Compute liveness.
+         */
+        if (!dvmComputeLiveness(vdata))
+            goto bail;
+    }
 
     /*
      * Initialize the types of the registers that correspond to the
@@ -3455,16 +3518,13 @@
     /*
      * Run the verifier.
      */
-    if (!doCodeVerification(meth, vdata->insnFlags, &regTable,
-            vdata->uninitMap))
+    if (!doCodeVerification(vdata, &regTable))
         goto bail;
 
     /*
      * Generate a register map.
      */
     if (generateRegisterMap) {
-        vdata->registerLines = regTable.registerLines;
-
         RegisterMap* pMap = dvmGenerateRegisterMapV(vdata);
         if (pMap != NULL) {
             /*
@@ -3482,6 +3542,7 @@
     result = true;
 
 bail:
+    freeRegisterLineInnards(vdata);
     free(regTable.registerLines);
     free(regTable.lineAlloc);
     return result;
@@ -3537,9 +3598,11 @@
  * instruction if a register contains an uninitialized instance created
  * by that same instruction.
  */
-static bool doCodeVerification(const Method* meth, InsnFlags* insnFlags,
-    RegisterTable* regTable, UninitInstanceMap* uninitMap)
+static bool doCodeVerification(VerifierData* vdata, RegisterTable* regTable)
 {
+    const Method* meth = vdata->method;
+    InsnFlags* insnFlags = vdata->insnFlags;
+    UninitInstanceMap* uninitMap = vdata->uninitMap;
     const int insnsSize = dvmGetMethodInsnsSize(meth);
     bool result = false;
     bool debugVerbose = false;
@@ -3550,7 +3613,7 @@
      */
     dvmInsnSetChanged(insnFlags, 0, true);
 
-    if (doVerboseLogging(meth)) {
+    if (dvmWantVerboseVerification(meth)) {
         IF_LOGI() {
             char* desc = dexProtoCopyMethodDescriptor(&meth->prototype);
             LOGI("Now verifying: %s.%s %s (ins=%d regs=%d)\n",
@@ -3620,15 +3683,15 @@
                 LOG_VFY("HUH? workLine diverged in %s.%s %s\n",
                         meth->clazz->descriptor, meth->name, desc);
                 free(desc);
-                dumpRegTypes(meth, insnFlags, registerLine, 0, "work",
+                dumpRegTypes(vdata, registerLine, 0, "work",
                     uninitMap, DRT_SHOW_REF_TYPES | DRT_SHOW_LOCALS);
-                dumpRegTypes(meth, insnFlags, registerLine, 0, "insn",
+                dumpRegTypes(vdata, registerLine, 0, "insn",
                     uninitMap, DRT_SHOW_REF_TYPES | DRT_SHOW_LOCALS);
             }
 #endif
         }
         if (debugVerbose) {
-            dumpRegTypes(meth, insnFlags, &regTable->workLine, insnIdx,
+            dumpRegTypes(vdata, &regTable->workLine, insnIdx,
                 NULL, uninitMap, SHOW_REG_DETAILS);
         }
 
@@ -3763,7 +3826,7 @@
     RegisterLine* workLine = &regTable->workLine;
     const DexFile* pDexFile = meth->clazz->pDvmDex->pDexFile;
     ClassObject* resClass;
-    int branchTarget = 0;
+    s4 branchTarget = 0;
     const int insnRegCount = meth->registersSize;
     RegType tmpType;
     DecodedInstruction decInsn;
@@ -3988,6 +4051,7 @@
             regTypeFromClass(gDvm.classJavaLangString));
         break;
     case OP_CONST_CLASS:
+    case OP_CONST_CLASS_JUMBO:
         assert(gDvm.classJavaLangClass != NULL);
         /* make sure we can resolve the class; access check is important */
         resClass = dvmOptResolveClass(meth->clazz, decInsn.vB, &failure);
@@ -4033,6 +4097,7 @@
         break;
 
     case OP_CHECK_CAST:
+    case OP_CHECK_CAST_JUMBO:
         /*
          * If this instruction succeeds, we will promote register vA to
          * the type in vB.  (This could be a demotion -- not expected, so
@@ -4061,6 +4126,7 @@
         }
         break;
     case OP_INSTANCE_OF:
+    case OP_INSTANCE_OF_JUMBO:
         /* make sure we're checking a reference type */
         tmpType = getRegisterType(workLine, decInsn.vB);
         if (!regTypeIsReference(tmpType)) {
@@ -4096,6 +4162,7 @@
         break;
 
     case OP_NEW_INSTANCE:
+    case OP_NEW_INSTANCE_JUMBO:
         resClass = dvmOptResolveClass(meth->clazz, decInsn.vB, &failure);
         if (resClass == NULL) {
             const char* badClassDesc = dexStringByTypeIdx(pDexFile, decInsn.vB);
@@ -4131,6 +4198,7 @@
         }
         break;
     case OP_NEW_ARRAY:
+    case OP_NEW_ARRAY_JUMBO:
         resClass = dvmOptResolveClass(meth->clazz, decInsn.vC, &failure);
         if (resClass == NULL) {
             const char* badClassDesc = dexStringByTypeIdx(pDexFile, decInsn.vC);
@@ -4150,6 +4218,7 @@
         break;
     case OP_FILLED_NEW_ARRAY:
     case OP_FILLED_NEW_ARRAY_RANGE:
+    case OP_FILLED_NEW_ARRAY_JUMBO:
         resClass = dvmOptResolveClass(meth->clazz, decInsn.vB, &failure);
         if (resClass == NULL) {
             const char* badClassDesc = dexStringByTypeIdx(pDexFile, decInsn.vB);
@@ -4161,7 +4230,8 @@
             LOG_VFY("VFY: filled-new-array on non-array class\n");
             failure = VERIFY_ERROR_GENERIC;
         } else {
-            bool isRange = (decInsn.opcode == OP_FILLED_NEW_ARRAY_RANGE);
+            bool isRange = (decInsn.opcode == OP_FILLED_NEW_ARRAY_RANGE ||
+                            decInsn.opcode == OP_FILLED_NEW_ARRAY_JUMBO);
 
             /* check the arguments to the instruction */
             verifyFilledNewArrayRegs(meth, workLine, &decInsn,
@@ -4194,7 +4264,7 @@
     case OP_THROW:
         resClass = getClassFromRegister(workLine, decInsn.vA, &failure);
         if (VERIFY_OK(failure) && resClass != NULL) {
-            if (!dvmInstanceof(resClass, gDvm.classJavaLangThrowable)) {
+            if (!dvmInstanceof(resClass, gDvm.exThrowable)) {
                 LOG_VFY("VFY: thrown class %s not instanceof Throwable\n",
                         resClass->descriptor);
                 failure = VERIFY_ERROR_GENERIC;
@@ -4672,19 +4742,23 @@
         break;
 
     case OP_IGET:
-    case OP_IGET_VOLATILE:
+    case OP_IGET_JUMBO:
         tmpType = kRegTypeInteger;
         goto iget_1nr_common;
     case OP_IGET_BOOLEAN:
+    case OP_IGET_BOOLEAN_JUMBO:
         tmpType = kRegTypeBoolean;
         goto iget_1nr_common;
     case OP_IGET_BYTE:
+    case OP_IGET_BYTE_JUMBO:
         tmpType = kRegTypeByte;
         goto iget_1nr_common;
     case OP_IGET_CHAR:
+    case OP_IGET_CHAR_JUMBO:
         tmpType = kRegTypeChar;
         goto iget_1nr_common;
     case OP_IGET_SHORT:
+    case OP_IGET_SHORT_JUMBO:
         tmpType = kRegTypeShort;
         goto iget_1nr_common;
 iget_1nr_common:
@@ -4714,7 +4788,7 @@
         }
         break;
     case OP_IGET_WIDE:
-    case OP_IGET_WIDE_VOLATILE:
+    case OP_IGET_WIDE_JUMBO:
         {
             RegType dstType;
             InstField* instField;
@@ -4747,7 +4821,7 @@
         }
         break;
     case OP_IGET_OBJECT:
-    case OP_IGET_OBJECT_VOLATILE:
+    case OP_IGET_OBJECT_JUMBO:
         {
             ClassObject* fieldClass;
             InstField* instField;
@@ -4774,19 +4848,23 @@
         }
         break;
     case OP_IPUT:
-    case OP_IPUT_VOLATILE:
+    case OP_IPUT_JUMBO:
         tmpType = kRegTypeInteger;
         goto iput_1nr_common;
     case OP_IPUT_BOOLEAN:
+    case OP_IPUT_BOOLEAN_JUMBO:
         tmpType = kRegTypeBoolean;
         goto iput_1nr_common;
     case OP_IPUT_BYTE:
+    case OP_IPUT_BYTE_JUMBO:
         tmpType = kRegTypeByte;
         goto iput_1nr_common;
     case OP_IPUT_CHAR:
+    case OP_IPUT_CHAR_JUMBO:
         tmpType = kRegTypeChar;
         goto iput_1nr_common;
     case OP_IPUT_SHORT:
+    case OP_IPUT_SHORT_JUMBO:
         tmpType = kRegTypeShort;
         goto iput_1nr_common;
 iput_1nr_common:
@@ -4834,7 +4912,7 @@
         }
         break;
     case OP_IPUT_WIDE:
-    case OP_IPUT_WIDE_VOLATILE:
+    case OP_IPUT_WIDE_JUMBO:
         tmpType = getRegisterType(workLine, decInsn.vA);
         {
             RegType typeHi = getRegisterType(workLine, decInsn.vA+1);
@@ -4870,7 +4948,7 @@
         }
         break;
     case OP_IPUT_OBJECT:
-    case OP_IPUT_OBJECT_VOLATILE:
+    case OP_IPUT_OBJECT_JUMBO:
         {
             ClassObject* fieldClass;
             ClassObject* valueClass;
@@ -4926,19 +5004,23 @@
         break;
 
     case OP_SGET:
-    case OP_SGET_VOLATILE:
+    case OP_SGET_JUMBO:
         tmpType = kRegTypeInteger;
         goto sget_1nr_common;
     case OP_SGET_BOOLEAN:
+    case OP_SGET_BOOLEAN_JUMBO:
         tmpType = kRegTypeBoolean;
         goto sget_1nr_common;
     case OP_SGET_BYTE:
+    case OP_SGET_BYTE_JUMBO:
         tmpType = kRegTypeByte;
         goto sget_1nr_common;
     case OP_SGET_CHAR:
+    case OP_SGET_CHAR_JUMBO:
         tmpType = kRegTypeChar;
         goto sget_1nr_common;
     case OP_SGET_SHORT:
+    case OP_SGET_SHORT_JUMBO:
         tmpType = kRegTypeShort;
         goto sget_1nr_common;
 sget_1nr_common:
@@ -4971,7 +5053,7 @@
         }
         break;
     case OP_SGET_WIDE:
-    case OP_SGET_WIDE_VOLATILE:
+    case OP_SGET_WIDE_JUMBO:
         {
             StaticField* staticField;
             RegType dstType;
@@ -5001,7 +5083,7 @@
         }
         break;
     case OP_SGET_OBJECT:
-    case OP_SGET_OBJECT_VOLATILE:
+    case OP_SGET_OBJECT_JUMBO:
         {
             StaticField* staticField;
             ClassObject* fieldClass;
@@ -5025,19 +5107,23 @@
         }
         break;
     case OP_SPUT:
-    case OP_SPUT_VOLATILE:
+    case OP_SPUT_JUMBO:
         tmpType = kRegTypeInteger;
         goto sput_1nr_common;
     case OP_SPUT_BOOLEAN:
+    case OP_SPUT_BOOLEAN_JUMBO:
         tmpType = kRegTypeBoolean;
         goto sput_1nr_common;
     case OP_SPUT_BYTE:
+    case OP_SPUT_BYTE_JUMBO:
         tmpType = kRegTypeByte;
         goto sput_1nr_common;
     case OP_SPUT_CHAR:
+    case OP_SPUT_CHAR_JUMBO:
         tmpType = kRegTypeChar;
         goto sput_1nr_common;
     case OP_SPUT_SHORT:
+    case OP_SPUT_SHORT_JUMBO:
         tmpType = kRegTypeShort;
         goto sput_1nr_common;
 sput_1nr_common:
@@ -5087,7 +5173,7 @@
         }
         break;
     case OP_SPUT_WIDE:
-    case OP_SPUT_WIDE_VOLATILE:
+    case OP_SPUT_WIDE_JUMBO:
         tmpType = getRegisterType(workLine, decInsn.vA);
         {
             RegType typeHi = getRegisterType(workLine, decInsn.vA+1);
@@ -5120,7 +5206,7 @@
         }
         break;
     case OP_SPUT_OBJECT:
-    case OP_SPUT_OBJECT_VOLATILE:
+    case OP_SPUT_OBJECT_JUMBO:
         {
             ClassObject* fieldClass;
             ClassObject* valueClass;
@@ -5175,8 +5261,10 @@
 
     case OP_INVOKE_VIRTUAL:
     case OP_INVOKE_VIRTUAL_RANGE:
+    case OP_INVOKE_VIRTUAL_JUMBO:
     case OP_INVOKE_SUPER:
     case OP_INVOKE_SUPER_RANGE:
+    case OP_INVOKE_SUPER_JUMBO:
         {
             Method* calledMethod;
             RegType returnType;
@@ -5184,9 +5272,12 @@
             bool isSuper;
 
             isRange =  (decInsn.opcode == OP_INVOKE_VIRTUAL_RANGE ||
-                        decInsn.opcode == OP_INVOKE_SUPER_RANGE);
+                        decInsn.opcode == OP_INVOKE_VIRTUAL_JUMBO ||
+                        decInsn.opcode == OP_INVOKE_SUPER_RANGE ||
+                        decInsn.opcode == OP_INVOKE_SUPER_JUMBO);
             isSuper =  (decInsn.opcode == OP_INVOKE_SUPER ||
-                        decInsn.opcode == OP_INVOKE_SUPER_RANGE);
+                        decInsn.opcode == OP_INVOKE_SUPER_RANGE ||
+                        decInsn.opcode == OP_INVOKE_SUPER_JUMBO);
 
             calledMethod = verifyInvocationArgs(meth, workLine, insnRegCount,
                             &decInsn, uninitMap, METHOD_VIRTUAL, isRange,
@@ -5200,12 +5291,14 @@
         break;
     case OP_INVOKE_DIRECT:
     case OP_INVOKE_DIRECT_RANGE:
+    case OP_INVOKE_DIRECT_JUMBO:
         {
             RegType returnType;
             Method* calledMethod;
             bool isRange;
 
-            isRange =  (decInsn.opcode == OP_INVOKE_DIRECT_RANGE);
+            isRange =  (decInsn.opcode == OP_INVOKE_DIRECT_RANGE ||
+                        decInsn.opcode == OP_INVOKE_DIRECT_JUMBO);
             calledMethod = verifyInvocationArgs(meth, workLine, insnRegCount,
                             &decInsn, uninitMap, METHOD_DIRECT, isRange,
                             false, &failure);
@@ -5279,12 +5372,14 @@
         break;
     case OP_INVOKE_STATIC:
     case OP_INVOKE_STATIC_RANGE:
+    case OP_INVOKE_STATIC_JUMBO:
         {
             RegType returnType;
             Method* calledMethod;
             bool isRange;
 
-            isRange =  (decInsn.opcode == OP_INVOKE_STATIC_RANGE);
+            isRange =  (decInsn.opcode == OP_INVOKE_STATIC_RANGE ||
+                        decInsn.opcode == OP_INVOKE_STATIC_JUMBO);
             calledMethod = verifyInvocationArgs(meth, workLine, insnRegCount,
                             &decInsn, uninitMap, METHOD_STATIC, isRange,
                             false, &failure);
@@ -5298,12 +5393,14 @@
         break;
     case OP_INVOKE_INTERFACE:
     case OP_INVOKE_INTERFACE_RANGE:
+    case OP_INVOKE_INTERFACE_JUMBO:
         {
             RegType /*thisType,*/ returnType;
             Method* absMethod;
             bool isRange;
 
-            isRange =  (decInsn.opcode == OP_INVOKE_INTERFACE_RANGE);
+            isRange =  (decInsn.opcode == OP_INVOKE_INTERFACE_RANGE ||
+                        decInsn.opcode == OP_INVOKE_INTERFACE_JUMBO);
             absMethod = verifyInvocationArgs(meth, workLine, insnRegCount,
                             &decInsn, uninitMap, METHOD_INTERFACE, isRange,
                             false, &failure);
@@ -5594,6 +5691,7 @@
      * inserted in the course of verification, we can expect to see it here.
      */
     case OP_THROW_VERIFICATION_ERROR:
+    case OP_THROW_VERIFICATION_ERROR_JUMBO:
         break;
 
     /*
@@ -5630,7 +5728,6 @@
      */
     case OP_EXECUTE_INLINE:
     case OP_EXECUTE_INLINE_RANGE:
-    case OP_INVOKE_DIRECT_EMPTY:
     case OP_IGET_QUICK:
     case OP_IGET_WIDE_QUICK:
     case OP_IGET_OBJECT_QUICK:
@@ -5641,9 +5738,43 @@
     case OP_INVOKE_VIRTUAL_QUICK_RANGE:
     case OP_INVOKE_SUPER_QUICK:
     case OP_INVOKE_SUPER_QUICK_RANGE:
+        /* fall through to failure */
+
+    /*
+     * These instructions are equivalent (from the verifier's point of view)
+     * to the original form.  The change was made for correctness rather
+     * than improved performance (except for invoke-object-init, which
+     * provides both).  The substitution takes place after verification
+     * completes, though, so we don't expect to see them here.
+     */
+    case OP_INVOKE_OBJECT_INIT_RANGE:
+    case OP_INVOKE_OBJECT_INIT_JUMBO:
     case OP_RETURN_VOID_BARRIER:
-        failure = VERIFY_ERROR_GENERIC;
-        break;
+    case OP_IGET_VOLATILE:
+    case OP_IGET_VOLATILE_JUMBO:
+    case OP_IGET_WIDE_VOLATILE:
+    case OP_IGET_WIDE_VOLATILE_JUMBO:
+    case OP_IGET_OBJECT_VOLATILE:
+    case OP_IGET_OBJECT_VOLATILE_JUMBO:
+    case OP_IPUT_VOLATILE:
+    case OP_IPUT_VOLATILE_JUMBO:
+    case OP_IPUT_WIDE_VOLATILE:
+    case OP_IPUT_WIDE_VOLATILE_JUMBO:
+    case OP_IPUT_OBJECT_VOLATILE:
+    case OP_IPUT_OBJECT_VOLATILE_JUMBO:
+    case OP_SGET_VOLATILE:
+    case OP_SGET_VOLATILE_JUMBO:
+    case OP_SGET_WIDE_VOLATILE:
+    case OP_SGET_WIDE_VOLATILE_JUMBO:
+    case OP_SGET_OBJECT_VOLATILE:
+    case OP_SGET_OBJECT_VOLATILE_JUMBO:
+    case OP_SPUT_VOLATILE:
+    case OP_SPUT_VOLATILE_JUMBO:
+    case OP_SPUT_WIDE_VOLATILE:
+    case OP_SPUT_WIDE_VOLATILE_JUMBO:
+    case OP_SPUT_OBJECT_VOLATILE:
+    case OP_SPUT_OBJECT_VOLATILE_JUMBO:
+        /* fall through to failure */
 
     /* these should never appear during verification */
     case OP_UNUSED_3E:
@@ -5657,6 +5788,209 @@
     case OP_UNUSED_7A:
     case OP_BREAKPOINT:
     case OP_DISPATCH_FF:
+    case OP_UNUSED_27FF:
+    case OP_UNUSED_28FF:
+    case OP_UNUSED_29FF:
+    case OP_UNUSED_2AFF:
+    case OP_UNUSED_2BFF:
+    case OP_UNUSED_2CFF:
+    case OP_UNUSED_2DFF:
+    case OP_UNUSED_2EFF:
+    case OP_UNUSED_2FFF:
+    case OP_UNUSED_30FF:
+    case OP_UNUSED_31FF:
+    case OP_UNUSED_32FF:
+    case OP_UNUSED_33FF:
+    case OP_UNUSED_34FF:
+    case OP_UNUSED_35FF:
+    case OP_UNUSED_36FF:
+    case OP_UNUSED_37FF:
+    case OP_UNUSED_38FF:
+    case OP_UNUSED_39FF:
+    case OP_UNUSED_3AFF:
+    case OP_UNUSED_3BFF:
+    case OP_UNUSED_3CFF:
+    case OP_UNUSED_3DFF:
+    case OP_UNUSED_3EFF:
+    case OP_UNUSED_3FFF:
+    case OP_UNUSED_40FF:
+    case OP_UNUSED_41FF:
+    case OP_UNUSED_42FF:
+    case OP_UNUSED_43FF:
+    case OP_UNUSED_44FF:
+    case OP_UNUSED_45FF:
+    case OP_UNUSED_46FF:
+    case OP_UNUSED_47FF:
+    case OP_UNUSED_48FF:
+    case OP_UNUSED_49FF:
+    case OP_UNUSED_4AFF:
+    case OP_UNUSED_4BFF:
+    case OP_UNUSED_4CFF:
+    case OP_UNUSED_4DFF:
+    case OP_UNUSED_4EFF:
+    case OP_UNUSED_4FFF:
+    case OP_UNUSED_50FF:
+    case OP_UNUSED_51FF:
+    case OP_UNUSED_52FF:
+    case OP_UNUSED_53FF:
+    case OP_UNUSED_54FF:
+    case OP_UNUSED_55FF:
+    case OP_UNUSED_56FF:
+    case OP_UNUSED_57FF:
+    case OP_UNUSED_58FF:
+    case OP_UNUSED_59FF:
+    case OP_UNUSED_5AFF:
+    case OP_UNUSED_5BFF:
+    case OP_UNUSED_5CFF:
+    case OP_UNUSED_5DFF:
+    case OP_UNUSED_5EFF:
+    case OP_UNUSED_5FFF:
+    case OP_UNUSED_60FF:
+    case OP_UNUSED_61FF:
+    case OP_UNUSED_62FF:
+    case OP_UNUSED_63FF:
+    case OP_UNUSED_64FF:
+    case OP_UNUSED_65FF:
+    case OP_UNUSED_66FF:
+    case OP_UNUSED_67FF:
+    case OP_UNUSED_68FF:
+    case OP_UNUSED_69FF:
+    case OP_UNUSED_6AFF:
+    case OP_UNUSED_6BFF:
+    case OP_UNUSED_6CFF:
+    case OP_UNUSED_6DFF:
+    case OP_UNUSED_6EFF:
+    case OP_UNUSED_6FFF:
+    case OP_UNUSED_70FF:
+    case OP_UNUSED_71FF:
+    case OP_UNUSED_72FF:
+    case OP_UNUSED_73FF:
+    case OP_UNUSED_74FF:
+    case OP_UNUSED_75FF:
+    case OP_UNUSED_76FF:
+    case OP_UNUSED_77FF:
+    case OP_UNUSED_78FF:
+    case OP_UNUSED_79FF:
+    case OP_UNUSED_7AFF:
+    case OP_UNUSED_7BFF:
+    case OP_UNUSED_7CFF:
+    case OP_UNUSED_7DFF:
+    case OP_UNUSED_7EFF:
+    case OP_UNUSED_7FFF:
+    case OP_UNUSED_80FF:
+    case OP_UNUSED_81FF:
+    case OP_UNUSED_82FF:
+    case OP_UNUSED_83FF:
+    case OP_UNUSED_84FF:
+    case OP_UNUSED_85FF:
+    case OP_UNUSED_86FF:
+    case OP_UNUSED_87FF:
+    case OP_UNUSED_88FF:
+    case OP_UNUSED_89FF:
+    case OP_UNUSED_8AFF:
+    case OP_UNUSED_8BFF:
+    case OP_UNUSED_8CFF:
+    case OP_UNUSED_8DFF:
+    case OP_UNUSED_8EFF:
+    case OP_UNUSED_8FFF:
+    case OP_UNUSED_90FF:
+    case OP_UNUSED_91FF:
+    case OP_UNUSED_92FF:
+    case OP_UNUSED_93FF:
+    case OP_UNUSED_94FF:
+    case OP_UNUSED_95FF:
+    case OP_UNUSED_96FF:
+    case OP_UNUSED_97FF:
+    case OP_UNUSED_98FF:
+    case OP_UNUSED_99FF:
+    case OP_UNUSED_9AFF:
+    case OP_UNUSED_9BFF:
+    case OP_UNUSED_9CFF:
+    case OP_UNUSED_9DFF:
+    case OP_UNUSED_9EFF:
+    case OP_UNUSED_9FFF:
+    case OP_UNUSED_A0FF:
+    case OP_UNUSED_A1FF:
+    case OP_UNUSED_A2FF:
+    case OP_UNUSED_A3FF:
+    case OP_UNUSED_A4FF:
+    case OP_UNUSED_A5FF:
+    case OP_UNUSED_A6FF:
+    case OP_UNUSED_A7FF:
+    case OP_UNUSED_A8FF:
+    case OP_UNUSED_A9FF:
+    case OP_UNUSED_AAFF:
+    case OP_UNUSED_ABFF:
+    case OP_UNUSED_ACFF:
+    case OP_UNUSED_ADFF:
+    case OP_UNUSED_AEFF:
+    case OP_UNUSED_AFFF:
+    case OP_UNUSED_B0FF:
+    case OP_UNUSED_B1FF:
+    case OP_UNUSED_B2FF:
+    case OP_UNUSED_B3FF:
+    case OP_UNUSED_B4FF:
+    case OP_UNUSED_B5FF:
+    case OP_UNUSED_B6FF:
+    case OP_UNUSED_B7FF:
+    case OP_UNUSED_B8FF:
+    case OP_UNUSED_B9FF:
+    case OP_UNUSED_BAFF:
+    case OP_UNUSED_BBFF:
+    case OP_UNUSED_BCFF:
+    case OP_UNUSED_BDFF:
+    case OP_UNUSED_BEFF:
+    case OP_UNUSED_BFFF:
+    case OP_UNUSED_C0FF:
+    case OP_UNUSED_C1FF:
+    case OP_UNUSED_C2FF:
+    case OP_UNUSED_C3FF:
+    case OP_UNUSED_C4FF:
+    case OP_UNUSED_C5FF:
+    case OP_UNUSED_C6FF:
+    case OP_UNUSED_C7FF:
+    case OP_UNUSED_C8FF:
+    case OP_UNUSED_C9FF:
+    case OP_UNUSED_CAFF:
+    case OP_UNUSED_CBFF:
+    case OP_UNUSED_CCFF:
+    case OP_UNUSED_CDFF:
+    case OP_UNUSED_CEFF:
+    case OP_UNUSED_CFFF:
+    case OP_UNUSED_D0FF:
+    case OP_UNUSED_D1FF:
+    case OP_UNUSED_D2FF:
+    case OP_UNUSED_D3FF:
+    case OP_UNUSED_D4FF:
+    case OP_UNUSED_D5FF:
+    case OP_UNUSED_D6FF:
+    case OP_UNUSED_D7FF:
+    case OP_UNUSED_D8FF:
+    case OP_UNUSED_D9FF:
+    case OP_UNUSED_DAFF:
+    case OP_UNUSED_DBFF:
+    case OP_UNUSED_DCFF:
+    case OP_UNUSED_DDFF:
+    case OP_UNUSED_DEFF:
+    case OP_UNUSED_DFFF:
+    case OP_UNUSED_E0FF:
+    case OP_UNUSED_E1FF:
+    case OP_UNUSED_E2FF:
+    case OP_UNUSED_E3FF:
+    case OP_UNUSED_E4FF:
+    case OP_UNUSED_E5FF:
+    case OP_UNUSED_E6FF:
+    case OP_UNUSED_E7FF:
+    case OP_UNUSED_E8FF:
+    case OP_UNUSED_E9FF:
+    case OP_UNUSED_EAFF:
+    case OP_UNUSED_EBFF:
+    case OP_UNUSED_ECFF:
+    case OP_UNUSED_EDFF:
+    case OP_UNUSED_EEFF:
+    case OP_UNUSED_EFFF:
+    case OP_UNUSED_F0FF:
+    case OP_UNUSED_F1FF:
         failure = VERIFY_ERROR_GENERIC;
         break;
 
@@ -5754,7 +6088,7 @@
     if ((nextFlags & kInstrCanBranch) != 0) {
         bool isConditional;
 
-        if (!dvmGetBranchTarget(meth, insnFlags, insnIdx, &branchTarget,
+        if (!dvmGetBranchOffset(meth, insnFlags, insnIdx, &branchTarget,
                 &isConditional))
         {
             /* should never happen after static verification */
@@ -5922,10 +6256,12 @@
 /*
  * Dump the register types for the specified address to the log file.
  */
-static void dumpRegTypes(const Method* meth, const InsnFlags* insnFlags,
+static void dumpRegTypes(const VerifierData* vdata,
     const RegisterLine* registerLine, int addr, const char* addrName,
     const UninitInstanceMap* uninitMap, int displayFlags)
 {
+    const Method* meth = vdata->method;
+    const InsnFlags* insnFlags = vdata->insnFlags;
     const RegType* addrRegs = registerLine->regTypes;
     int regCount = meth->registersSize;
     int fullRegCount = regCount + kExtraRegs;
@@ -5990,6 +6326,26 @@
         LOGI("%c0x%04x %s mst=%d\n", branchTarget ? '>' : ' ',
             addr, regChars, registerLine->monitorStackTop);
     }
+    if (displayFlags & DRT_SHOW_LIVENESS) {
+        /*
+         * We can't use registerLine->liveRegs because it might be the
+         * "work line" rather than the copy from RegisterTable.
+         */
+        BitVector* liveRegs = vdata->registerLines[addr].liveRegs;
+        if (liveRegs != NULL)  {
+            char liveChars[regCharSize + 1];
+            memset(liveChars, ' ', regCharSize);
+            liveChars[regCharSize] = '\0';
+
+            for (i = 0; i < regCount; i++) {
+                bool isLive = dvmIsBitSet(liveRegs, i);
+                liveChars[i + 1 + (i / 4)] = isLive ? '+' : '-';
+            }
+            LOGI("        %s\n", liveChars);
+        } else {
+            LOGI("        %c\n", '#');
+        }
+    }
 
     if (displayFlags & DRT_SHOW_REF_TYPES) {
         for (i = 0; i < regCount + kExtraRegs; i++) {
@@ -5998,7 +6354,7 @@
                 ClassObject* clazz;
 
                 clazz = regTypeReferenceToClass(addrRegs[i], uninitMap);
-                assert(dvmValidateObject((Object*)clazz));
+                assert(dvmIsValidObject((Object*)clazz));
                 if (i < regCount) {
                     LOGI("        %2d: 0x%08x %s%s\n",
                         i, addrRegs[i],
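The replaceFailingInstruction() change above packs the failure kind and reference type into the throw-verification-error code units. Purely as a sketch of the inverse operation, with the field layout inferred from the encode expressions shown in the diff (this helper does not exist in the verifier):

/*
 * Sketch: decode a non-jumbo throw-verification-error code unit,
 * mirroring "OP_THROW_VERIFICATION_ERROR | (failure << 8) |
 * (refType << (8 + kVerifyErrorRefTypeShift))".  Assumes the failure
 * code occupies the bits below kVerifyErrorRefTypeShift in the AA byte.
 */
static void decodeThrowVerificationError(u2 codeUnit,
    VerifyError* pFailure, VerifyErrorRefType* pRefType)
{
    int aa = (codeUnit >> 8) & 0xff;

    assert((codeUnit & 0xff) == OP_THROW_VERIFICATION_ERROR);
    *pFailure = (VerifyError) (aa & ((1 << kVerifyErrorRefTypeShift) - 1));
    *pRefType = (VerifyErrorRefType) (aa >> kVerifyErrorRefTypeShift);
}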
diff --git a/vm/analysis/CodeVerify.h b/vm/analysis/CodeVerify.h
index 85b2e5e..9369537 100644
--- a/vm/analysis/CodeVerify.h
+++ b/vm/analysis/CodeVerify.h
@@ -21,6 +21,7 @@
 #define _DALVIK_CODEVERIFY
 
 #include "analysis/VerifySubs.h"
+#include "analysis/VfyBasicBlock.h"
 
 
 /*
@@ -119,12 +120,18 @@
  * instruction.  We track the status of all registers, and (if the method
  * has any monitor-enter instructions) maintain a stack of entered monitors
  * (identified by code unit offset).
+ *
+ * If live-precise register maps are enabled, the "liveRegs" vector will
+ * be populated.  Unlike the other lists of registers here, we do not
+ * track the liveness of the method result register (which is not visible
+ * to the GC).
  */
 typedef struct {
     RegType*        regTypes;
     MonitorEntries* monitorEntries;
     u4*             monitorStack;
     unsigned int    monitorStackTop;
+    BitVector*      liveRegs;
 } RegisterLine;
 
 /*
@@ -187,6 +194,12 @@
      */
     size_t          newInstanceCount;
     size_t          monitorEnterCount;
+
+    /*
+     * Array of pointers to basic blocks, one entry per code unit.  Used
+     * for liveness analysis.
+     */
+    VfyBasicBlock** basicBlocks;
 } VerifierData;
 
 
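Since RegisterLine now carries an optional liveRegs vector, here is a hedged sketch of how a consumer might walk it; the helper is hypothetical and not part of this change, and liveRegs is NULL unless live-precise maps are enabled:

/*
 * Sketch: count the live Dalvik registers recorded for one RegisterLine.
 * Returns -1 when no liveness data was computed for this line.
 */
static int countLiveRegs(const RegisterLine* registerLine, int regCount)
{
    int i, liveCount = 0;

    if (registerLine->liveRegs == NULL)
        return -1;
    for (i = 0; i < regCount; i++) {
        if (dvmIsBitSet(registerLine->liveRegs, i))
            liveCount++;
    }
    return liveCount;
}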
diff --git a/vm/analysis/DexPrepare.c b/vm/analysis/DexPrepare.c
index 4a17123..0f82cbf 100644
--- a/vm/analysis/DexPrepare.c
+++ b/vm/analysis/DexPrepare.c
@@ -42,8 +42,8 @@
 
 
 /* fwd */
-static bool rewriteDex(u1* addr, int len, u4* pHeaderFlags,
-    DexClassLookup** ppClassLookup);
+static bool rewriteDex(u1* addr, int len, bool doVerify, bool doOpt,
+    DexClassLookup** ppClassLookup, DvmDex** ppDvmDex);
 static bool loadAllClasses(DvmDex* pDvmDex);
 static void verifyAndOptimizeClasses(DexFile* pDexFile, bool doVerify,
     bool doOpt);
@@ -173,19 +173,22 @@
     } else {
         bool expectVerify, expectOpt;
 
-        if (gDvm.classVerifyMode == VERIFY_MODE_NONE)
+        if (gDvm.classVerifyMode == VERIFY_MODE_NONE) {
             expectVerify = false;
-        else if (gDvm.classVerifyMode == VERIFY_MODE_REMOTE)
+        } else if (gDvm.classVerifyMode == VERIFY_MODE_REMOTE) {
             expectVerify = !isBootstrap;
-        else /*if (gDvm.classVerifyMode == VERIFY_MODE_ALL)*/
+        } else /*if (gDvm.classVerifyMode == VERIFY_MODE_ALL)*/ {
             expectVerify = true;
+        }
 
-        if (gDvm.dexOptMode == OPTIMIZE_MODE_NONE)
+        if (gDvm.dexOptMode == OPTIMIZE_MODE_NONE) {
             expectOpt = false;
-        else if (gDvm.dexOptMode == OPTIMIZE_MODE_VERIFIED)
+        } else if (gDvm.dexOptMode == OPTIMIZE_MODE_VERIFIED ||
+                   gDvm.dexOptMode == OPTIMIZE_MODE_FULL) {
             expectOpt = expectVerify;
-        else /*if (gDvm.dexOptMode == OPTIMIZE_MODE_ALL)*/
+        } else /*if (gDvm.dexOptMode == OPTIMIZE_MODE_ALL)*/ {
             expectOpt = true;
+        }
 
         LOGV("checking deps, expecting vfy=%d opt=%d\n",
             expectVerify, expectOpt);
@@ -342,7 +345,7 @@
             LOGW("ANDROID_ROOT not set, defaulting to /system\n");
             androidRoot = "/system";
         }
-        execFile = malloc(strlen(androidRoot) + strlen(kDexOptBin) + 1);
+        execFile = (char*)malloc(strlen(androidRoot) + strlen(kDexOptBin) + 1);
         strcpy(execFile, androidRoot);
         strcat(execFile, kDexOptBin);
 
@@ -476,7 +479,6 @@
 {
     DexClassLookup* pClassLookup = NULL;
     RegisterMapBuilder* pRegMapBuilder = NULL;
-    u4 headerFlags = 0;
 
     assert(gDvm.optimizing);
 
@@ -518,6 +520,24 @@
             goto bail;
         }
 
+        bool doVerify, doOpt;
+        if (gDvm.classVerifyMode == VERIFY_MODE_NONE) {
+            doVerify = false;
+        } else if (gDvm.classVerifyMode == VERIFY_MODE_REMOTE) {
+            doVerify = !gDvm.optimizingBootstrapClass;
+        } else /*if (gDvm.classVerifyMode == VERIFY_MODE_ALL)*/ {
+            doVerify = true;
+        }
+
+        if (gDvm.dexOptMode == OPTIMIZE_MODE_NONE) {
+            doOpt = false;
+        } else if (gDvm.dexOptMode == OPTIMIZE_MODE_VERIFIED ||
+                   gDvm.dexOptMode == OPTIMIZE_MODE_FULL) {
+            doOpt = doVerify;
+        } else /*if (gDvm.dexOptMode == OPTIMIZE_MODE_ALL)*/ {
+            doOpt = true;
+        }
+
         /*
          * Rewrite the file.  Byte reordering, structure realigning,
          * class verification, and bytecode optimization are all performed
@@ -527,11 +547,10 @@
          * In practice this would be annoying to deal with, so the file
          * layout is designed so that it can always be rewritten in place.
          *
-         * This sets "headerFlags" and creates the class lookup table as
-         * part of doing the processing.
+         * This creates the class lookup table as part of doing the processing.
          */
         success = rewriteDex(((u1*) mapAddr) + dexOffset, dexLength,
-                    &headerFlags, &pClassLookup);
+                    doVerify, doOpt, &pClassLookup, NULL);
 
         if (success) {
             DvmDex* pDvmDex = NULL;
@@ -653,8 +672,11 @@
     optHdr.depsLength = (u4) depsLength;
     optHdr.optOffset = (u4) optOffset;
     optHdr.optLength = (u4) optLength;
-
-    optHdr.flags = headerFlags;
+#if __BYTE_ORDER != __LITTLE_ENDIAN
+    optHdr.flags = DEX_OPT_FLAG_BIG;
+#else
+    optHdr.flags = 0;
+#endif
     optHdr.checksum = optChecksum;
 
     fsync(fd);      /* ensure previous writes go before header is written */
@@ -674,49 +696,62 @@
     return result;
 }
 
+/*
+ * Prepare an in-memory DEX file.
+ *
+ * The data was presented to the VM as a byte array rather than a file.
+ * We want to do the same basic set of operations, but we can just leave
+ * them in memory instead of writing them out to a cached optimized DEX file.
+ */
+bool dvmPrepareDexInMemory(u1* addr, size_t len, DvmDex** ppDvmDex)
+{
+    DexClassLookup* pClassLookup = NULL;
+
+    /*
+     * Byte-swap, realign, verify basic DEX file structure.
+     *
+     * We could load + verify + optimize here as well, but that's probably
+     * not desirable.
+     *
+     * (The bulk-verification code is currently only setting the DEX
+     * file's "verified" flag, not updating the ClassObject.  This would
+     * also need to be changed, or we will try to verify the class twice,
+     * and possibly reject it when optimized opcodes are encountered.)
+     */
+    if (!rewriteDex(addr, len, false, false, &pClassLookup, ppDvmDex)) {
+        return false;
+    }
+
+    (*ppDvmDex)->pDexFile->pClassLookup = pClassLookup;
+
+    return true;
+}
 
 /*
  * Perform in-place rewrites on a memory-mapped DEX file.
  *
- * This happens in a short-lived child process, so we can go nutty with
- * loading classes and allocating memory.
+ * If this is called from a short-lived child process (dexopt), we can
+ * go nutty with loading classes and allocating memory.  When it's
+ * called to prepare classes provided in a byte array, we may want to
+ * be more conservative.
+ *
+ * If "ppClassLookup" is non-NULL, a pointer to a newly-allocated
+ * DexClassLookup will be returned on success.
+ *
+ * If "ppDvmDex" is non-NULL, a newly-allocated DvmDex struct will be
+ * returned on success.
  */
-static bool rewriteDex(u1* addr, int len, u4* pHeaderFlags,
-    DexClassLookup** ppClassLookup)
+static bool rewriteDex(u1* addr, int len, bool doVerify, bool doOpt,
+    DexClassLookup** ppClassLookup, DvmDex** ppDvmDex)
 {
+    DexClassLookup* pClassLookup = NULL;
     u8 prepWhen, loadWhen, verifyOptWhen;
     DvmDex* pDvmDex = NULL;
-    bool doVerify, doOpt;
     bool result = false;
 
-    *pHeaderFlags = 0;
-
     /* if the DEX is in the wrong byte order, swap it now */
     if (dexSwapAndVerify(addr, len) != 0)
         goto bail;
-#if __BYTE_ORDER != __LITTLE_ENDIAN
-    *pHeaderFlags |= DEX_OPT_FLAG_BIG;
-#endif
-
-    if (gDvm.classVerifyMode == VERIFY_MODE_NONE)
-        doVerify = false;
-    else if (gDvm.classVerifyMode == VERIFY_MODE_REMOTE)
-        doVerify = !gDvm.optimizingBootstrapClass;
-    else /*if (gDvm.classVerifyMode == VERIFY_MODE_ALL)*/
-        doVerify = true;
-
-    if (gDvm.dexOptMode == OPTIMIZE_MODE_NONE)
-        doOpt = false;
-    else if (gDvm.dexOptMode == OPTIMIZE_MODE_VERIFIED)
-        doOpt = doVerify;
-    else /*if (gDvm.dexOptMode == OPTIMIZE_MODE_ALL)*/
-        doOpt = true;
-
-    /* TODO: decide if this is actually useful */
-    if (doVerify)
-        *pHeaderFlags |= DEX_FLAG_VERIFIED;
-    if (doOpt)
-        *pHeaderFlags |= DEX_OPT_FLAG_FIELDS | DEX_OPT_FLAG_INVOCATIONS;
 
     /*
      * Now that the DEX file can be read directly, create a DexFile struct
@@ -730,10 +765,14 @@
     /*
      * Create the class lookup table.  This will eventually be appended
      * to the end of the .odex.
+     *
+     * We create a temporary link from the DexFile for the benefit of
+     * class loading, below.
      */
-    *ppClassLookup = dexCreateClassLookup(pDvmDex->pDexFile);
-    if (*ppClassLookup == NULL)
+    pClassLookup = dexCreateClassLookup(pDvmDex->pDexFile);
+    if (pClassLookup == NULL)
         goto bail;
+    pDvmDex->pDexFile->pClassLookup = pClassLookup;
 
     /*
      * If we're not going to attempt to verify or optimize the classes,
@@ -744,9 +783,6 @@
         goto bail;
     }
 
-    /* this is needed for the next part */
-    pDvmDex->pDexFile->pClassLookup = *ppClassLookup;
-
     prepWhen = dvmGetRelativeTimeUsec();
 
     /*
@@ -758,6 +794,16 @@
     loadWhen = dvmGetRelativeTimeUsec();
 
     /*
+     * Create a data structure for use by the bytecode optimizer.
+     * We need to look up methods in a few classes, so this may cause
+     * a bit of class loading.  We usually do this during VM init, but
+     * for dexopt on core.jar the order of operations gets a bit tricky,
+     * so we defer it to here.
+     */
+    if (!dvmCreateInlineSubsTable())
+        goto bail;
+
+    /*
      * Verify and optimize all classes in the DEX file (command-line
      * options permitting).
      *
@@ -782,8 +828,23 @@
     result = true;
 
 bail:
-    /* free up storage */
-    dvmDexFileFree(pDvmDex);
+    /*
+     * On success, return the pieces that the caller asked for.
+     */
+    if (ppDvmDex == NULL || !result) {
+        dvmDexFileFree(pDvmDex);
+    } else {
+        *ppDvmDex = pDvmDex;
+    }
+
+    if (ppClassLookup == NULL || !result) {
+        free(pClassLookup);
+    } else {
+        *ppClassLookup = pClassLookup;
+    }
+
+    /* break link between the two */
+    pDvmDex->pDexFile->pClassLookup = NULL;
 
     return result;
 }
@@ -817,15 +878,29 @@
     dvmSetBootPathExtraDex(pDvmDex);
 
     /*
-     * We have some circularity issues with Class and Object that are most
-     * easily avoided by ensuring that Object is never the first thing we
-     * try to find.  Take care of that here.  (We only need to do this when
-     * loading classes from the DEX file that contains Object, and only
-     * when Object comes first in the list, but it costs very little to
-     * do it in all cases.)
+     * At this point, it is safe -- and necessary! -- to look up the
+     * VM's required classes and members, even when the file being
+     * processed is the core library that defines these classes
+     * itself. (The reason it is necessary is that in the act
+     * of initializing the class Class, below, the system will end up
+     * referring to many of the class references that got set up by
+     * this call.)
      */
-    if (dvmFindSystemClass("Ljava/lang/Class;") == NULL) {
-        LOGE("ERROR: java.lang.Class does not exist!\n");
+    if (!dvmFindRequiredClassesAndMembers()) {
+        return false;
+    }
+
+    /*
+     * We have some circularity issues with Class and Object that are
+     * most easily avoided by ensuring that Object is never the first
+     * thing we try to find-and-initialize. The call to
+     * dvmFindSystemClass() here takes care of that situation. (We
+     * only need to do this when loading classes from the DEX file
+     * that contains Object, and only when Object comes first in the
+     * list, but it costs very little to do it in all cases.)
+     */
+    if (!dvmInitClass(gDvm.classJavaLangClass)) {
+        LOGE("ERROR: failed to initialize the class Class!\n");
         return false;
     }
 
@@ -874,21 +949,6 @@
     u4 count = pDexFile->pHeader->classDefsSize;
     u4 idx;
 
-    /*
-     * Create a data structure for use by the bytecode optimizer.  We
-     * stuff it into a global so we don't have to pass it around as
-     * a function argument.
-     *
-     * We could create this at VM startup, but there's no need to do so
-     * unless we're optimizing, which means we're in dexopt, and we're
-     * only going to call here once.
-     */
-    if (doOpt) {
-        gDvm.inlineSubs = dvmCreateInlineSubsTable();
-        if (gDvm.inlineSubs == NULL)
-            return;
-    }
-
     for (idx = 0; idx < count; idx++) {
         const DexClassDef* pClassDef;
         const char* classDescriptor;
@@ -909,11 +969,6 @@
         }
     }
 
-    if (gDvm.inlineSubs != NULL) {
-        dvmFreeInlineSubsTable(gDvm.inlineSubs);
-        gDvm.inlineSubs = NULL;
-    }
-
 #ifdef VERIFIER_STATS
     LOGI("Verifier stats:\n");
     LOGI(" methods examined        : %u\n", gDvm.verifierStats.methodsExamined);
@@ -973,7 +1028,9 @@
     }
 
     if (doOpt) {
-        if (!verified && gDvm.dexOptMode == OPTIMIZE_MODE_VERIFIED) {
+        bool needVerify = (gDvm.dexOptMode == OPTIMIZE_MODE_VERIFIED ||
+                           gDvm.dexOptMode == OPTIMIZE_MODE_FULL);
+        if (!verified && needVerify) {
             LOGV("DexOpt: not optimizing '%s': not verified\n",
                 classDescriptor);
         } else {
@@ -1113,29 +1170,13 @@
     /*
      * Do the header flags match up with what we want?
      *
-     * This is useful because it allows us to automatically regenerate
-     * a file when settings change (e.g. verification is now mandatory),
-     * but can cause difficulties if the bootstrap classes we depend upon
-     * were handled differently than the current options specify.  We get
-     * upset because they're not verified or optimized, but we're not able
-     * to regenerate them because the installer won't let us.
-     *
-     * (This is also of limited value when !sourceAvail.)
-     *
-     * So, for now, we essentially ignore "expectVerify" and "expectOpt"
-     * by limiting the match mask.
-     *
-     * The only thing we really can't handle is incorrect byte-ordering.
+     * The only thing we really can't handle is incorrect byte ordering.
      */
     const u4 matchMask = DEX_OPT_FLAG_BIG;
     u4 expectedFlags = 0;
 #if __BYTE_ORDER != __LITTLE_ENDIAN
     expectedFlags |= DEX_OPT_FLAG_BIG;
 #endif
-    if (expectVerify)
-        expectedFlags |= DEX_FLAG_VERIFIED;
-    if (expectOpt)
-        expectedFlags |= DEX_OPT_FLAG_FIELDS | DEX_OPT_FLAG_INVOCATIONS;
     if ((expectedFlags & matchMask) != (optHdr.flags & matchMask)) {
         LOGI("DexOpt: header flag mismatch (0x%02x vs 0x%02x, mask=0x%02x)\n",
             expectedFlags, optHdr.flags, matchMask);
@@ -1287,7 +1328,7 @@
 
     bufLen += 4*4 + numDeps * (4+kSHA1DigestLen);
 
-    buf = malloc(bufLen);
+    buf = (u1*)malloc(bufLen);
 
     set4LE(buf+0, modWhen);
     set4LE(buf+4, crc);
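The dependency-area writer above stores its header words with set4LE(); since that helper's body is not shown here, the following is only a sketch of the little-endian 32-bit store it implies:

/* Sketch (assumed, not copied from DexPrepare.c): store a u4 in
 * little-endian byte order, as the set4LE(buf+0, modWhen) calls above
 * require regardless of host endianness. */
static void set4LE(u1* buf, u4 val)
{
    buf[0] = (u1) (val);
    buf[1] = (u1) (val >> 8);
    buf[2] = (u1) (val >> 16);
    buf[3] = (u1) (val >> 24);
}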
diff --git a/vm/analysis/DexPrepare.h b/vm/analysis/DexPrepare.h
index bfa5fb5..0ee76e5 100644
--- a/vm/analysis/DexPrepare.h
+++ b/vm/analysis/DexPrepare.h
@@ -23,12 +23,16 @@
 /*
  * Global DEX optimizer control.  Determines the circumstances in which we
  * try to rewrite instructions in the DEX file.
+ *
+ * Optimizing is performed ahead-of-time by dexopt and, in some cases, at
+ * load time by the VM.
  */
 typedef enum DexOptimizerMode {
     OPTIMIZE_MODE_UNKNOWN = 0,
-    OPTIMIZE_MODE_NONE,         /* never optimize */
+    OPTIMIZE_MODE_NONE,         /* never optimize (except "essential") */
     OPTIMIZE_MODE_VERIFIED,     /* only optimize verified classes (default) */
-    OPTIMIZE_MODE_ALL           /* optimize all classes */
+    OPTIMIZE_MODE_ALL,          /* optimize verified & unverified (risky) */
+    OPTIMIZE_MODE_FULL          /* fully opt verified classes at load time */
 } DexOptimizerMode;
 
 /* some additional bit flags for dexopt */
@@ -119,4 +123,15 @@
 bool dvmContinueOptimization(int fd, off_t dexOffset, long dexLength,
     const char* fileName, u4 modWhen, u4 crc, bool isBootstrap);
 
+/*
+ * Prepare DEX data that is only available to the VM as in-memory data.
+ */
+bool dvmPrepareDexInMemory(u1* addr, size_t len, DvmDex** ppDvmDex);
+
+/*
+ * Prep data structures.
+ */
+bool dvmCreateInlineSubsTable(void);
+void dvmFreeInlineSubsTable(void);
+
 #endif /*_DALVIK_DEXPREPARE*/
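A hedged usage sketch for the new dvmPrepareDexInMemory() declaration above; the caller, buffer names, and error handling are assumptions, and the buffer must be writable since the rewrite happens in place:

/* Sketch: prepare DEX data handed to the VM as a byte array. */
static DvmDex* prepareInMemoryDex(u1* dexData, size_t dexLen)
{
    DvmDex* pDvmDex = NULL;

    if (!dvmPrepareDexInMemory(dexData, dexLen, &pDvmDex)) {
        LOGE("In-memory DEX prep failed\n");
        return NULL;
    }
    /* on success the class lookup table is already attached */
    return pDvmDex;
}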
diff --git a/vm/analysis/DexVerify.c b/vm/analysis/DexVerify.c
index 2e2d267..16e5738 100644
--- a/vm/analysis/DexVerify.c
+++ b/vm/analysis/DexVerify.c
@@ -102,7 +102,7 @@
         }
 
         Opcode opcode = dexOpcodeFromCodeUnit(*insns);
-        if (opcode == OP_NEW_INSTANCE)
+        if (opcode == OP_NEW_INSTANCE || opcode == OP_NEW_INSTANCE_JUMBO)
             newInstanceCount++;
         if (opcode == OP_MONITOR_ENTER)
             monitorEnterCount++;
@@ -111,7 +111,7 @@
         i += width;
         insns += width;
     }
-    if (i != (int) dvmGetMethodInsnsSize(meth)) {
+    if (i != (int) vdata->insnsSize) {
         LOG_VFY_METH(meth, "VFY: code did not end where expected (%d vs. %d)\n",
             i, dvmGetMethodInsnsSize(meth));
         goto bail;
@@ -140,19 +140,16 @@
     const DexCode* pCode = dvmGetMethodCode(meth);
     u4 triesSize = pCode->triesSize;
     const DexTry* pTries;
-    u4 handlersSize;
-    u4 offset;
-    u4 i;
+    u4 idx;
 
     if (triesSize == 0) {
         return true;
     }
 
     pTries = dexGetTries(pCode);
-    handlersSize = dexGetHandlersSize(pCode);
 
-    for (i = 0; i < triesSize; i++) {
-        const DexTry* pTry = &pTries[i];
+    for (idx = 0; idx < triesSize; idx++) {
+        const DexTry* pTry = &pTries[idx];
         u4 start = pTry->startAddr;
         u4 end = start + pTry->insnCount;
         u4 addr;
@@ -180,8 +177,9 @@
     }
 
     /* Iterate over each of the handlers to verify target addresses. */
-    offset = dexGetFirstHandlerOffset(pCode);
-    for (i = 0; i < handlersSize; i++) {
+    u4 handlersSize = dexGetHandlersSize(pCode);
+    u4 offset = dexGetFirstHandlerOffset(pCode);
+    for (idx = 0; idx < handlersSize; idx++) {
         DexCatchIterator iterator;
         dexCatchIteratorInit(&iterator, pCode, offset);
 
@@ -252,6 +250,7 @@
     vdata.insnRegCount = meth->registersSize;
     vdata.insnFlags = NULL;
     vdata.uninitMap = NULL;
+    vdata.basicBlocks = NULL;
 
     /*
      * If there aren't any instructions, make sure that's expected, then
@@ -285,8 +284,7 @@
      * TODO: Consider keeping a reusable pre-allocated array sitting
      * around for smaller methods.
      */
-    vdata.insnFlags = (InsnFlags*)
-        calloc(dvmGetMethodInsnsSize(meth), sizeof(InsnFlags));
+    vdata.insnFlags = (InsnFlags*) calloc(vdata.insnsSize, sizeof(InsnFlags));
     if (vdata.insnFlags == NULL)
         goto bail;
 
@@ -307,12 +305,14 @@
 
     /*
      * Set the "in try" flags for all instructions guarded by a "try" block.
+     * Also sets the "branch target" flag on exception handlers.
      */
     if (!scanTryCatchBlocks(meth, vdata.insnFlags))
         goto bail;
 
     /*
-     * Perform static instruction verification.
+     * Perform static instruction verification.  Also sets the "branch
+     * target" flags.
      */
     if (!verifyInstructions(&vdata))
         goto bail;
@@ -332,6 +332,7 @@
     result = true;
 
 bail:
+    dvmFreeVfyBasicBlocks(&vdata);
     dvmFreeUninitInstanceMap(vdata.uninitMap);
     free(vdata.insnFlags);
     return result;
@@ -717,10 +718,10 @@
     int curOffset, bool selfOkay)
 {
     const int insnCount = dvmGetMethodInsnsSize(meth);
-    int offset, absOffset;
+    s4 offset, absOffset;
     bool isConditional;
 
-    if (!dvmGetBranchTarget(meth, insnFlags, curOffset, &offset,
+    if (!dvmGetBranchOffset(meth, insnFlags, curOffset, &offset,
             &isConditional))
         return false;
 
@@ -950,20 +951,25 @@
             okay &= checkStringIndex(pDvmDex, decInsn.vB);
             break;
         case OP_CONST_CLASS:
+        case OP_CONST_CLASS_JUMBO:
         case OP_CHECK_CAST:
+        case OP_CHECK_CAST_JUMBO:
             okay &= checkRegisterIndex(meth, decInsn.vA);
             okay &= checkTypeIndex(pDvmDex, decInsn.vB);
             break;
         case OP_INSTANCE_OF:
+        case OP_INSTANCE_OF_JUMBO:
             okay &= checkRegisterIndex(meth, decInsn.vA);
             okay &= checkRegisterIndex(meth, decInsn.vB);
             okay &= checkTypeIndex(pDvmDex, decInsn.vC);
             break;
         case OP_NEW_INSTANCE:
+        case OP_NEW_INSTANCE_JUMBO:
             okay &= checkRegisterIndex(meth, decInsn.vA);
             okay &= checkNewInstance(pDvmDex, decInsn.vB);
             break;
         case OP_NEW_ARRAY:
+        case OP_NEW_ARRAY_JUMBO:
             okay &= checkRegisterIndex(meth, decInsn.vA);
             okay &= checkRegisterIndex(meth, decInsn.vB);
             okay &= checkNewArray(pDvmDex, decInsn.vC);
@@ -1071,44 +1077,72 @@
             okay &= checkBranchTarget(meth, insnFlags, codeOffset, false);
             break;
         case OP_IGET:
+        case OP_IGET_JUMBO:
         case OP_IGET_OBJECT:
+        case OP_IGET_OBJECT_JUMBO:
         case OP_IGET_BOOLEAN:
+        case OP_IGET_BOOLEAN_JUMBO:
         case OP_IGET_BYTE:
+        case OP_IGET_BYTE_JUMBO:
         case OP_IGET_CHAR:
+        case OP_IGET_CHAR_JUMBO:
         case OP_IGET_SHORT:
+        case OP_IGET_SHORT_JUMBO:
         case OP_IPUT:
+        case OP_IPUT_JUMBO:
         case OP_IPUT_OBJECT:
+        case OP_IPUT_OBJECT_JUMBO:
         case OP_IPUT_BOOLEAN:
+        case OP_IPUT_BOOLEAN_JUMBO:
         case OP_IPUT_BYTE:
+        case OP_IPUT_BYTE_JUMBO:
         case OP_IPUT_CHAR:
+        case OP_IPUT_CHAR_JUMBO:
         case OP_IPUT_SHORT:
+        case OP_IPUT_SHORT_JUMBO:
             okay &= checkRegisterIndex(meth, decInsn.vA);
             okay &= checkRegisterIndex(meth, decInsn.vB);
             okay &= checkFieldIndex(pDvmDex, decInsn.vC);
             break;
         case OP_IGET_WIDE:
+        case OP_IGET_WIDE_JUMBO:
         case OP_IPUT_WIDE:
+        case OP_IPUT_WIDE_JUMBO:
             okay &= checkWideRegisterIndex(meth, decInsn.vA);
             okay &= checkRegisterIndex(meth, decInsn.vB);
             okay &= checkFieldIndex(pDvmDex, decInsn.vC);
             break;
         case OP_SGET:
+        case OP_SGET_JUMBO:
         case OP_SGET_OBJECT:
+        case OP_SGET_OBJECT_JUMBO:
         case OP_SGET_BOOLEAN:
+        case OP_SGET_BOOLEAN_JUMBO:
         case OP_SGET_BYTE:
+        case OP_SGET_BYTE_JUMBO:
         case OP_SGET_CHAR:
+        case OP_SGET_CHAR_JUMBO:
         case OP_SGET_SHORT:
+        case OP_SGET_SHORT_JUMBO:
         case OP_SPUT:
+        case OP_SPUT_JUMBO:
         case OP_SPUT_OBJECT:
+        case OP_SPUT_OBJECT_JUMBO:
         case OP_SPUT_BOOLEAN:
+        case OP_SPUT_BOOLEAN_JUMBO:
         case OP_SPUT_BYTE:
+        case OP_SPUT_BYTE_JUMBO:
         case OP_SPUT_CHAR:
+        case OP_SPUT_CHAR_JUMBO:
         case OP_SPUT_SHORT:
+        case OP_SPUT_SHORT_JUMBO:
             okay &= checkRegisterIndex(meth, decInsn.vA);
             okay &= checkFieldIndex(pDvmDex, decInsn.vB);
             break;
         case OP_SGET_WIDE:
+        case OP_SGET_WIDE_JUMBO:
         case OP_SPUT_WIDE:
+        case OP_SPUT_WIDE_JUMBO:
             okay &= checkWideRegisterIndex(meth, decInsn.vA);
             okay &= checkFieldIndex(pDvmDex, decInsn.vB);
             break;
@@ -1118,6 +1152,7 @@
             okay &= checkVarargRegs(meth, &decInsn);
             break;
         case OP_FILLED_NEW_ARRAY_RANGE:
+        case OP_FILLED_NEW_ARRAY_JUMBO:
             okay &= checkTypeIndex(pDvmDex, decInsn.vB);
             okay &= checkVarargRangeRegs(meth, &decInsn);
             break;
@@ -1131,10 +1166,15 @@
             okay &= checkVarargRegs(meth, &decInsn);
             break;
         case OP_INVOKE_VIRTUAL_RANGE:
+        case OP_INVOKE_VIRTUAL_JUMBO:
         case OP_INVOKE_SUPER_RANGE:
+        case OP_INVOKE_SUPER_JUMBO:
         case OP_INVOKE_DIRECT_RANGE:
+        case OP_INVOKE_DIRECT_JUMBO:
         case OP_INVOKE_STATIC_RANGE:
+        case OP_INVOKE_STATIC_JUMBO:
         case OP_INVOKE_INTERFACE_RANGE:
+        case OP_INVOKE_INTERFACE_JUMBO:
             okay &= checkMethodIndex(pDvmDex, decInsn.vB);
             okay &= checkVarargRangeRegs(meth, &decInsn);
             break;
@@ -1152,11 +1192,25 @@
         case OP_IPUT_WIDE_VOLATILE:
         case OP_SGET_WIDE_VOLATILE:
         case OP_SPUT_WIDE_VOLATILE:
+        case OP_IGET_VOLATILE_JUMBO:
+        case OP_IPUT_VOLATILE_JUMBO:
+        case OP_SGET_VOLATILE_JUMBO:
+        case OP_SPUT_VOLATILE_JUMBO:
+        case OP_IGET_OBJECT_VOLATILE_JUMBO:
+        case OP_IPUT_OBJECT_VOLATILE_JUMBO:
+        case OP_SGET_OBJECT_VOLATILE_JUMBO:
+        case OP_SPUT_OBJECT_VOLATILE_JUMBO:
+        case OP_IGET_WIDE_VOLATILE_JUMBO:
+        case OP_IPUT_WIDE_VOLATILE_JUMBO:
+        case OP_SGET_WIDE_VOLATILE_JUMBO:
+        case OP_SPUT_WIDE_VOLATILE_JUMBO:
         case OP_BREAKPOINT:
         case OP_THROW_VERIFICATION_ERROR:
+        case OP_THROW_VERIFICATION_ERROR_JUMBO:
         case OP_EXECUTE_INLINE:
         case OP_EXECUTE_INLINE_RANGE:
-        case OP_INVOKE_DIRECT_EMPTY:
+        case OP_INVOKE_OBJECT_INIT_RANGE:
+        case OP_INVOKE_OBJECT_INIT_JUMBO:
         case OP_RETURN_VOID_BARRIER:
         case OP_IGET_QUICK:
         case OP_IGET_WIDE_QUICK:
@@ -1178,7 +1232,210 @@
         case OP_UNUSED_79:
         case OP_UNUSED_7A:
         case OP_DISPATCH_FF:
-            LOGE("VFY: unexpected opcode %02x\n", decInsn.opcode);
+        case OP_UNUSED_27FF:
+        case OP_UNUSED_28FF:
+        case OP_UNUSED_29FF:
+        case OP_UNUSED_2AFF:
+        case OP_UNUSED_2BFF:
+        case OP_UNUSED_2CFF:
+        case OP_UNUSED_2DFF:
+        case OP_UNUSED_2EFF:
+        case OP_UNUSED_2FFF:
+        case OP_UNUSED_30FF:
+        case OP_UNUSED_31FF:
+        case OP_UNUSED_32FF:
+        case OP_UNUSED_33FF:
+        case OP_UNUSED_34FF:
+        case OP_UNUSED_35FF:
+        case OP_UNUSED_36FF:
+        case OP_UNUSED_37FF:
+        case OP_UNUSED_38FF:
+        case OP_UNUSED_39FF:
+        case OP_UNUSED_3AFF:
+        case OP_UNUSED_3BFF:
+        case OP_UNUSED_3CFF:
+        case OP_UNUSED_3DFF:
+        case OP_UNUSED_3EFF:
+        case OP_UNUSED_3FFF:
+        case OP_UNUSED_40FF:
+        case OP_UNUSED_41FF:
+        case OP_UNUSED_42FF:
+        case OP_UNUSED_43FF:
+        case OP_UNUSED_44FF:
+        case OP_UNUSED_45FF:
+        case OP_UNUSED_46FF:
+        case OP_UNUSED_47FF:
+        case OP_UNUSED_48FF:
+        case OP_UNUSED_49FF:
+        case OP_UNUSED_4AFF:
+        case OP_UNUSED_4BFF:
+        case OP_UNUSED_4CFF:
+        case OP_UNUSED_4DFF:
+        case OP_UNUSED_4EFF:
+        case OP_UNUSED_4FFF:
+        case OP_UNUSED_50FF:
+        case OP_UNUSED_51FF:
+        case OP_UNUSED_52FF:
+        case OP_UNUSED_53FF:
+        case OP_UNUSED_54FF:
+        case OP_UNUSED_55FF:
+        case OP_UNUSED_56FF:
+        case OP_UNUSED_57FF:
+        case OP_UNUSED_58FF:
+        case OP_UNUSED_59FF:
+        case OP_UNUSED_5AFF:
+        case OP_UNUSED_5BFF:
+        case OP_UNUSED_5CFF:
+        case OP_UNUSED_5DFF:
+        case OP_UNUSED_5EFF:
+        case OP_UNUSED_5FFF:
+        case OP_UNUSED_60FF:
+        case OP_UNUSED_61FF:
+        case OP_UNUSED_62FF:
+        case OP_UNUSED_63FF:
+        case OP_UNUSED_64FF:
+        case OP_UNUSED_65FF:
+        case OP_UNUSED_66FF:
+        case OP_UNUSED_67FF:
+        case OP_UNUSED_68FF:
+        case OP_UNUSED_69FF:
+        case OP_UNUSED_6AFF:
+        case OP_UNUSED_6BFF:
+        case OP_UNUSED_6CFF:
+        case OP_UNUSED_6DFF:
+        case OP_UNUSED_6EFF:
+        case OP_UNUSED_6FFF:
+        case OP_UNUSED_70FF:
+        case OP_UNUSED_71FF:
+        case OP_UNUSED_72FF:
+        case OP_UNUSED_73FF:
+        case OP_UNUSED_74FF:
+        case OP_UNUSED_75FF:
+        case OP_UNUSED_76FF:
+        case OP_UNUSED_77FF:
+        case OP_UNUSED_78FF:
+        case OP_UNUSED_79FF:
+        case OP_UNUSED_7AFF:
+        case OP_UNUSED_7BFF:
+        case OP_UNUSED_7CFF:
+        case OP_UNUSED_7DFF:
+        case OP_UNUSED_7EFF:
+        case OP_UNUSED_7FFF:
+        case OP_UNUSED_80FF:
+        case OP_UNUSED_81FF:
+        case OP_UNUSED_82FF:
+        case OP_UNUSED_83FF:
+        case OP_UNUSED_84FF:
+        case OP_UNUSED_85FF:
+        case OP_UNUSED_86FF:
+        case OP_UNUSED_87FF:
+        case OP_UNUSED_88FF:
+        case OP_UNUSED_89FF:
+        case OP_UNUSED_8AFF:
+        case OP_UNUSED_8BFF:
+        case OP_UNUSED_8CFF:
+        case OP_UNUSED_8DFF:
+        case OP_UNUSED_8EFF:
+        case OP_UNUSED_8FFF:
+        case OP_UNUSED_90FF:
+        case OP_UNUSED_91FF:
+        case OP_UNUSED_92FF:
+        case OP_UNUSED_93FF:
+        case OP_UNUSED_94FF:
+        case OP_UNUSED_95FF:
+        case OP_UNUSED_96FF:
+        case OP_UNUSED_97FF:
+        case OP_UNUSED_98FF:
+        case OP_UNUSED_99FF:
+        case OP_UNUSED_9AFF:
+        case OP_UNUSED_9BFF:
+        case OP_UNUSED_9CFF:
+        case OP_UNUSED_9DFF:
+        case OP_UNUSED_9EFF:
+        case OP_UNUSED_9FFF:
+        case OP_UNUSED_A0FF:
+        case OP_UNUSED_A1FF:
+        case OP_UNUSED_A2FF:
+        case OP_UNUSED_A3FF:
+        case OP_UNUSED_A4FF:
+        case OP_UNUSED_A5FF:
+        case OP_UNUSED_A6FF:
+        case OP_UNUSED_A7FF:
+        case OP_UNUSED_A8FF:
+        case OP_UNUSED_A9FF:
+        case OP_UNUSED_AAFF:
+        case OP_UNUSED_ABFF:
+        case OP_UNUSED_ACFF:
+        case OP_UNUSED_ADFF:
+        case OP_UNUSED_AEFF:
+        case OP_UNUSED_AFFF:
+        case OP_UNUSED_B0FF:
+        case OP_UNUSED_B1FF:
+        case OP_UNUSED_B2FF:
+        case OP_UNUSED_B3FF:
+        case OP_UNUSED_B4FF:
+        case OP_UNUSED_B5FF:
+        case OP_UNUSED_B6FF:
+        case OP_UNUSED_B7FF:
+        case OP_UNUSED_B8FF:
+        case OP_UNUSED_B9FF:
+        case OP_UNUSED_BAFF:
+        case OP_UNUSED_BBFF:
+        case OP_UNUSED_BCFF:
+        case OP_UNUSED_BDFF:
+        case OP_UNUSED_BEFF:
+        case OP_UNUSED_BFFF:
+        case OP_UNUSED_C0FF:
+        case OP_UNUSED_C1FF:
+        case OP_UNUSED_C2FF:
+        case OP_UNUSED_C3FF:
+        case OP_UNUSED_C4FF:
+        case OP_UNUSED_C5FF:
+        case OP_UNUSED_C6FF:
+        case OP_UNUSED_C7FF:
+        case OP_UNUSED_C8FF:
+        case OP_UNUSED_C9FF:
+        case OP_UNUSED_CAFF:
+        case OP_UNUSED_CBFF:
+        case OP_UNUSED_CCFF:
+        case OP_UNUSED_CDFF:
+        case OP_UNUSED_CEFF:
+        case OP_UNUSED_CFFF:
+        case OP_UNUSED_D0FF:
+        case OP_UNUSED_D1FF:
+        case OP_UNUSED_D2FF:
+        case OP_UNUSED_D3FF:
+        case OP_UNUSED_D4FF:
+        case OP_UNUSED_D5FF:
+        case OP_UNUSED_D6FF:
+        case OP_UNUSED_D7FF:
+        case OP_UNUSED_D8FF:
+        case OP_UNUSED_D9FF:
+        case OP_UNUSED_DAFF:
+        case OP_UNUSED_DBFF:
+        case OP_UNUSED_DCFF:
+        case OP_UNUSED_DDFF:
+        case OP_UNUSED_DEFF:
+        case OP_UNUSED_DFFF:
+        case OP_UNUSED_E0FF:
+        case OP_UNUSED_E1FF:
+        case OP_UNUSED_E2FF:
+        case OP_UNUSED_E3FF:
+        case OP_UNUSED_E4FF:
+        case OP_UNUSED_E5FF:
+        case OP_UNUSED_E6FF:
+        case OP_UNUSED_E7FF:
+        case OP_UNUSED_E8FF:
+        case OP_UNUSED_E9FF:
+        case OP_UNUSED_EAFF:
+        case OP_UNUSED_EBFF:
+        case OP_UNUSED_ECFF:
+        case OP_UNUSED_EDFF:
+        case OP_UNUSED_EEFF:
+        case OP_UNUSED_EFFF:
+        case OP_UNUSED_F0FF:
+        case OP_UNUSED_F1FF:
+            LOGE("VFY: unexpected opcode %04x\n", decInsn.opcode);
             okay = false;
             break;
 
@@ -1208,8 +1465,12 @@
              * This instruction is probably a GC point.  Branch instructions
              * only qualify if they go backward, so for those we need to
              * check the offset.
+             *
+             * TODO: we could also scan the targets of a "switch" statement,
+             * and if none of them branch backward we could ignore that
+             * instruction as well.
              */
-            int offset;
+            s4 offset;
             bool unused;
             if ((opFlags & kInstrCanBranch) != 0) {
                 /*
@@ -1217,7 +1478,7 @@
                  * component was tagged with kVfyBranch, but it's easier
                  * to just grab it again than cart the state around.
                  */
-                if (!dvmGetBranchTarget(meth, insnFlags, codeOffset, &offset,
+                if (!dvmGetBranchOffset(meth, insnFlags, codeOffset, &offset,
                         &unused))
                 {
                     /* should never happen */
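The hunks above make the GC-point test for branches explicit: the renamed
dvmGetBranchOffset() hands back a signed offset (now an s4 rather than an
int), and a branch only needs to be treated as a GC point when that offset
is negative, i.e. when it can go backward and form a loop.  As a minimal
sketch of that check, separate from the patch itself (the helper name below
is hypothetical; only the s4 typedef mirrors Dalvik's own):

    #include <stdbool.h>
    #include <stdint.h>

    typedef int32_t s4;   /* assumption: matches Dalvik's signed 32-bit type */

    /*
     * Validate a branch at "curOffset" (in 16-bit code units) with signed
     * relative offset "relOffset": the target must fall within the method's
     * "insnCount" code units, and only backward branches (relOffset < 0)
     * need to be flagged as GC points.
     */
    static bool checkBranch(s4 curOffset, s4 relOffset, s4 insnCount,
        bool* isBackwardBranch)
    {
        s4 absOffset = curOffset + relOffset;
        *isBackwardBranch = (relOffset < 0);
        return absOffset >= 0 && absOffset < insnCount;
    }
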
diff --git a/vm/analysis/Liveness.c b/vm/analysis/Liveness.c
new file mode 100644
index 0000000..2f5173b
--- /dev/null
+++ b/vm/analysis/Liveness.c
@@ -0,0 +1,1082 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Liveness analysis for Dalvik bytecode.
+ */
+#include "Dalvik.h"
+#include "analysis/Liveness.h"
+#include "analysis/CodeVerify.h"
+
+static bool processInstruction(VerifierData* vdata, u4 curIdx,
+    BitVector* workBits);
+static bool markDebugLocals(VerifierData* vdata);
+static void dumpLiveState(const VerifierData* vdata, u4 curIdx,
+    const BitVector* workBits);
+
+
+/*
+ * Create a table of instruction widths that indicate the width of the
+ * *previous* instruction.  The values are copied from the width table
+ * in "vdata", not derived from the instruction stream.
+ *
+ * Caller must free the return value.
+ */
+static InstructionWidth* createBackwardWidthTable(VerifierData* vdata)
+{
+    InstructionWidth* widths;
+
+    widths = (InstructionWidth*)
+            calloc(vdata->insnsSize, sizeof(InstructionWidth));
+    if (widths == NULL)
+        return NULL;
+
+    unsigned int idx;
+    u4 insnWidth = 0;
+    for (idx = 0; idx < vdata->insnsSize; ) {
+        widths[idx] = insnWidth;
+        insnWidth = dvmInsnGetWidth(vdata->insnFlags, idx);
+        idx += insnWidth;
+    }
+
+    return widths;
+}
+
+/*
+ * Compute the "liveness" of every register at all GC points.
+ */
+bool dvmComputeLiveness(VerifierData* vdata)
+{
+    const InsnFlags* insnFlags = vdata->insnFlags;
+    InstructionWidth* backwardWidth;
+    VfyBasicBlock* startGuess = NULL;
+    BitVector* workBits;
+    bool result = false;
+
+    bool verbose = false; //= dvmWantVerboseVerification(vdata->method);
+    if (verbose) {
+        const Method* meth = vdata->method;
+        LOGI("Computing liveness for %s.%s:%s\n",
+            meth->clazz->descriptor, meth->name, meth->shorty);
+    }
+
+    assert(vdata->registerLines != NULL);
+
+    backwardWidth = createBackwardWidthTable(vdata);
+    if (backwardWidth == NULL)
+        goto bail;
+
+    /*
+     * Allocate space for intra-block work set.  Does not include space
+     * for method result "registers", which aren't visible to the GC.
+     * (They would be made live by move-result and then die on the
+     * instruction immediately before it.)
+     */
+    workBits = dvmAllocBitVector(vdata->insnRegCount, false);
+    if (workBits == NULL)
+        goto bail;
+
+    /*
+     * We continue until all blocks have been visited, and no block
+     * requires further attention ("visited" is set and "changed" is
+     * clear).
+     *
+     * TODO: consider creating a "dense" array of basic blocks to make
+     * the walking faster.
+     */
+    int iter = 0;
+    while (true) {
+        VfyBasicBlock* workBlock = NULL;
+        unsigned int idx;
+
+        if (iter++ > 100000) {
+            LOG_VFY_METH(vdata->method, "liveness analysis failed to converge");
+            dvmAbort();
+        }
+
+        /*
+         * If a block is marked "changed", we stop and handle it.  If it
+         * just hasn't been visited yet, we remember it but keep searching
+         * for one that has been changed.
+         *
+         * The thought here is that this is more likely to let us work
+         * from end to start, which reduces the amount of re-evaluation
+         * required (both by using "changed" as a work list, and by picking
+         * un-visited blocks from the tail end of the method).
+         */
+        if (startGuess != NULL) {
+            assert(startGuess->changed);
+            workBlock = startGuess;
+        } else {
+            for (idx = 0; idx < vdata->insnsSize; idx++) {
+                VfyBasicBlock* block = vdata->basicBlocks[idx];
+                if (block == NULL)
+                    continue;
+
+                if (block->changed) {
+                    workBlock = block;
+                    break;
+                } else if (!block->visited) {
+                    workBlock = block;
+                }
+            }
+        }
+
+        if (workBlock == NULL) {
+            /* all done */
+            break;
+        }
+
+        assert(workBlock->changed || !workBlock->visited);
+        startGuess = NULL;
+
+        /*
+         * Load work bits.  These represent the liveness of registers
+         * after the last instruction in the block has finished executing.
+         */
+        assert(workBlock->liveRegs != NULL);
+        dvmCopyBitVector(workBits, workBlock->liveRegs);
+        if (verbose) {
+            LOGI("Loaded work bits from last=0x%04x\n", workBlock->lastAddr);
+            dumpLiveState(vdata, 0xfffd, workBlock->liveRegs);
+            dumpLiveState(vdata, 0xffff, workBits);
+        }
+
+        /*
+         * Process a single basic block.
+         *
+         * If this instruction is a GC point, we want to save the result
+         * in the RegisterLine.
+         *
+         * We don't break basic blocks on every GC point -- in particular,
+         * instructions that might throw but have no "try" block don't
+         * end a basic block -- so there could be more than one GC point
+         * in a given basic block.
+         *
+         * We could change this, but it turns out to be not all that useful.
+         * At first glance it appears that we could share the liveness bit
+         * vector between the basic block struct and the register line,
+         * but the basic block needs to reflect the state *after* the
+         * instruction has finished, while the GC points need to describe
+         * the state before the instruction starts.
+         */
+        u4 curIdx = workBlock->lastAddr;
+        while (true) {
+            if (!processInstruction(vdata, curIdx, workBits))
+                goto bail;
+
+            if (verbose) {
+                dumpLiveState(vdata, curIdx + 0x8000, workBits);
+            }
+
+            if (dvmInsnIsGcPoint(insnFlags, curIdx)) {
+                BitVector* lineBits = vdata->registerLines[curIdx].liveRegs;
+                if (lineBits == NULL) {
+                    lineBits = vdata->registerLines[curIdx].liveRegs =
+                        dvmAllocBitVector(vdata->insnRegCount, false);
+                }
+                dvmCopyBitVector(lineBits, workBits);
+            }
+
+            if (curIdx == workBlock->firstAddr)
+                break;
+            assert(curIdx >= backwardWidth[curIdx]);
+            curIdx -= backwardWidth[curIdx];
+        }
+
+        workBlock->visited = true;
+        workBlock->changed = false;
+
+        if (verbose) {
+            dumpLiveState(vdata, curIdx, workBits);
+        }
+
+        /*
+         * Merge changes to all predecessors.  If the new bits don't match
+         * the old bits, set the "changed" flag.
+         */
+        PointerSet* preds = workBlock->predecessors;
+        size_t numPreds = dvmPointerSetGetCount(preds);
+        unsigned int predIdx;
+
+        for (predIdx = 0; predIdx < numPreds; predIdx++) {
+            VfyBasicBlock* pred =
+                    (VfyBasicBlock*) dvmPointerSetGetEntry(preds, predIdx);
+
+            pred->changed = dvmCheckMergeBitVectors(pred->liveRegs, workBits);
+            if (verbose) {
+                LOGI("merging cur=%04x into pred last=%04x (ch=%d)\n",
+                    curIdx, pred->lastAddr, pred->changed);
+                dumpLiveState(vdata, 0xfffa, pred->liveRegs);
+                dumpLiveState(vdata, 0xfffb, workBits);
+            }
+
+            /*
+             * We want to set the "changed" flag on unvisited predecessors
+             * as a way of guiding the verifier through basic blocks in
+             * a reasonable order.  We can't count on variable liveness
+             * changing, so we force "changed" to true even if it hasn't.
+             */
+            if (!pred->visited)
+                pred->changed = true;
+
+            /*
+             * Keep track of one of the changed blocks so we can start
+             * there instead of having to scan through the list.
+             */
+            if (pred->changed)
+                startGuess = pred;
+        }
+    }
+
+#ifndef NDEBUG
+    /*
+     * Sanity check: verify that all GC point register lines have a
+     * liveness bit vector allocated.  Also, we're not expecting non-GC
+     * points to have them.
+     */
+    u4 checkIdx;
+    for (checkIdx = 0; checkIdx < vdata->insnsSize; ) {
+        if (dvmInsnIsGcPoint(insnFlags, checkIdx)) {
+            if (vdata->registerLines[checkIdx].liveRegs == NULL) {
+                LOG_VFY_METH(vdata->method,
+                    "GLITCH: no liveRegs for GC point 0x%04x\n", checkIdx);
+                dvmAbort();
+            }
+        } else if (vdata->registerLines[checkIdx].liveRegs != NULL) {
+            LOG_VFY_METH(vdata->method,
+                "GLITCH: liveRegs for non-GC point 0x%04x\n", checkIdx);
+            dvmAbort();
+        }
+        u4 insnWidth = dvmInsnGetWidth(insnFlags, checkIdx);
+        checkIdx += insnWidth;
+    }
+#endif
+
+    /*
+     * Factor in the debug info, if any.
+     */
+    if (!markDebugLocals(vdata))
+        goto bail;
+
+    result = true;
+
+bail:
+    free(backwardWidth);
+    return result;
+}
+
+
+/*
+ * Add a register to the LIVE set.
+ */
+static inline void GEN(BitVector* workBits, u4 regIndex)
+{
+    dvmSetBit(workBits, regIndex);
+}
+
+/*
+ * Add a register pair to the LIVE set.
+ */
+static inline void GENW(BitVector* workBits, u4 regIndex)
+{
+    dvmSetBit(workBits, regIndex);
+    dvmSetBit(workBits, regIndex+1);
+}
+
+/*
+ * Remove a register from the LIVE set.
+ */
+static inline void KILL(BitVector* workBits, u4 regIndex)
+{
+    dvmClearBit(workBits, regIndex);
+}
+
+/*
+ * Remove a register pair from the LIVE set.
+ */
+static inline void KILLW(BitVector* workBits, u4 regIndex)
+{
+    dvmClearBit(workBits, regIndex);
+    dvmClearBit(workBits, regIndex+1);
+}
+
+/*
+ * Process a single instruction.
+ *
+ * Returns "false" if something goes fatally wrong.
+ */
+static bool processInstruction(VerifierData* vdata, u4 insnIdx,
+    BitVector* workBits)
+{
+    const Method* meth = vdata->method;
+    const u2* insns = meth->insns + insnIdx;
+    DecodedInstruction decInsn;
+
+    dexDecodeInstruction(insns, &decInsn);
+
+    /*
+     * Add registers to the "GEN" or "KILL" sets.  We want to do KILL
+     * before GEN to handle the case where the source and destination
+     * registers are the same.
+     */
+    switch (decInsn.opcode) {
+    case OP_NOP:
+    case OP_RETURN_VOID:
+    case OP_GOTO:
+    case OP_GOTO_16:
+    case OP_GOTO_32:
+        /* no registers are used */
+        break;
+
+    case OP_RETURN:
+    case OP_RETURN_OBJECT:
+    case OP_MONITOR_ENTER:
+    case OP_MONITOR_EXIT:
+    case OP_CHECK_CAST:
+    case OP_CHECK_CAST_JUMBO:
+    case OP_THROW:
+    case OP_PACKED_SWITCH:
+    case OP_SPARSE_SWITCH:
+    case OP_FILL_ARRAY_DATA:
+    case OP_IF_EQZ:
+    case OP_IF_NEZ:
+    case OP_IF_LTZ:
+    case OP_IF_GEZ:
+    case OP_IF_GTZ:
+    case OP_IF_LEZ:
+    case OP_SPUT:
+    case OP_SPUT_JUMBO:
+    case OP_SPUT_BOOLEAN:
+    case OP_SPUT_BOOLEAN_JUMBO:
+    case OP_SPUT_BYTE:
+    case OP_SPUT_BYTE_JUMBO:
+    case OP_SPUT_CHAR:
+    case OP_SPUT_CHAR_JUMBO:
+    case OP_SPUT_SHORT:
+    case OP_SPUT_SHORT_JUMBO:
+    case OP_SPUT_OBJECT:
+    case OP_SPUT_OBJECT_JUMBO:
+        /* action <- vA */
+        GEN(workBits, decInsn.vA);
+        break;
+
+    case OP_RETURN_WIDE:
+    case OP_SPUT_WIDE:
+    case OP_SPUT_WIDE_JUMBO:
+        /* action <- vA(wide) */
+        GENW(workBits, decInsn.vA);
+        break;
+
+    case OP_IF_EQ:
+    case OP_IF_NE:
+    case OP_IF_LT:
+    case OP_IF_GE:
+    case OP_IF_GT:
+    case OP_IF_LE:
+    case OP_IPUT:
+    case OP_IPUT_JUMBO:
+    case OP_IPUT_BOOLEAN:
+    case OP_IPUT_BOOLEAN_JUMBO:
+    case OP_IPUT_BYTE:
+    case OP_IPUT_BYTE_JUMBO:
+    case OP_IPUT_CHAR:
+    case OP_IPUT_CHAR_JUMBO:
+    case OP_IPUT_SHORT:
+    case OP_IPUT_SHORT_JUMBO:
+    case OP_IPUT_OBJECT:
+    case OP_IPUT_OBJECT_JUMBO:
+        /* action <- vA, vB */
+        GEN(workBits, decInsn.vA);
+        GEN(workBits, decInsn.vB);
+        break;
+
+    case OP_IPUT_WIDE:
+    case OP_IPUT_WIDE_JUMBO:
+        /* action <- vA(wide), vB */
+        GENW(workBits, decInsn.vA);
+        GEN(workBits, decInsn.vB);
+        break;
+
+    case OP_APUT:
+    case OP_APUT_BOOLEAN:
+    case OP_APUT_BYTE:
+    case OP_APUT_CHAR:
+    case OP_APUT_SHORT:
+    case OP_APUT_OBJECT:
+        /* action <- vA, vB, vC */
+        GEN(workBits, decInsn.vA);
+        GEN(workBits, decInsn.vB);
+        GEN(workBits, decInsn.vC);
+        break;
+
+    case OP_APUT_WIDE:
+        /* action <- vA(wide), vB, vC */
+        GENW(workBits, decInsn.vA);
+        GEN(workBits, decInsn.vB);
+        GEN(workBits, decInsn.vC);
+        break;
+
+    case OP_FILLED_NEW_ARRAY:
+    case OP_INVOKE_VIRTUAL:
+    case OP_INVOKE_SUPER:
+    case OP_INVOKE_DIRECT:
+    case OP_INVOKE_STATIC:
+    case OP_INVOKE_INTERFACE:
+        /* action <- vararg */
+        {
+            unsigned int idx;
+            for (idx = 0; idx < decInsn.vA; idx++) {
+                GEN(workBits, decInsn.arg[idx]);
+            }
+        }
+        break;
+
+    case OP_FILLED_NEW_ARRAY_RANGE:
+    case OP_FILLED_NEW_ARRAY_JUMBO:
+    case OP_INVOKE_VIRTUAL_RANGE:
+    case OP_INVOKE_VIRTUAL_JUMBO:
+    case OP_INVOKE_SUPER_RANGE:
+    case OP_INVOKE_SUPER_JUMBO:
+    case OP_INVOKE_DIRECT_RANGE:
+    case OP_INVOKE_DIRECT_JUMBO:
+    case OP_INVOKE_STATIC_RANGE:
+    case OP_INVOKE_STATIC_JUMBO:
+    case OP_INVOKE_INTERFACE_RANGE:
+    case OP_INVOKE_INTERFACE_JUMBO:
+        /* action <- vararg/range */
+        {
+            unsigned int idx;
+            for (idx = 0; idx < decInsn.vA; idx++) {
+                GEN(workBits, decInsn.vC + idx);
+            }
+        }
+        break;
+
+    case OP_MOVE_RESULT:
+    case OP_MOVE_RESULT_WIDE:
+    case OP_MOVE_RESULT_OBJECT:
+    case OP_MOVE_EXCEPTION:
+    case OP_CONST_4:
+    case OP_CONST_16:
+    case OP_CONST:
+    case OP_CONST_HIGH16:
+    case OP_CONST_STRING:
+    case OP_CONST_STRING_JUMBO:
+    case OP_CONST_CLASS:
+    case OP_CONST_CLASS_JUMBO:
+    case OP_NEW_INSTANCE:
+    case OP_NEW_INSTANCE_JUMBO:
+    case OP_SGET:
+    case OP_SGET_JUMBO:
+    case OP_SGET_BOOLEAN:
+    case OP_SGET_BOOLEAN_JUMBO:
+    case OP_SGET_BYTE:
+    case OP_SGET_BYTE_JUMBO:
+    case OP_SGET_CHAR:
+    case OP_SGET_CHAR_JUMBO:
+    case OP_SGET_SHORT:
+    case OP_SGET_SHORT_JUMBO:
+    case OP_SGET_OBJECT:
+    case OP_SGET_OBJECT_JUMBO:
+        /* vA <- value */
+        KILL(workBits, decInsn.vA);
+        break;
+
+    case OP_CONST_WIDE_16:
+    case OP_CONST_WIDE_32:
+    case OP_CONST_WIDE:
+    case OP_CONST_WIDE_HIGH16:
+    case OP_SGET_WIDE:
+    case OP_SGET_WIDE_JUMBO:
+        /* vA(wide) <- value */
+        KILLW(workBits, decInsn.vA);
+        break;
+
+    case OP_MOVE:
+    case OP_MOVE_FROM16:
+    case OP_MOVE_16:
+    case OP_MOVE_OBJECT:
+    case OP_MOVE_OBJECT_FROM16:
+    case OP_MOVE_OBJECT_16:
+    case OP_INSTANCE_OF:
+    case OP_INSTANCE_OF_JUMBO:
+    case OP_ARRAY_LENGTH:
+    case OP_NEW_ARRAY:
+    case OP_NEW_ARRAY_JUMBO:
+    case OP_IGET:
+    case OP_IGET_JUMBO:
+    case OP_IGET_BOOLEAN:
+    case OP_IGET_BOOLEAN_JUMBO:
+    case OP_IGET_BYTE:
+    case OP_IGET_BYTE_JUMBO:
+    case OP_IGET_CHAR:
+    case OP_IGET_CHAR_JUMBO:
+    case OP_IGET_SHORT:
+    case OP_IGET_SHORT_JUMBO:
+    case OP_IGET_OBJECT:
+    case OP_IGET_OBJECT_JUMBO:
+    case OP_NEG_INT:
+    case OP_NOT_INT:
+    case OP_NEG_FLOAT:
+    case OP_INT_TO_FLOAT:
+    case OP_FLOAT_TO_INT:
+    case OP_INT_TO_BYTE:
+    case OP_INT_TO_CHAR:
+    case OP_INT_TO_SHORT:
+    case OP_ADD_INT_LIT16:
+    case OP_RSUB_INT:
+    case OP_MUL_INT_LIT16:
+    case OP_DIV_INT_LIT16:
+    case OP_REM_INT_LIT16:
+    case OP_AND_INT_LIT16:
+    case OP_OR_INT_LIT16:
+    case OP_XOR_INT_LIT16:
+    case OP_ADD_INT_LIT8:
+    case OP_RSUB_INT_LIT8:
+    case OP_MUL_INT_LIT8:
+    case OP_DIV_INT_LIT8:
+    case OP_REM_INT_LIT8:
+    case OP_SHL_INT_LIT8:
+    case OP_SHR_INT_LIT8:
+    case OP_USHR_INT_LIT8:
+    case OP_AND_INT_LIT8:
+    case OP_OR_INT_LIT8:
+    case OP_XOR_INT_LIT8:
+        /* vA <- vB */
+        KILL(workBits, decInsn.vA);
+        GEN(workBits, decInsn.vB);
+        break;
+
+    case OP_IGET_WIDE:
+    case OP_IGET_WIDE_JUMBO:
+    case OP_INT_TO_LONG:
+    case OP_INT_TO_DOUBLE:
+    case OP_FLOAT_TO_LONG:
+    case OP_FLOAT_TO_DOUBLE:
+        /* vA(wide) <- vB */
+        KILLW(workBits, decInsn.vA);
+        GEN(workBits, decInsn.vB);
+        break;
+
+    case OP_LONG_TO_INT:
+    case OP_LONG_TO_FLOAT:
+    case OP_DOUBLE_TO_INT:
+    case OP_DOUBLE_TO_FLOAT:
+        /* vA <- vB(wide) */
+        KILL(workBits, decInsn.vA);
+        GENW(workBits, decInsn.vB);
+        break;
+
+    case OP_MOVE_WIDE:
+    case OP_MOVE_WIDE_FROM16:
+    case OP_MOVE_WIDE_16:
+    case OP_NEG_LONG:
+    case OP_NOT_LONG:
+    case OP_NEG_DOUBLE:
+    case OP_LONG_TO_DOUBLE:
+    case OP_DOUBLE_TO_LONG:
+        /* vA(wide) <- vB(wide) */
+        KILLW(workBits, decInsn.vA);
+        GENW(workBits, decInsn.vB);
+        break;
+
+    case OP_CMPL_FLOAT:
+    case OP_CMPG_FLOAT:
+    case OP_AGET:
+    case OP_AGET_BOOLEAN:
+    case OP_AGET_BYTE:
+    case OP_AGET_CHAR:
+    case OP_AGET_SHORT:
+    case OP_AGET_OBJECT:
+    case OP_ADD_INT:
+    case OP_SUB_INT:
+    case OP_MUL_INT:
+    case OP_REM_INT:
+    case OP_DIV_INT:
+    case OP_AND_INT:
+    case OP_OR_INT:
+    case OP_XOR_INT:
+    case OP_SHL_INT:
+    case OP_SHR_INT:
+    case OP_USHR_INT:
+    case OP_ADD_FLOAT:
+    case OP_SUB_FLOAT:
+    case OP_MUL_FLOAT:
+    case OP_DIV_FLOAT:
+    case OP_REM_FLOAT:
+        /* vA <- vB, vC */
+        KILL(workBits, decInsn.vA);
+        GEN(workBits, decInsn.vB);
+        GEN(workBits, decInsn.vC);
+        break;
+
+    case OP_AGET_WIDE:
+        /* vA(wide) <- vB, vC */
+        KILLW(workBits, decInsn.vA);
+        GEN(workBits, decInsn.vB);
+        GEN(workBits, decInsn.vC);
+        break;
+
+    case OP_CMPL_DOUBLE:
+    case OP_CMPG_DOUBLE:
+    case OP_CMP_LONG:
+        /* vA <- vB(wide), vC(wide) */
+        KILL(workBits, decInsn.vA);
+        GENW(workBits, decInsn.vB);
+        GENW(workBits, decInsn.vC);
+        break;
+
+    case OP_SHL_LONG:
+    case OP_SHR_LONG:
+    case OP_USHR_LONG:
+        /* vA(wide) <- vB(wide), vC */
+        KILLW(workBits, decInsn.vA);
+        GENW(workBits, decInsn.vB);
+        GEN(workBits, decInsn.vC);
+        break;
+
+    case OP_ADD_LONG:
+    case OP_SUB_LONG:
+    case OP_MUL_LONG:
+    case OP_DIV_LONG:
+    case OP_REM_LONG:
+    case OP_AND_LONG:
+    case OP_OR_LONG:
+    case OP_XOR_LONG:
+    case OP_ADD_DOUBLE:
+    case OP_SUB_DOUBLE:
+    case OP_MUL_DOUBLE:
+    case OP_DIV_DOUBLE:
+    case OP_REM_DOUBLE:
+        /* vA(wide) <- vB(wide), vC(wide) */
+        KILLW(workBits, decInsn.vA);
+        GENW(workBits, decInsn.vB);
+        GENW(workBits, decInsn.vC);
+        break;
+
+    case OP_ADD_INT_2ADDR:
+    case OP_SUB_INT_2ADDR:
+    case OP_MUL_INT_2ADDR:
+    case OP_REM_INT_2ADDR:
+    case OP_SHL_INT_2ADDR:
+    case OP_SHR_INT_2ADDR:
+    case OP_USHR_INT_2ADDR:
+    case OP_AND_INT_2ADDR:
+    case OP_OR_INT_2ADDR:
+    case OP_XOR_INT_2ADDR:
+    case OP_DIV_INT_2ADDR:
+        /* vA <- vA, vB */
+        /* KILL(workBits, decInsn.vA); */
+        GEN(workBits, decInsn.vA);
+        GEN(workBits, decInsn.vB);
+        break;
+
+    case OP_SHL_LONG_2ADDR:
+    case OP_SHR_LONG_2ADDR:
+    case OP_USHR_LONG_2ADDR:
+        /* vA(wide) <- vA(wide), vB */
+        /* KILLW(workBits, decInsn.vA); */
+        GENW(workBits, decInsn.vA);
+        GEN(workBits, decInsn.vB);
+        break;
+
+    case OP_ADD_LONG_2ADDR:
+    case OP_SUB_LONG_2ADDR:
+    case OP_MUL_LONG_2ADDR:
+    case OP_DIV_LONG_2ADDR:
+    case OP_REM_LONG_2ADDR:
+    case OP_AND_LONG_2ADDR:
+    case OP_OR_LONG_2ADDR:
+    case OP_XOR_LONG_2ADDR:
+    case OP_ADD_FLOAT_2ADDR:
+    case OP_SUB_FLOAT_2ADDR:
+    case OP_MUL_FLOAT_2ADDR:
+    case OP_DIV_FLOAT_2ADDR:
+    case OP_REM_FLOAT_2ADDR:
+    case OP_ADD_DOUBLE_2ADDR:
+    case OP_SUB_DOUBLE_2ADDR:
+    case OP_MUL_DOUBLE_2ADDR:
+    case OP_DIV_DOUBLE_2ADDR:
+    case OP_REM_DOUBLE_2ADDR:
+        /* vA(wide) <- vA(wide), vB(wide) */
+        /* KILLW(workBits, decInsn.vA); */
+        GENW(workBits, decInsn.vA);
+        GENW(workBits, decInsn.vB);
+        break;
+
+    /* we will only see this if liveness analysis is done after general vfy */
+    case OP_THROW_VERIFICATION_ERROR:
+    case OP_THROW_VERIFICATION_ERROR_JUMBO:
+        /* no registers used */
+        break;
+
+    /* quickened instructions, not expected to appear */
+    case OP_EXECUTE_INLINE:
+    case OP_EXECUTE_INLINE_RANGE:
+    case OP_IGET_QUICK:
+    case OP_IGET_WIDE_QUICK:
+    case OP_IGET_OBJECT_QUICK:
+    case OP_IPUT_QUICK:
+    case OP_IPUT_WIDE_QUICK:
+    case OP_IPUT_OBJECT_QUICK:
+    case OP_INVOKE_VIRTUAL_QUICK:
+    case OP_INVOKE_VIRTUAL_QUICK_RANGE:
+    case OP_INVOKE_SUPER_QUICK:
+    case OP_INVOKE_SUPER_QUICK_RANGE:
+        /* fall through to failure */
+
+    /* correctness fixes, not expected to appear */
+    case OP_INVOKE_OBJECT_INIT_RANGE:
+    case OP_INVOKE_OBJECT_INIT_JUMBO:
+    case OP_RETURN_VOID_BARRIER:
+    case OP_SPUT_VOLATILE:
+    case OP_SPUT_VOLATILE_JUMBO:
+    case OP_SPUT_OBJECT_VOLATILE:
+    case OP_SPUT_OBJECT_VOLATILE_JUMBO:
+    case OP_SPUT_WIDE_VOLATILE:
+    case OP_SPUT_WIDE_VOLATILE_JUMBO:
+    case OP_IPUT_VOLATILE:
+    case OP_IPUT_VOLATILE_JUMBO:
+    case OP_IPUT_OBJECT_VOLATILE:
+    case OP_IPUT_OBJECT_VOLATILE_JUMBO:
+    case OP_IPUT_WIDE_VOLATILE:
+    case OP_IPUT_WIDE_VOLATILE_JUMBO:
+    case OP_SGET_VOLATILE:
+    case OP_SGET_VOLATILE_JUMBO:
+    case OP_SGET_OBJECT_VOLATILE:
+    case OP_SGET_OBJECT_VOLATILE_JUMBO:
+    case OP_SGET_WIDE_VOLATILE:
+    case OP_SGET_WIDE_VOLATILE_JUMBO:
+    case OP_IGET_VOLATILE:
+    case OP_IGET_VOLATILE_JUMBO:
+    case OP_IGET_OBJECT_VOLATILE:
+    case OP_IGET_OBJECT_VOLATILE_JUMBO:
+    case OP_IGET_WIDE_VOLATILE:
+    case OP_IGET_WIDE_VOLATILE_JUMBO:
+        /* fall through to failure */
+
+    /* these should never appear during verification */
+    case OP_UNUSED_3E:
+    case OP_UNUSED_3F:
+    case OP_UNUSED_40:
+    case OP_UNUSED_41:
+    case OP_UNUSED_42:
+    case OP_UNUSED_43:
+    case OP_UNUSED_73:
+    case OP_UNUSED_79:
+    case OP_UNUSED_7A:
+    case OP_BREAKPOINT:
+    case OP_DISPATCH_FF:
+    case OP_UNUSED_27FF:
+    case OP_UNUSED_28FF:
+    case OP_UNUSED_29FF:
+    case OP_UNUSED_2AFF:
+    case OP_UNUSED_2BFF:
+    case OP_UNUSED_2CFF:
+    case OP_UNUSED_2DFF:
+    case OP_UNUSED_2EFF:
+    case OP_UNUSED_2FFF:
+    case OP_UNUSED_30FF:
+    case OP_UNUSED_31FF:
+    case OP_UNUSED_32FF:
+    case OP_UNUSED_33FF:
+    case OP_UNUSED_34FF:
+    case OP_UNUSED_35FF:
+    case OP_UNUSED_36FF:
+    case OP_UNUSED_37FF:
+    case OP_UNUSED_38FF:
+    case OP_UNUSED_39FF:
+    case OP_UNUSED_3AFF:
+    case OP_UNUSED_3BFF:
+    case OP_UNUSED_3CFF:
+    case OP_UNUSED_3DFF:
+    case OP_UNUSED_3EFF:
+    case OP_UNUSED_3FFF:
+    case OP_UNUSED_40FF:
+    case OP_UNUSED_41FF:
+    case OP_UNUSED_42FF:
+    case OP_UNUSED_43FF:
+    case OP_UNUSED_44FF:
+    case OP_UNUSED_45FF:
+    case OP_UNUSED_46FF:
+    case OP_UNUSED_47FF:
+    case OP_UNUSED_48FF:
+    case OP_UNUSED_49FF:
+    case OP_UNUSED_4AFF:
+    case OP_UNUSED_4BFF:
+    case OP_UNUSED_4CFF:
+    case OP_UNUSED_4DFF:
+    case OP_UNUSED_4EFF:
+    case OP_UNUSED_4FFF:
+    case OP_UNUSED_50FF:
+    case OP_UNUSED_51FF:
+    case OP_UNUSED_52FF:
+    case OP_UNUSED_53FF:
+    case OP_UNUSED_54FF:
+    case OP_UNUSED_55FF:
+    case OP_UNUSED_56FF:
+    case OP_UNUSED_57FF:
+    case OP_UNUSED_58FF:
+    case OP_UNUSED_59FF:
+    case OP_UNUSED_5AFF:
+    case OP_UNUSED_5BFF:
+    case OP_UNUSED_5CFF:
+    case OP_UNUSED_5DFF:
+    case OP_UNUSED_5EFF:
+    case OP_UNUSED_5FFF:
+    case OP_UNUSED_60FF:
+    case OP_UNUSED_61FF:
+    case OP_UNUSED_62FF:
+    case OP_UNUSED_63FF:
+    case OP_UNUSED_64FF:
+    case OP_UNUSED_65FF:
+    case OP_UNUSED_66FF:
+    case OP_UNUSED_67FF:
+    case OP_UNUSED_68FF:
+    case OP_UNUSED_69FF:
+    case OP_UNUSED_6AFF:
+    case OP_UNUSED_6BFF:
+    case OP_UNUSED_6CFF:
+    case OP_UNUSED_6DFF:
+    case OP_UNUSED_6EFF:
+    case OP_UNUSED_6FFF:
+    case OP_UNUSED_70FF:
+    case OP_UNUSED_71FF:
+    case OP_UNUSED_72FF:
+    case OP_UNUSED_73FF:
+    case OP_UNUSED_74FF:
+    case OP_UNUSED_75FF:
+    case OP_UNUSED_76FF:
+    case OP_UNUSED_77FF:
+    case OP_UNUSED_78FF:
+    case OP_UNUSED_79FF:
+    case OP_UNUSED_7AFF:
+    case OP_UNUSED_7BFF:
+    case OP_UNUSED_7CFF:
+    case OP_UNUSED_7DFF:
+    case OP_UNUSED_7EFF:
+    case OP_UNUSED_7FFF:
+    case OP_UNUSED_80FF:
+    case OP_UNUSED_81FF:
+    case OP_UNUSED_82FF:
+    case OP_UNUSED_83FF:
+    case OP_UNUSED_84FF:
+    case OP_UNUSED_85FF:
+    case OP_UNUSED_86FF:
+    case OP_UNUSED_87FF:
+    case OP_UNUSED_88FF:
+    case OP_UNUSED_89FF:
+    case OP_UNUSED_8AFF:
+    case OP_UNUSED_8BFF:
+    case OP_UNUSED_8CFF:
+    case OP_UNUSED_8DFF:
+    case OP_UNUSED_8EFF:
+    case OP_UNUSED_8FFF:
+    case OP_UNUSED_90FF:
+    case OP_UNUSED_91FF:
+    case OP_UNUSED_92FF:
+    case OP_UNUSED_93FF:
+    case OP_UNUSED_94FF:
+    case OP_UNUSED_95FF:
+    case OP_UNUSED_96FF:
+    case OP_UNUSED_97FF:
+    case OP_UNUSED_98FF:
+    case OP_UNUSED_99FF:
+    case OP_UNUSED_9AFF:
+    case OP_UNUSED_9BFF:
+    case OP_UNUSED_9CFF:
+    case OP_UNUSED_9DFF:
+    case OP_UNUSED_9EFF:
+    case OP_UNUSED_9FFF:
+    case OP_UNUSED_A0FF:
+    case OP_UNUSED_A1FF:
+    case OP_UNUSED_A2FF:
+    case OP_UNUSED_A3FF:
+    case OP_UNUSED_A4FF:
+    case OP_UNUSED_A5FF:
+    case OP_UNUSED_A6FF:
+    case OP_UNUSED_A7FF:
+    case OP_UNUSED_A8FF:
+    case OP_UNUSED_A9FF:
+    case OP_UNUSED_AAFF:
+    case OP_UNUSED_ABFF:
+    case OP_UNUSED_ACFF:
+    case OP_UNUSED_ADFF:
+    case OP_UNUSED_AEFF:
+    case OP_UNUSED_AFFF:
+    case OP_UNUSED_B0FF:
+    case OP_UNUSED_B1FF:
+    case OP_UNUSED_B2FF:
+    case OP_UNUSED_B3FF:
+    case OP_UNUSED_B4FF:
+    case OP_UNUSED_B5FF:
+    case OP_UNUSED_B6FF:
+    case OP_UNUSED_B7FF:
+    case OP_UNUSED_B8FF:
+    case OP_UNUSED_B9FF:
+    case OP_UNUSED_BAFF:
+    case OP_UNUSED_BBFF:
+    case OP_UNUSED_BCFF:
+    case OP_UNUSED_BDFF:
+    case OP_UNUSED_BEFF:
+    case OP_UNUSED_BFFF:
+    case OP_UNUSED_C0FF:
+    case OP_UNUSED_C1FF:
+    case OP_UNUSED_C2FF:
+    case OP_UNUSED_C3FF:
+    case OP_UNUSED_C4FF:
+    case OP_UNUSED_C5FF:
+    case OP_UNUSED_C6FF:
+    case OP_UNUSED_C7FF:
+    case OP_UNUSED_C8FF:
+    case OP_UNUSED_C9FF:
+    case OP_UNUSED_CAFF:
+    case OP_UNUSED_CBFF:
+    case OP_UNUSED_CCFF:
+    case OP_UNUSED_CDFF:
+    case OP_UNUSED_CEFF:
+    case OP_UNUSED_CFFF:
+    case OP_UNUSED_D0FF:
+    case OP_UNUSED_D1FF:
+    case OP_UNUSED_D2FF:
+    case OP_UNUSED_D3FF:
+    case OP_UNUSED_D4FF:
+    case OP_UNUSED_D5FF:
+    case OP_UNUSED_D6FF:
+    case OP_UNUSED_D7FF:
+    case OP_UNUSED_D8FF:
+    case OP_UNUSED_D9FF:
+    case OP_UNUSED_DAFF:
+    case OP_UNUSED_DBFF:
+    case OP_UNUSED_DCFF:
+    case OP_UNUSED_DDFF:
+    case OP_UNUSED_DEFF:
+    case OP_UNUSED_DFFF:
+    case OP_UNUSED_E0FF:
+    case OP_UNUSED_E1FF:
+    case OP_UNUSED_E2FF:
+    case OP_UNUSED_E3FF:
+    case OP_UNUSED_E4FF:
+    case OP_UNUSED_E5FF:
+    case OP_UNUSED_E6FF:
+    case OP_UNUSED_E7FF:
+    case OP_UNUSED_E8FF:
+    case OP_UNUSED_E9FF:
+    case OP_UNUSED_EAFF:
+    case OP_UNUSED_EBFF:
+    case OP_UNUSED_ECFF:
+    case OP_UNUSED_EDFF:
+    case OP_UNUSED_EEFF:
+    case OP_UNUSED_EFFF:
+    case OP_UNUSED_F0FF:
+    case OP_UNUSED_F1FF:
+        return false;
+    }
+
+    return true;
+}
+
+/*
+ * This is a dexDecodeDebugInfo callback, used by markDebugLocals().
+ */
+static void markLocalsCb(void* ctxt, u2 reg, u4 startAddress, u4 endAddress,
+    const char* name, const char* descriptor, const char* signature)
+{
+    VerifierData* vdata = (VerifierData*) ctxt;
+    bool verbose = dvmWantVerboseVerification(vdata->method);
+
+    if (verbose) {
+        LOGI("%04x-%04x %2d (%s %s)\n",
+            startAddress, endAddress, reg, name, descriptor);
+    }
+
+    bool wide = (descriptor[0] == 'D' || descriptor[0] == 'J');
+    assert(reg <= vdata->insnRegCount + (wide ? 1 : 0));
+
+    /*
+     * Set the bit in all GC point instructions in the range
+     * [startAddress, endAddress).
+     */
+    unsigned int idx;
+    for (idx = startAddress; idx < endAddress; idx++) {
+        BitVector* liveRegs = vdata->registerLines[idx].liveRegs;
+        if (liveRegs != NULL) {
+            if (wide) {
+                GENW(liveRegs, reg);
+            } else {
+                GEN(liveRegs, reg);
+            }
+        }
+    }
+}
+
+/*
+ * Mark all debugger-visible locals as live.
+ *
+ * The "locals" table describes the positions of the various locals in the
+ * stack frame based on the current execution address.  If the debugger
+ * wants to display one, it issues a request by "slot number".  We need
+ * to ensure that references in stack slots that might be queried by the
+ * debugger aren't GCed.
+ *
+ * (If the GC had some way to mark the slot as invalid we wouldn't have
+ * to do this.  We could also have the debugger interface check the
+ * register map and simply refuse to return a "dead" value, but that's
+ * potentially confusing since the referred-to object might actually be
+ * alive, and being able to see it without having to hunt around for a
+ * "live" stack frame is useful.)
+ */
+static bool markDebugLocals(VerifierData* vdata)
+{
+    const Method* meth = vdata->method;
+
+    dexDecodeDebugInfo(meth->clazz->pDvmDex->pDexFile, dvmGetMethodCode(meth),
+        meth->clazz->descriptor, meth->prototype.protoIdx, meth->accessFlags,
+        NULL, markLocalsCb, vdata);
+
+    return true;
+}
+
+
+/*
+ * Dump the liveness bits to the log.
+ *
+ * "curIdx" is for display only.
+ */
+static void dumpLiveState(const VerifierData* vdata, u4 curIdx,
+    const BitVector* workBits)
+{
+    u4 insnRegCount = vdata->insnRegCount;
+    size_t regCharSize = insnRegCount + (insnRegCount-1)/4 + 2 +1;
+    char regChars[regCharSize +1];
+    unsigned int idx;
+
+    memset(regChars, ' ', regCharSize);
+    regChars[0] = '[';
+    if (insnRegCount == 0)
+        regChars[1] = ']';
+    else
+        regChars[1 + (insnRegCount-1) + (insnRegCount-1)/4 +1] = ']';
+    regChars[regCharSize] = '\0';
+
+    for (idx = 0; idx < insnRegCount; idx++) {
+        char ch = dvmIsBitSet(workBits, idx) ? '+' : '-';
+        regChars[1 + idx + (idx/4)] = ch;
+    }
+
+    LOGI("0x%04x %s\n", curIdx, regChars);
+}
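
Since Liveness.c is entirely new, a compact stand-alone illustration of the
GEN/KILL scheme that processInstruction() implements may be useful: walking
a block backward, each instruction first KILLs the register it defines and
then GENs the registers it reads, so liveness flows from uses back toward
definitions.  The sketch below is only an approximation of that idea -- a
plain uint32_t stands in for the BitVector and the instruction records are
made up; none of it is part of the patch:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        int def;        /* register defined, or -1 if none */
        int use[2];     /* registers read, -1 for unused slots */
    } ToyInsn;

    /* Backward liveness over one straight-line block: KILL the def, GEN the uses. */
    static uint32_t liveBefore(const ToyInsn* insns, int count, uint32_t liveOut)
    {
        uint32_t live = liveOut;
        int i, u;

        for (i = count - 1; i >= 0; i--) {
            if (insns[i].def >= 0)
                live &= ~(1u << insns[i].def);          /* KILL */
            for (u = 0; u < 2; u++) {
                if (insns[i].use[u] >= 0)
                    live |= 1u << insns[i].use[u];      /* GEN */
            }
        }
        return live;
    }

    int main(void)
    {
        /* add v0, v1, v2; move v3, v0; return v3 */
        ToyInsn block[] = {
            {  0, { 1,  2 } },
            {  3, { 0, -1 } },
            { -1, { 3, -1 } },
        };

        /* Only v1 and v2 are live on entry, so this prints 0x6. */
        printf("live-in: 0x%x\n", liveBefore(block, 3, 0));
        return 0;
    }
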
diff --git a/vm/analysis/Liveness.h b/vm/analysis/Liveness.h
new file mode 100644
index 0000000..6436f66
--- /dev/null
+++ b/vm/analysis/Liveness.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Liveness analysis.
+ */
+#ifndef _DALVIK_LIVENESS
+#define _DALVIK_LIVENESS
+
+struct VerifierData;
+
+bool dvmComputeLiveness(struct VerifierData* vdata);
+
+#endif /*_DALVIK_LIVENESS*/
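
One detail of the new file worth calling out is createBackwardWidthTable():
Dalvik instructions have variable width, so walking a basic block from its
last instruction back to its first requires knowing, at each instruction's
address, how wide the previous instruction was.  A reduced sketch of the
same construction, with the widths passed in directly instead of read from
InsnFlags (the names here are illustrative only, and the input is assumed
to be well formed, i.e. nonzero at every instruction start):

    #include <stdlib.h>

    /*
     * "widths[addr]" is the width in code units of the instruction starting
     * at addr (0 for addresses inside an instruction).  The returned table
     * holds, for each instruction start, the width of the instruction that
     * precedes it, so a backward walk can simply do: addr -= backWidth[addr].
     * Caller frees the result.
     */
    static unsigned* buildBackwardWidths(const unsigned* widths, unsigned count)
    {
        unsigned* backWidth = (unsigned*) calloc(count, sizeof(unsigned));
        unsigned addr = 0;
        unsigned prevWidth = 0;

        if (backWidth == NULL)
            return NULL;

        while (addr < count) {
            backWidth[addr] = prevWidth;
            prevWidth = widths[addr];
            addr += prevWidth;
        }
        return backWidth;
    }
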
diff --git a/vm/analysis/Optimize.c b/vm/analysis/Optimize.c
index 834f64f..2bacc5b 100644
--- a/vm/analysis/Optimize.c
+++ b/vm/analysis/Optimize.c
@@ -41,7 +41,7 @@
     Opcode volatileOpc);
 static bool rewriteStaticField(Method* method, u2* insns, Opcode volatileOpc);
 static bool rewriteVirtualInvoke(Method* method, u2* insns, Opcode newOpc);
-static bool rewriteEmptyDirectInvoke(Method* method, u2* insns);
+static bool rewriteInvokeObjectInit(Method* method, u2* insns);
 static bool rewriteExecuteInline(Method* method, u2* insns,
     MethodType methodType);
 static bool rewriteExecuteInlineRange(Method* method, u2* insns,
@@ -51,51 +51,59 @@
 
 
 /*
- * Create a table of inline substitutions.
+ * Create a table of inline substitutions.  Sets gDvm.inlineSubs.
  *
  * TODO: this is currently just a linear array.  We will want to put this
  * into a hash table as the list size increases.
  */
-InlineSub* dvmCreateInlineSubsTable(void)
+bool dvmCreateInlineSubsTable(void)
 {
     const InlineOperation* ops = dvmGetInlineOpsTable();
     const int count = dvmGetInlineOpsTableLength();
     InlineSub* table;
     int i, tableIndex;
 
+    assert(gDvm.inlineSubs == NULL);
+
     /*
-     * Allocate for optimism: one slot per entry, plus an end-of-list marker.
+     * One slot per entry, plus an end-of-list marker.
      */
-    table = calloc(count + 1, sizeof(InlineSub));
+    table = (InlineSub*) calloc(count + 1, sizeof(InlineSub));
 
     tableIndex = 0;
     for (i = 0; i < count; i++) {
         Method* method = dvmFindInlinableMethod(ops[i].classDescriptor,
             ops[i].methodName, ops[i].methodSignature);
-        if (method != NULL) {
-            table[tableIndex].method = method;
-            table[tableIndex].inlineIdx = i;
-            tableIndex++;
-
-            LOGV("DexOpt: will inline %d: %s.%s %s\n", i,
+        if (method == NULL) {
+            /*
+             * Not expected.  We only use this for key methods in core
+             * classes, so we should always be able to find them.
+             */
+            LOGE("Unable to find method for inlining: %s.%s:%s\n",
                 ops[i].classDescriptor, ops[i].methodName,
                 ops[i].methodSignature);
+            return false;
         }
+
+        table[tableIndex].method = method;
+        table[tableIndex].inlineIdx = i;
+        tableIndex++;
     }
 
     /* mark end of table */
     table[tableIndex].method = NULL;
-    LOGV("DexOpt: inline table has %d entries\n", tableIndex);
 
-    return table;
+    gDvm.inlineSubs = table;
+    return true;
 }
 
 /*
  * Release inline sub data structure.
  */
-void dvmFreeInlineSubsTable(InlineSub* inlineSubs)
+void dvmFreeInlineSubsTable(void)
 {
-    free(inlineSubs);
+    free(gDvm.inlineSubs);
+    gDvm.inlineSubs = NULL;
 }
 
 
@@ -139,11 +147,6 @@
     u2* insns;
     u2 inst;
 
-    if (!gDvm.optimizing && !essentialOnly) {
-        /* unexpected; will force copy-on-write of a lot of pages */
-        LOGD("NOTE: doing full bytecode optimization outside dexopt\n");
-    }
-
     if (dvmIsNativeMethod(method) || dvmIsAbstractMethod(method))
         return;
 
@@ -161,7 +164,21 @@
 
         inst = *insns & 0xff;
 
-        /* "essential" substitutions, always checked */
+        /*
+         * essential substitutions:
+         *  {iget,iput,sget,sput}-wide --> *-wide-volatile
+         *  invoke-direct[/range] --> invoke-object-init/range
+         *
+         * essential-on-SMP substitutions:
+         *  iget-* --> iget-*-volatile
+         *  iput-* --> iput-*-volatile
+         *
+         * non-essential substitutions:
+         *  iget-* --> iget-*-quick
+         *  iput-* --> iput-*-quick
+         *
+         * TODO: might be time to merge this with the other two switches
+         */
         switch (inst) {
         case OP_IGET:
         case OP_IGET_BOOLEAN:
@@ -200,7 +217,7 @@
                 volatileOpc = OP_IPUT_OBJECT_VOLATILE;
 rewrite_inst_field:
             if (essentialOnly)
-                quickOpc = OP_NOP;
+                quickOpc = OP_NOP;      /* if not essential, no "-quick" sub */
             if (quickOpc != OP_NOP || volatileOpc != OP_NOP)
                 rewriteInstField(method, insns, quickOpc, volatileOpc);
             break;
@@ -213,13 +230,25 @@
 rewrite_static_field:
             rewriteStaticField(method, insns, volatileOpc);
             break;
+
+        case OP_INVOKE_DIRECT:
+            /* TODO: also handle invoke-direct/range */
+            if (!rewriteInvokeObjectInit(method, insns)) {
+                /* may want to try execute-inline, below */
+                notMatched = true;
+            }
+            break;
         default:
             notMatched = true;
             break;
         }
 
+        /*
+         * essential-on-SMP substitutions:
+         *  {sget,sput}-* --> {sget,sput}-*-volatile
+         *  return-void --> return-void-barrier
+         */
         if (notMatched && gDvm.dexOptForSmp) {
-            /* additional "essential" substitutions for an SMP device */
             switch (inst) {
             case OP_SGET:
             case OP_SGET_BOOLEAN:
@@ -255,19 +284,23 @@
             }
         }
 
-        /* non-essential substitutions */
+        /*
+         * non-essential substitutions:
+         *  invoke-{virtual,direct,static}[/range] --> execute-inline
+         *  invoke-{virtual,super}[/range] --> invoke-*-quick
+         */
         if (notMatched && !essentialOnly) {
             switch (inst) {
             case OP_INVOKE_VIRTUAL:
                 if (!rewriteExecuteInline(method, insns, METHOD_VIRTUAL)) {
                     rewriteVirtualInvoke(method, insns,
-                            OP_INVOKE_VIRTUAL_QUICK);
+                        OP_INVOKE_VIRTUAL_QUICK);
                 }
                 break;
             case OP_INVOKE_VIRTUAL_RANGE:
                 if (!rewriteExecuteInlineRange(method, insns, METHOD_VIRTUAL)) {
                     rewriteVirtualInvoke(method, insns,
-                            OP_INVOKE_VIRTUAL_QUICK_RANGE);
+                        OP_INVOKE_VIRTUAL_QUICK_RANGE);
                 }
                 break;
             case OP_INVOKE_SUPER:
@@ -276,23 +309,18 @@
             case OP_INVOKE_SUPER_RANGE:
                 rewriteVirtualInvoke(method, insns, OP_INVOKE_SUPER_QUICK_RANGE);
                 break;
-
             case OP_INVOKE_DIRECT:
-                if (!rewriteExecuteInline(method, insns, METHOD_DIRECT)) {
-                    rewriteEmptyDirectInvoke(method, insns);
-                }
+                rewriteExecuteInline(method, insns, METHOD_DIRECT);
                 break;
             case OP_INVOKE_DIRECT_RANGE:
                 rewriteExecuteInlineRange(method, insns, METHOD_DIRECT);
                 break;
-
             case OP_INVOKE_STATIC:
                 rewriteExecuteInline(method, insns, METHOD_STATIC);
                 break;
             case OP_INVOKE_STATIC_RANGE:
                 rewriteExecuteInlineRange(method, insns, METHOD_STATIC);
                 break;
-
             default:
                 /* nothing to do for this instruction */
                 ;
@@ -310,16 +338,19 @@
 }
 
 /*
- * Update a 16-bit code unit in "meth".
+ * Update a 16-bit code unit in "meth".  The way in which the DEX data was
+ * loaded determines how we go about the write.
  */
-static inline void updateCodeUnit(const Method* meth, u2* ptr, u2 newVal)
+void dvmUpdateCodeUnit(const Method* meth, u2* ptr, u2 newVal)
 {
-    if (gDvm.optimizing) {
-        /* dexopt time, alter the output directly */
+    DvmDex* pDvmDex = meth->clazz->pDvmDex;
+
+    if (!pDvmDex->isMappedReadOnly) {
+        /* in-memory DEX (dexopt or byte[]), alter the output directly */
         *ptr = newVal;
     } else {
-        /* runtime, toggle the page read/write status */
-        dvmDexChangeDex2(meth->clazz->pDvmDex, ptr, newVal);
+        /* memory-mapped file, toggle the page read/write status */
+        dvmDexChangeDex2(pDvmDex, ptr, newVal);
     }
 }
 
@@ -328,7 +359,7 @@
  */
 static inline void updateOpcode(const Method* meth, u2* ptr, Opcode opcode)
 {
-    updateCodeUnit(meth, ptr, (ptr[0] & 0xff00) | (u2) opcode);
+    dvmUpdateCodeUnit(meth, ptr, (ptr[0] & 0xff00) | (u2) opcode);
 }
 
 /*
@@ -643,7 +674,7 @@
             instField->field.clazz->descriptor, instField->field.name);
     } else if (quickOpc != OP_NOP) {
         updateOpcode(method, insns, quickOpc);
-        updateCodeUnit(method, insns+1, (u2) instField->byteOffset);
+        dvmUpdateCodeUnit(method, insns+1, (u2) instField->byteOffset);
         LOGV("DexOpt: rewrote ifield access %s.%s --> %d\n",
             instField->field.clazz->descriptor, instField->field.name,
             instField->byteOffset);
@@ -857,7 +888,7 @@
      * initial load.
      */
     updateOpcode(method, insns, newOpc);
-    updateCodeUnit(method, insns+1, baseMethod->methodIndex);
+    dvmUpdateCodeUnit(method, insns+1, baseMethod->methodIndex);
 
     //LOGI("DexOpt: rewrote call to %s.%s --> %s.%s\n",
     //    method->clazz->descriptor, method->name,
@@ -867,16 +898,18 @@
 }
 
 /*
- * Rewrite invoke-direct, which has the form:
+ * Rewrite invoke-direct of Object.<init>, which has the form:
  *   op vAA, meth@BBBB, reg stuff @CCCC
  *
- * There isn't a lot we can do to make this faster, but in some situations
- * we can make it go away entirely.
+ * This is useful as an optimization, because otherwise every object
+ * instantiation will cause us to call a method that does nothing.
+ * It also allows us to inexpensively mark objects as finalizable at the
+ * correct time.
  *
- * This must only be used when the invoked method does nothing and has
- * no return value (the latter being very important for verification).
+ * TODO: verifier should ensure Object.<init> contains only return-void,
+ * and issue a warning if not.
  */
-static bool rewriteEmptyDirectInvoke(Method* method, u2* insns)
+static bool rewriteInvokeObjectInit(Method* method, u2* insns)
 {
     ClassObject* clazz = method->clazz;
     Method* calledMethod;
@@ -885,27 +918,34 @@
     calledMethod = dvmOptResolveMethod(clazz, methodIdx, METHOD_DIRECT, NULL);
     if (calledMethod == NULL) {
         LOGD("DexOpt: unable to opt direct call 0x%04x at 0x%02x in %s.%s\n",
-            methodIdx,
-            (int) (insns - method->insns), clazz->descriptor,
-            method->name);
+            methodIdx, (int) (insns - method->insns),
+            clazz->descriptor, method->name);
         return false;
     }
 
-    /* TODO: verify that java.lang.Object() is actually empty! */
     if (calledMethod->clazz == gDvm.classJavaLangObject &&
         dvmCompareNameDescriptorAndMethod("<init>", "()V", calledMethod) == 0)
     {
         /*
-         * Replace with "empty" instruction.  DO NOT disturb anything
-         * else about it, as we want it to function the same as
-         * OP_INVOKE_DIRECT when debugging is enabled.
+         * Replace the instruction.  If the debugger is attached, the
+         * interpreter will forward execution to the invoke-direct/range
+         * handler.  If this was an invoke-direct/range instruction we can
+         * just replace the opcode, but if it was an invoke-direct we
+         * have to set the argument count (high 8 bits of first code unit)
+         * to 1.
          */
-        assert((insns[0] & 0xff) == OP_INVOKE_DIRECT);
-        updateOpcode(method, insns, OP_INVOKE_DIRECT_EMPTY);
+        u1 origOp = insns[0] & 0xff;
+        if (origOp == OP_INVOKE_DIRECT) {
+            dvmUpdateCodeUnit(method, insns,
+                OP_INVOKE_OBJECT_INIT_RANGE | 0x100);
+        } else {
+            assert(origOp == OP_INVOKE_DIRECT_RANGE);
+            assert((insns[0] >> 8) == 1);
+            updateOpcode(method, insns, OP_INVOKE_OBJECT_INIT_RANGE);
+        }
 
-        //LOGI("DexOpt: marked-empty call to %s.%s --> %s.%s\n",
-        //    method->clazz->descriptor, method->name,
-        //    calledMethod->clazz->descriptor, calledMethod->name);
+        LOGVV("DexOpt: replaced Object.<init> in %s.%s\n",
+            method->clazz->descriptor, method->name);
     }
 
     return true;
@@ -1017,7 +1057,7 @@
                    (insns[0] & 0xff) == OP_INVOKE_STATIC ||
                    (insns[0] & 0xff) == OP_INVOKE_VIRTUAL);
             updateOpcode(method, insns, OP_EXECUTE_INLINE);
-            updateCodeUnit(method, insns+1, (u2) inlineSubs->inlineIdx);
+            dvmUpdateCodeUnit(method, insns+1, (u2) inlineSubs->inlineIdx);
 
             //LOGI("DexOpt: execute-inline %s.%s --> %s.%s\n",
             //    method->clazz->descriptor, method->name,
@@ -1057,7 +1097,7 @@
                    (insns[0] & 0xff) == OP_INVOKE_STATIC_RANGE ||
                    (insns[0] & 0xff) == OP_INVOKE_VIRTUAL_RANGE);
             updateOpcode(method, insns, OP_EXECUTE_INLINE_RANGE);
-            updateCodeUnit(method, insns+1, (u2) inlineSubs->inlineIdx);
+            dvmUpdateCodeUnit(method, insns+1, (u2) inlineSubs->inlineIdx);
 
             //LOGI("DexOpt: execute-inline/range %s.%s --> %s.%s\n",
             //    method->clazz->descriptor, method->name,
@@ -1088,29 +1128,32 @@
         return false;
 
     /*
-     * Check to see if the class has any final fields.  If not, we don't
-     * need to generate a barrier instruction.
+     * Check to see if the class is finalizable.  The loader sets a flag
+     * if the class or one of its superclasses overrides finalize().
      */
     const ClassObject* clazz = method->clazz;
-    int idx = clazz->ifieldCount;
-    while (--idx >= 0) {
-        if (dvmIsFinalField(&clazz->ifields[idx].field))
-            break;
-    }
-    if (idx < 0)
-        return false;
+    if (IS_CLASS_FLAG_SET(clazz, CLASS_ISFINALIZABLE))
+        return true;
 
     /*
+     * Check to see if the class has any final fields.  If not, we don't
+     * need to generate a barrier instruction.
+     *
      * In theory, we only need to do this if the method actually modifies
      * a final field.  In practice, non-constructor methods are allowed
-     * to modify final fields by the VM, and there are tools that rely on
-     * this behavior.  (The compiler does not allow it.)
+     * to modify final fields, and there are 3rd-party tools that rely on
+     * this behavior.  (The compiler does not allow it, but the VM does.)
      *
      * If we alter the verifier to restrict final-field updates to
      * constructors, we can tighten this up as well.
      */
+    int idx = clazz->ifieldCount;
+    while (--idx >= 0) {
+        if (dvmIsFinalField(&clazz->ifields[idx].field))
+            return true;
+    }
 
-    return true;
+    return false;
 }
 
 /*
diff --git a/vm/analysis/Optimize.h b/vm/analysis/Optimize.h
index 30f7eef..75b6eab 100644
--- a/vm/analysis/Optimize.h
+++ b/vm/analysis/Optimize.h
@@ -21,17 +21,16 @@
 #define _DALVIK_OPTIMIZE
 
 /*
- * Prep data structures.
- */
-InlineSub* dvmCreateInlineSubsTable(void);
-void dvmFreeInlineSubsTable(InlineSub* inlineSubs);
-
-/*
  * Entry point from DEX preparation.
  */
 void dvmOptimizeClass(ClassObject* clazz, bool essentialOnly);
 
 /*
+ * Update a 16-bit code unit.
+ */
+void dvmUpdateCodeUnit(const Method* meth, u2* ptr, u2 newVal);
+
+/*
  * Abbreviated resolution functions, for use by optimization and verification
  * code.
  */
diff --git a/vm/analysis/RegisterMap.c b/vm/analysis/RegisterMap.c
index 2499a4b..8725ec0 100644
--- a/vm/analysis/RegisterMap.c
+++ b/vm/analysis/RegisterMap.c
@@ -870,7 +870,7 @@
  */
 const RegisterMap* dvmRegisterMapGetNext(const void** pPtr)
 {
-    const RegisterMap* pMap = *pPtr;
+    const RegisterMap* pMap = (const RegisterMap*) *pPtr;
 
     *pPtr = /*align32*/(((u1*) pMap) + computeRegisterMapSize(pMap));
     LOGVV("getNext: %p -> %p (f=0x%x w=%d e=%d)\n",
diff --git a/vm/analysis/RegisterMap.h b/vm/analysis/RegisterMap.h
index 7897d45..886d0b0 100644
--- a/vm/analysis/RegisterMap.h
+++ b/vm/analysis/RegisterMap.h
@@ -67,7 +67,7 @@
  * Get the format.
  */
 INLINE RegisterMapFormat dvmRegisterMapGetFormat(const RegisterMap* pMap) {
-    return pMap->format & ~(kRegMapFormatOnHeap);
+    return (RegisterMapFormat)(pMap->format & ~(kRegMapFormatOnHeap));
 }
 
 /*
diff --git a/vm/analysis/VerifySubs.c b/vm/analysis/VerifySubs.c
index df1dcaf..2366f68 100644
--- a/vm/analysis/VerifySubs.c
+++ b/vm/analysis/VerifySubs.c
@@ -23,6 +23,21 @@
 
 
 /*
+ * This is used when debugging to apply a magnifying glass to the
+ * verification of a particular method.
+ */
+bool dvmWantVerboseVerification(const Method* meth)
+{
+    return false;       /* COMMENT OUT to enable verbose debugging */
+
+    const char* cd = "Lcom/android/server/am/ActivityManagerService;";
+    const char* mn = "trimApplications";
+    const char* sg = "()V";
+    return (strcmp(meth->clazz->descriptor, cd) == 0 &&
+            dvmCompareNameDescriptorAndMethod(mn, sg, meth) == 0);
+}
+
+/*
  * Output a code verifier warning message.  For the pre-verifier it's not
  * a big deal if something fails (and it may even be expected), but if
  * we're doing just-in-time verification it's significant.
@@ -79,8 +94,8 @@
  *
  * Returns "false" on failure (e.g. this isn't a branch instruction).
  */
-bool dvmGetBranchTarget(const Method* meth, InsnFlags* insnFlags,
-    int curOffset, int* pOffset, bool* pConditional)
+bool dvmGetBranchOffset(const Method* meth, const InsnFlags* insnFlags,
+    int curOffset, s4* pOffset, bool* pConditional)
 {
     const u2* insns = meth->insns + curOffset;
 
diff --git a/vm/analysis/VerifySubs.h b/vm/analysis/VerifySubs.h
index f145fff..625244d 100644
--- a/vm/analysis/VerifySubs.h
+++ b/vm/analysis/VerifySubs.h
@@ -61,11 +61,14 @@
 void dvmLogUnableToResolveClass(const char* missingClassDescr,
     const Method* meth);
 
-/* extract the relative branch target from a branch instruction */
-bool dvmGetBranchTarget(const Method* meth, InsnFlags* insnFlags,
-    int curOffset, int* pOffset, bool* pConditional);
+/* extract the relative branch offset from a branch instruction */
+bool dvmGetBranchOffset(const Method* meth, const InsnFlags* insnFlags,
+    int curOffset, s4* pOffset, bool* pConditional);
 
 /* return a RegType enumeration value that "value" just fits into */
 char dvmDetermineCat1Const(s4 value);
 
+/* debugging */
+bool dvmWantVerboseVerification(const Method* meth);
+
 #endif /*_DALVIK_VERIFYSUBS*/
diff --git a/vm/analysis/VfyBasicBlock.c b/vm/analysis/VfyBasicBlock.c
new file mode 100644
index 0000000..7500ba0
--- /dev/null
+++ b/vm/analysis/VfyBasicBlock.c
@@ -0,0 +1,549 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Verifier basic block functions.
+ */
+#include "Dalvik.h"
+#include "analysis/VfyBasicBlock.h"
+#include "analysis/CodeVerify.h"
+#include "analysis/VerifySubs.h"
+#include "libdex/DexCatch.h"
+#include "libdex/InstrUtils.h"
+
+
+/*
+ * Extract the list of catch handlers from "pTry" into "addrBuf".
+ *
+ * Returns the size of the catch handler list.  If the return value
+ * exceeds "addrBufSize", the items at the end of the list will not be
+ * represented in the output array, and this function should be called
+ * again with a larger buffer.
+ */
+static u4 extractCatchHandlers(const DexCode* pCode, const DexTry* pTry,
+    u4* addrBuf, size_t addrBufSize)
+{
+    DexCatchIterator iterator;
+    unsigned int idx = 0;
+
+    dexCatchIteratorInit(&iterator, pCode, pTry->handlerOff);
+    while (true) {
+        DexCatchHandler* handler = dexCatchIteratorNext(&iterator);
+        if (handler == NULL)
+            break;
+
+        if (idx < addrBufSize) {
+            addrBuf[idx] = handler->address;
+        }
+        idx++;
+    }
+
+    return idx;
+}
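
Per the comment above, the intended calling pattern is two-pass: try a small stack buffer first, then fall back to a heap buffer when the returned count is larger. A hedged sketch of that pattern (buffer size and variable names are illustrative; dvmComputeVfyBasicBlocks() below does the same thing):

    u4 stackBuf[16];
    u4* heapBuf = NULL;
    u4* handlers = stackBuf;

    u4 count = extractCatchHandlers(pCode, pTry, stackBuf, 16);
    if (count > 16) {
        heapBuf = (u4*) malloc(count * sizeof(u4));
        if (heapBuf != NULL) {
            extractCatchHandlers(pCode, pTry, heapBuf, count);
            handlers = heapBuf;
        }
    }
    /* ... walk handlers[0 .. count-1] ... */
    free(heapBuf);
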
+
+/*
+ * Returns "true" if the instruction represents a data chunk, such as a
+ * switch statement block.
+ */
+static bool isDataChunk(u2 insn)
+{
+    return (insn == kPackedSwitchSignature ||
+            insn == kSparseSwitchSignature ||
+            insn == kArrayDataSignature);
+}
+
+/*
+ * Alloc a basic block in the specified slot.  The storage will be
+ * initialized.
+ */
+static VfyBasicBlock* allocVfyBasicBlock(VerifierData* vdata, u4 idx)
+{
+    VfyBasicBlock* newBlock = (VfyBasicBlock*) calloc(1, sizeof(VfyBasicBlock));
+    if (newBlock == NULL)
+        return NULL;
+
+    /*
+     * TODO: there is no good default size here -- the problem is that most
+     * addresses will only have one predecessor, but a fair number will
+     * have 10+, and a few will have 100+ (e.g. the synthetic "finally"
+     * in a large synchronized method).  We probably want to use a small
+     * base allocation (perhaps two) and then have the first overflow
+     * allocation jump dramatically (to 32 or thereabouts).
+     */
+    newBlock->predecessors = dvmPointerSetAlloc(32);
+    if (newBlock->predecessors == NULL) {
+        free(newBlock);
+        return NULL;
+    }
+
+    newBlock->firstAddr = (u4) -1;      // DEBUG
+
+    newBlock->liveRegs = dvmAllocBitVector(vdata->insnRegCount, false);
+    if (newBlock->liveRegs == NULL) {
+        dvmPointerSetFree(newBlock->predecessors);
+        free(newBlock);
+        return NULL;
+    }
+
+    return newBlock;
+}
+
+/*
+ * Add "curBlock" to the predecessor list in "targetIdx".
+ */
+static bool addToPredecessor(VerifierData* vdata, VfyBasicBlock* curBlock,
+    u4 targetIdx)
+{
+    assert(targetIdx < vdata->insnsSize);
+
+    /*
+     * Allocate the target basic block if necessary.  This will happen
+     * on e.g. forward branches.
+     *
+     * We can't fill in all the fields, but that will happen automatically
+     * when we get to that part of the code.
+     */
+    VfyBasicBlock* targetBlock = vdata->basicBlocks[targetIdx];
+    if (targetBlock == NULL) {
+        targetBlock = allocVfyBasicBlock(vdata, targetIdx);
+        if (targetBlock == NULL)
+            return false;
+        vdata->basicBlocks[targetIdx] = targetBlock;
+    }
+
+    PointerSet* preds = targetBlock->predecessors;
+    bool added = dvmPointerSetAddEntry(preds, curBlock);
+    if (!added) {
+        /*
+         * This happens sometimes for packed-switch instructions, where
+         * the same target address appears more than once.  Also, a
+         * (pointless) conditional branch to the next instruction will
+         * trip over this.
+         */
+        LOGV("ODD: point set for targ=0x%04x (%p) already had block "
+             "fir=0x%04x (%p)\n",
+            targetIdx, targetBlock, curBlock->firstAddr, curBlock);
+    }
+
+    return true;
+}
+
+/*
+ * Add ourselves to the predecessor list in all blocks we might transfer
+ * control to.
+ *
+ * There are four ways to proceed to a new instruction:
+ *  (1) continue to the following instruction
+ *  (2) [un]conditionally branch to a specific location
+ *  (3) conditionally branch through a "switch" statement
+ *  (4) throw an exception
+ *
+ * Returning from the method (via a return statement or an uncaught
+ * exception) is not interesting for liveness analysis.
+ */
+static bool setPredecessors(VerifierData* vdata, VfyBasicBlock* curBlock,
+    u4 curIdx, OpcodeFlags opFlags, u4 nextIdx, u4* handlerList,
+    size_t numHandlers)
+{
+    const InsnFlags* insnFlags = vdata->insnFlags;
+    const Method* meth = vdata->method;
+
+    unsigned int handlerIdx;
+    for (handlerIdx = 0; handlerIdx < numHandlers; handlerIdx++) {
+        if (!addToPredecessor(vdata, curBlock, handlerList[handlerIdx]))
+            return false;
+    }
+
+    if ((opFlags & kInstrCanContinue) != 0) {
+        if (!addToPredecessor(vdata, curBlock, nextIdx))
+            return false;
+    }
+    if ((opFlags & kInstrCanBranch) != 0) {
+        bool unused, gotBranch;
+        s4 branchOffset, absOffset;
+
+        gotBranch = dvmGetBranchOffset(meth, insnFlags, curIdx,
+                &branchOffset, &unused);
+        assert(gotBranch);
+        absOffset = curIdx + branchOffset;
+        assert(absOffset >= 0 && (u4) absOffset < vdata->insnsSize);
+
+        if (!addToPredecessor(vdata, curBlock, absOffset))
+            return false;
+    }
+
+    if ((opFlags & kInstrCanSwitch) != 0) {
+        const u2* curInsn = &meth->insns[curIdx];
+        const u2* dataPtr;
+
+        /* these values have already been verified, so we can trust them */
+        s4 offsetToData = curInsn[1] | ((s4) curInsn[2]) << 16;
+        dataPtr = curInsn + offsetToData;
+
+        /*
+         * dataPtr points to the start of the switch data.  The first
+         * item is the NOP+magic, the second is the number of entries in
+         * the switch table.
+         */
+        u2 switchCount = dataPtr[1];
+
+        /*
+         * Skip past the ident field, size field, and the first_key field
+         * (for packed) or the key list (for sparse).
+         */
+        if (dexOpcodeFromCodeUnit(meth->insns[curIdx]) == OP_PACKED_SWITCH) {
+            dataPtr += 4;
+        } else {
+            assert(dexOpcodeFromCodeUnit(meth->insns[curIdx]) ==
+                    OP_SPARSE_SWITCH);
+            dataPtr += 2 + 2 * switchCount;
+        }
+
+        u4 switchIdx;
+        for (switchIdx = 0; switchIdx < switchCount; switchIdx++) {
+            s4 offset, absOffset;
+
+            offset = (s4) dataPtr[switchIdx*2] |
+                     (s4) (dataPtr[switchIdx*2 +1] << 16);
+            absOffset = curIdx + offset;
+            assert(absOffset >= 0 && (u4) absOffset < vdata->insnsSize);
+
+            if (!addToPredecessor(vdata, curBlock, absOffset))
+                return false;
+        }
+    }
+
+    if (false) {
+        if (dvmPointerSetGetCount(curBlock->predecessors) > 256) {
+            LOGI("Lots of preds at 0x%04x in %s.%s:%s\n", curIdx,
+                meth->clazz->descriptor, meth->name, meth->shorty);
+        }
+    }
+
+    return true;
+}
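
The kInstrCanSwitch branch above walks the switch payload as raw u2 code units. For reference, a sketch of the two payload layouts it steps over, written as C structs (the struct names are illustrative; only the field order and sizes matter here):

    typedef unsigned short u2;
    typedef int s4;

    /* packed-switch payload: ident (0x0100), size, first_key (two code
     * units), then "size" 4-byte targets -- hence "dataPtr += 4" above. */
    struct PackedSwitchPayload {
        u2 ident;
        u2 size;
        s4 firstKey;
        /* s4 targets[size];  relative to the switch opcode's address */
    };

    /* sparse-switch payload: ident (0x0200), size, "size" sorted keys, then
     * "size" targets -- hence "dataPtr += 2 + 2 * switchCount" above. */
    struct SparseSwitchPayload {
        u2 ident;
        u2 size;
        /* s4 keys[size]; */
        /* s4 targets[size];  relative to the switch opcode's address */
    };
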
+
+/*
+ * Dump the contents of the basic blocks.
+ */
+static void dumpBasicBlocks(const VerifierData* vdata)
+{
+    char printBuf[256];
+    unsigned int idx;
+    int count;
+
+    LOGI("Basic blocks for %s.%s:%s\n", vdata->method->clazz->descriptor,
+        vdata->method->name, vdata->method->shorty);
+    for (idx = 0; idx < vdata->insnsSize; idx++) {
+        VfyBasicBlock* block = vdata->basicBlocks[idx];
+        if (block == NULL)
+            continue;
+
+        assert(block->firstAddr == idx);
+        count = snprintf(printBuf, sizeof(printBuf), " %04x-%04x ",
+            block->firstAddr, block->lastAddr);
+
+        PointerSet* preds = block->predecessors;
+        size_t numPreds = dvmPointerSetGetCount(preds);
+
+        if (numPreds > 0) {
+            count += snprintf(printBuf + count, sizeof(printBuf) - count,
+                    "preds:");
+
+            unsigned int predIdx;
+            for (predIdx = 0; predIdx < numPreds; predIdx++) {
+                if (count >= (int) sizeof(printBuf))
+                    break;
+                const VfyBasicBlock* pred =
+                    (const VfyBasicBlock*) dvmPointerSetGetEntry(preds, predIdx);
+                count += snprintf(printBuf + count, sizeof(printBuf) - count,
+                        "%04x(%p),", pred->firstAddr, pred);
+            }
+        } else {
+            count += snprintf(printBuf + count, sizeof(printBuf) - count,
+                    "(no preds)");
+        }
+
+        printBuf[sizeof(printBuf)-2] = '!';
+        printBuf[sizeof(printBuf)-1] = '\0';
+
+        LOGI("%s", printBuf);
+    }
+
+    usleep(100 * 1000);      /* ugh...let logcat catch up */
+}
+
+
+/*
+ * Generate a list of basic blocks and related information.
+ *
+ * On success, returns "true" with vdata->basicBlocks initialized.
+ */
+bool dvmComputeVfyBasicBlocks(VerifierData* vdata)
+{
+    const InsnFlags* insnFlags = vdata->insnFlags;
+    const Method* meth = vdata->method;
+    const u4 insnsSize = vdata->insnsSize;
+    const DexCode* pCode = dvmGetMethodCode(meth);
+    const DexTry* pTries = NULL;
+    const size_t kHandlerStackAllocSize = 16;   /* max seen so far is 7 */
+    u4 handlerAddrs[kHandlerStackAllocSize];
+    u4* handlerListAlloc = NULL;
+    u4* handlerList = NULL;
+    size_t numHandlers = 0;
+    u4 idx, blockStartAddr;
+    bool result = false;
+
+    bool verbose = false; //dvmWantVerboseVerification(meth);
+    if (verbose) {
+        LOGI("Basic blocks for %s.%s:%s\n",
+            meth->clazz->descriptor, meth->name, meth->shorty);
+    }
+
+    /*
+     * Allocate a data structure that allows us to map from an address to
+     * the corresponding basic block.  Initially all pointers are NULL.
+     * They are populated on demand as we proceed (either when we reach a
+     * new BB, or when we need to add an item to the predecessor list in
+     * a not-yet-reached BB).
+     *
+     * Only the first instruction in the block points to the BB structure;
+     * the rest remain NULL.
+     */
+    vdata->basicBlocks =
+        (VfyBasicBlock**) calloc(insnsSize, sizeof(VfyBasicBlock*));
+    if (vdata->basicBlocks == NULL)
+        goto bail;
+
+    /*
+     * The "tries" list is a series of non-overlapping regions with a list
+     * of "catch" handlers.  Rather than do the "find a matching try block"
+     * computation at each step, we just walk the "try" list in parallel.
+     *
+     * Not all methods have "try" blocks.  If this one does, we init tryEnd
+     * to zero, so that the (exclusive bound) range check trips immediately.
+     */
+    u4 tryIndex = 0, tryStart = 0, tryEnd = 0;
+    if (pCode->triesSize != 0) {
+        pTries = dexGetTries(pCode);
+    }
+
+    u4 debugBBIndex = 0;
+
+    /*
+     * The address associated with a basic block is the start address.
+     */
+    blockStartAddr = 0;
+
+    for (idx = 0; idx < insnsSize; ) {
+        /*
+         * Make sure we're pointing at the right "try" block.  It should
+         * not be possible to "jump over" a block, so if we're no longer
+         * in the correct one we can just advance to the next.
+         */
+        if (pTries != NULL && idx >= tryEnd) {
+            if (tryIndex == pCode->triesSize) {
+                /* no more try blocks in this method */
+                pTries = NULL;
+                numHandlers = 0;
+            } else {
+                /*
+                 * Extract the set of handlers.  We want to avoid doing
+                 * this for each block, so we copy them to local storage.
+                 * If it doesn't fit in the small stack area, we'll use
+                 * the heap instead.
+                 *
+                 * It's rare to encounter a method with more than half a
+                 * dozen possible handlers.
+                 */
+                tryStart = pTries[tryIndex].startAddr;
+                tryEnd = tryStart + pTries[tryIndex].insnCount;
+
+                if (handlerListAlloc != NULL) {
+                    free(handlerListAlloc);
+                    handlerListAlloc = NULL;
+                }
+                numHandlers = extractCatchHandlers(pCode, &pTries[tryIndex],
+                    handlerAddrs, kHandlerStackAllocSize);
+                assert(numHandlers > 0);    // TODO make sure this is verified
+                if (numHandlers <= kHandlerStackAllocSize) {
+                    handlerList = handlerAddrs;
+                } else {
+                    LOGD("overflow, numHandlers=%d\n", numHandlers);
+                    handlerListAlloc = (u4*) malloc(sizeof(u4) * numHandlers);
+                    if (handlerListAlloc == NULL)
+                        return false;
+                    extractCatchHandlers(pCode, &pTries[tryIndex],
+                        handlerListAlloc, numHandlers);
+                    handlerList = handlerListAlloc;
+                }
+
+                LOGV("+++ start=%x end=%x numHan=%d\n",
+                    tryStart, tryEnd, numHandlers);
+
+                tryIndex++;
+            }
+        }
+
+        /*
+         * Check the current instruction, and possibly aspects of the
+         * next instruction, to see if this instruction ends the current
+         * basic block.
+         *
+         * Instructions that can throw only end the block if there is the
+         * possibility of a local handler catching the exception.
+         */
+        Opcode opcode = dexOpcodeFromCodeUnit(meth->insns[idx]);
+        OpcodeFlags opFlags = dexGetFlagsFromOpcode(opcode);
+        size_t nextIdx = idx + dexGetWidthFromInstruction(&meth->insns[idx]);
+        bool endBB = false;
+        bool ignoreInstr = false;
+
+        if ((opFlags & kInstrCanContinue) == 0) {
+            /* does not continue */
+            endBB = true;
+        } else if ((opFlags & (kInstrCanBranch | kInstrCanSwitch)) != 0) {
+            /* conditionally branches elsewhere */
+            endBB = true;
+        } else if ((opFlags & kInstrCanThrow) != 0 &&
+                dvmInsnIsInTry(insnFlags, idx))
+        {
+            /* throws an exception that might be caught locally */
+            endBB = true;
+        } else if (isDataChunk(meth->insns[idx])) {
+            /*
+             * If this is a data chunk (e.g. switch data) we want to skip
+             * over it entirely.  Set endBB so we don't carry this along as
+             * the start of a block, and ignoreInstr so we don't try to
+             * open a basic block for this instruction.
+             */
+            endBB = ignoreInstr = true;
+        } else if (dvmInsnIsBranchTarget(insnFlags, nextIdx)) {
+            /*
+             * We also need to end it if the next instruction is a branch
+             * target.  Note we've tagged exception catch blocks as such.
+             *
+             * If we're this far along in the "else" chain, we know that
+             * this isn't a data-chunk NOP, and control can continue to
+             * the next instruction, so we're okay examining "nextIdx".
+             */
+            assert(nextIdx < insnsSize);
+            endBB = true;
+        } else if (opcode == OP_NOP && isDataChunk(meth->insns[nextIdx])) {
+            /*
+             * Handle an odd special case: if this is NOP padding before a
+             * data chunk, also treat it as "ignore".  Otherwise it'll look
+             * like a block that starts and doesn't end.
+             */
+            endBB = ignoreInstr = true;
+        } else {
+            /* check: return ops should be caught by absence of can-continue */
+            assert((opFlags & kInstrCanReturn) == 0);
+        }
+
+        if (verbose) {
+            char btc = dvmInsnIsBranchTarget(insnFlags, idx) ? '>' : ' ';
+            char tryc =
+                (pTries != NULL && idx >= tryStart && idx < tryEnd) ? 't' : ' ';
+            bool startBB = (idx == blockStartAddr);
+            const char* startEnd;
+
+
+            if (ignoreInstr)
+                startEnd = "IGNORE";
+            else if (startBB && endBB)
+                startEnd = "START/END";
+            else if (startBB)
+                startEnd = "START";
+            else if (endBB)
+                startEnd = "END";
+            else
+                startEnd = "-";
+
+            LOGI("%04x: %c%c%s #%d\n", idx, tryc, btc, startEnd, debugBBIndex);
+
+            if (pTries != NULL && idx == tryStart) {
+                assert(numHandlers > 0);
+                LOGI("  EXC block: [%04x, %04x) %d:(%04x...)\n",
+                    tryStart, tryEnd, numHandlers, handlerList[0]);
+            }
+        }
+
+        if (idx != blockStartAddr) {
+            /* should not be a basic block struct associated with this addr */
+            assert(vdata->basicBlocks[idx] == NULL);
+        }
+        if (endBB) {
+            if (!ignoreInstr) {
+                /*
+                 * Create a new BB if one doesn't already exist.
+                 */
+                VfyBasicBlock* curBlock = vdata->basicBlocks[blockStartAddr];
+                if (curBlock == NULL) {
+                    curBlock = allocVfyBasicBlock(vdata, blockStartAddr);
+                    if (curBlock == NULL)
+                        return false;
+                    vdata->basicBlocks[blockStartAddr] = curBlock;
+                }
+
+                curBlock->firstAddr = blockStartAddr;
+                curBlock->lastAddr = idx;
+
+                if (!setPredecessors(vdata, curBlock, idx, opFlags, nextIdx,
+                        handlerList, numHandlers))
+                {
+                    goto bail;
+                }
+            }
+
+            blockStartAddr = nextIdx;
+            debugBBIndex++;
+        }
+
+        idx = nextIdx;
+    }
+
+    assert(idx == insnsSize);
+
+    result = true;
+
+    if (verbose)
+        dumpBasicBlocks(vdata);
+
+bail:
+    free(handlerListAlloc);
+    return result;
+}
+
+/*
+ * Free the storage used by basic blocks.
+ */
+void dvmFreeVfyBasicBlocks(VerifierData* vdata)
+{
+    unsigned int idx;
+
+    if (vdata->basicBlocks == NULL)
+        return;
+
+    for (idx = 0; idx < vdata->insnsSize; idx++) {
+        VfyBasicBlock* block = vdata->basicBlocks[idx];
+        if (block == NULL)
+            continue;
+
+        dvmPointerSetFree(block->predecessors);
+        free(block);
+    }
+}
diff --git a/vm/analysis/VfyBasicBlock.h b/vm/analysis/VfyBasicBlock.h
new file mode 100644
index 0000000..ff2c680
--- /dev/null
+++ b/vm/analysis/VfyBasicBlock.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Basic block functions, as used by the verifier.  (The names were chosen
+ * to avoid conflicts with similar structures used by the compiler.)
+ */
+#ifndef _DALVIK_VFYBASICBLOCK
+#define _DALVIK_VFYBASICBLOCK
+
+#include "PointerSet.h"
+
+struct VerifierData;
+
+
+/*
+ * Structure representing a basic block.
+ *
+ * This is used for liveness analysis, which is a reverse-flow algorithm,
+ * so we need to maintain a list of predecessors for each block.
+ *
+ * "liveRegs" indicates the set of registers that are live at the end of
+ * the basic block (after the last instruction has executed).  Successor
+ * blocks will compare their results with this to see if this block needs
+ * to be re-evaluated.  Note that this is not the same as the contents of
+ * the RegisterLine for the last instruction in the block (which reflects
+ * the state *before* the instruction has executed).
+ */
+typedef struct {
+    u4              firstAddr;      /* address of first instruction */
+    u4              lastAddr;       /* address of last instruction */
+    PointerSet*     predecessors;   /* set of basic blocks that can flow here */
+    BitVector*      liveRegs;       /* liveness for each register */
+    bool            changed;        /* input set has changed, must re-eval */
+    bool            visited;        /* block has been visited at least once */
+} VfyBasicBlock;
+
+/*
+ * Generate a list of basic blocks.
+ */
+bool dvmComputeVfyBasicBlocks(struct VerifierData* vdata);
+
+/*
+ * Free storage allocated by dvmComputeVfyBasicBlocks.
+ */
+void dvmFreeVfyBasicBlocks(struct VerifierData* vdata);
+
+#endif /*_DALVIK_VFYBASICBLOCK*/
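
The struct comment describes liveRegs as the live-out set of a block and notes that liveness is a reverse-flow problem over the predecessor lists. A minimal, self-contained sketch of that kind of fixed-point iteration (illustrative only: it is not the verifier's implementation, uses a plain 32-bit mask in place of the VM's BitVector, and works at block rather than instruction granularity):

    #include <stdbool.h>

    typedef unsigned int u4;

    /* Simplified block: at most 32 registers, tracked in a u4 bit mask. */
    typedef struct SketchBlock {
        u4      liveOut;    /* registers live at the end of the block */
        u4      gen;        /* registers read before being written */
        u4      kill;       /* registers written in the block */
        int     numPreds;
        struct SketchBlock** preds;
    } SketchBlock;

    /* Iterate until no block's live-out set grows.  Each block's live-in is
     * (liveOut & ~kill) | gen, and it is merged into every predecessor's
     * live-out -- the role played by the predecessor sets in VfyBasicBlock.
     * The real structure's "changed"/"visited" flags let the verifier skip
     * blocks whose inputs did not move. */
    static void computeLiveness(SketchBlock** blocks, int count)
    {
        bool any = true;
        while (any) {
            any = false;
            for (int i = count - 1; i >= 0; i--) {
                SketchBlock* block = blocks[i];
                u4 liveIn = (block->liveOut & ~block->kill) | block->gen;
                for (int p = 0; p < block->numPreds; p++) {
                    SketchBlock* pred = block->preds[p];
                    u4 merged = pred->liveOut | liveIn;
                    if (merged != pred->liveOut) {
                        pred->liveOut = merged;
                        any = true;
                    }
                }
            }
        }
    }
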
diff --git a/vm/arch/arm/CallEABI.S b/vm/arch/arm/CallEABI.S
index 79665ad..9971b5d 100644
--- a/vm/arch/arm/CallEABI.S
+++ b/vm/arch/arm/CallEABI.S
@@ -101,7 +101,7 @@
  *   SRRRLLLL FFFFFFFF FFFFFFFF FFFFFFFF
  *
  *   S - if set, do things the hard way (scan the signature)
- *   R - return type enumeration, really only important for hardware FP
+ *   R - return-type enumeration, really only important for "hard" FP ABI
  *   L - number of double-words of storage required on stack (0-30 words)
  *   F - pad flag -- if set, write a pad word to the stack
  *
@@ -113,53 +113,63 @@
  * in a row, and the first word can never be a pad -- but there's really
  * no need for it.)
  *
- * TODO: could reduce register-saving overhead for "fast" case, since we
- * don't use a couple of registers.  Another thought is to rearrange the
- * arguments such that r0/r1 get passed in on the stack, allowing us to
- * use r0/r1 freely here and then load them with a single ldm.  Might be
- * faster than saving/restoring other registers so that we can leave r0/r1
- * undisturbed.
- *
  * NOTE: if the called function has more than 4 words of arguments, gdb
  * will not be able to unwind the stack past this method.  The only way
  * around this is to convince gdb to respect an explicit frame pointer.
+ * The stack unwinder in debuggerd *does* pay attention to fp if we set it
+ * up appropriately, so at least that will work.
  */
 dvmPlatformInvoke:
     .fnstart
-    @ Save regs.  Same style as gcc with "-fomit-frame-pointer" -- we don't
-    @ disturb "fp" in case somebody else wants it.  Copy "sp" to r4 and use
-    @ that to access local vars.
-    @
-    @ On entry to a function, "sp" must be 64-bit aligned.  This means
-    @ we have to adjust sp manually if we push an odd number of regs here
-    @ (both here and when exiting).  Easier to just push an even number
-    @ of registers.
-    mov     ip, sp                      @ ip<- original stack pointer
-    .save {r4, r5, r6, r7, r8, r9, ip, lr}
-    stmfd   sp!, {r4, r5, r6, r7, r8, r9, ip, lr}
 
-    mov     r4, ip                      @ r4<- original stack pointer
+    /*
+     * Save regs.
+     *
+     * On entry to a function, "sp" must be 64-bit aligned.  This means
+     * we have to adjust sp manually if we push an odd number of regs here
+     * (both here and when exiting).
+     *
+     * The ARM spec doesn't specify anything about the frame pointer.  gcc
+     * points fp at the first saved argument, so our "full descending"
+     * stack looks like:
+     *
+     *  pReturn
+     *  func
+     *  shorty
+     *  argv        <-- sp on entry
+     *  lr          <-- fp
+     *  fp
+     *  r9...r7
+     *  r6          <-- sp after reg save
+     *
+     * Any arguments that need to be pushed on for the target method
+     * come after this.  The last argument is pushed first.
+     */
+SAVED_REG_COUNT = 6                     @ push 6 regs
+FP_STACK_OFFSET = (SAVED_REG_COUNT-1) * 4 @ offset between fp and post-save sp
+FP_ADJ = 4                              @ fp is initial sp +4
+
+    .save        {r6, r7, r8, r9, fp, lr}
+    stmfd   sp!, {r6, r7, r8, r9, fp, lr}
+
+    .setfp  fp, sp, #FP_STACK_OFFSET    @ point fp at first saved reg
+    add     fp, sp, #FP_STACK_OFFSET
+
+    @.pad    #4                          @ adjust for 64-bit align
+    @sub     sp, sp, #4                  @ (if we save odd number of regs)
 
     @ Ensure 64-bit alignment.  EABI guarantees sp is aligned on entry, make
     @ sure we're aligned properly now.
 DBG tst     sp, #4                      @ 64-bit aligned?
-DBG bne     dvmAbort
+DBG bne     dvmAbort                    @ no, fail
 
-    cmp     r1, #0                      @ Is this a static method?
-    ldr     r9, [r4]                    @ r9<- argv
+    ldr     r9, [fp, #0+FP_ADJ]         @ r9<- argv
+    cmp     r1, #0                      @ calling a static method?
 
-    @ Not static: set r1 to *argv++ ("this"), and set argc--.
-    @
-    @ Note the "this" pointer is not included in the method signature.
-#ifdef WORKAROUND_CORTEX_A9_745320
-    bne     1f
-    ldr     r1, [r9], #4
-    sub     r3, r3, #1
-1:
-#else
-    ldreq   r1, [r9], #4
-    subeq   r3, r3, #1
-#endif
+    @ Not static, grab the "this" pointer.  Note "this" is not explicitly
+    @ described by the method signature.
+    subeq   r3, r3, #1                  @ argc--
+    ldreq   r1, [r9], #4                @ r1<- *argv++
 
     @ Do we have arg padding flags in "argInfo"? (just need to check hi bit)
     teq     r2, #0
@@ -172,20 +182,21 @@
      * inserting pad words when appropriate.
      *
      * Currently:
-     *   r0  don't touch
-     *   r1  don't touch
-     *   r2  arg info
-     *   r3  argc
-     *   r4  original stack pointer
-     *   r5-r8 (available)
-     *   r9  argv
+     *  r0  don't touch
+     *  r1  don't touch
+     *  r2  arg info
+     *  r3  argc
+     *  r4-r5  don't touch (not saved)
+     *  r6-r8 (available)
+     *  r9  argv
+     *  fp  frame pointer
      */
 .Lhave_arg_info:
     @ Expand the stack by the specified amount.  We want to extract the
     @ count of double-words from r2, multiply it by 8, and subtract that
     @ from the stack pointer.
     and     ip, r2, #0x0f000000         @ ip<- double-words required
-    mov     r5, r2, lsr #28             @ r5<- return type
+    mov     r6, r2, lsr #28             @ r6<- return type
     sub     sp, sp, ip, lsr #21         @ shift right 24, then left 3
     mov     r8, sp                      @ r8<- sp  (arg copy dest)
 
@@ -212,18 +223,9 @@
     @ Get pad flag into carry bit.  If it's set, we don't pull a value
     @ out of argv.
     movs    r2, r2, lsr #1
-
-#ifdef WORKAROUND_CORTEX_A9_745320
-    bcs     1f
-    ldr     ip, [r7], #4                @ ip = *r7++ (pull from argv)
-    str     ip, [r8], #4                @ *r8++ = ip (write to stack)
-    b       .Lfast_copy_loop
-1:
-#else
     ldrcc   ip, [r7], #4                @ ip = *r7++ (pull from argv)
     strcc   ip, [r8], #4                @ *r8++ = ip (write to stack)
     bcc     .Lfast_copy_loop
-#endif
 
 DBG movcs   ip, #-3                     @ DEBUG DEBUG - make pad word obvious
 DBG strcs   ip, [r8]                    @ DEBUG DEBUG
@@ -231,14 +233,13 @@
     b       .Lfast_copy_loop2           @ don't adjust argc after writing pad
 
 
-
 .Lcopy_done:
     /*
      * Currently:
      *  r0-r3  args (JNIEnv*, thisOrClass, arg0, arg1)
-     *  r4  original saved sp
-     *  r5  return type (enum DalvikJniReturnType)
+     *  r6  return type (enum DalvikJniReturnType)
      *  r9  original argv
+     *  fp  frame pointer
      *
      * The stack copy is complete.  Grab the first two words off of argv
      * and tuck them into r2/r3.  If the first arg is 32-bit and the second
@@ -249,16 +250,14 @@
      * data into the registers, but since nothing tries to use it it's also
      * harmless (assuming argv[0] and argv[1] point to valid memory, which
      * is a reasonable assumption for Dalvik's interpreted stacks).
-     *
      */
     ldmia   r9, {r2-r3}                 @ r2/r3<- argv[0]/argv[1]
 
-    @ call the method
-    ldr     ip, [r4, #8]                @ func
+    ldr     ip, [fp, #8+FP_ADJ]         @ ip<- func
 #ifdef __ARM_HAVE_BLX
-    blx     ip
+    blx     ip                          @ call func
 #else
-    mov     lr, pc
+    mov     lr, pc                      @ call func the old-fashioned way
     bx      ip
 #endif
 
@@ -273,26 +272,19 @@
     @ and double-word values occupy different ranges; simple comparison
     @ allows us to choose between str and stm.  Probably not worthwhile.
     @
-    cmp     r5, #0                      @ DALVIK_JNI_RETURN_VOID?
-#ifdef WORKAROUND_CORTEX_A9_745320
-    beq     1f
-    ldr     ip, [r4, #12]               @ pReturn
-    stmia   ip, {r0-r1}                 @ pReturn->j <- r0/r1
-1:
-#else
-    ldrne   ip, [r4, #12]               @ pReturn
+    cmp     r6, #0                      @ DALVIK_JNI_RETURN_VOID?
+    ldrne   ip, [fp, #12+FP_ADJ]        @ pReturn
+    sub     sp, fp, #FP_STACK_OFFSET    @ restore sp to post-reg-save offset
     stmneia ip, {r0-r1}                 @ pReturn->j <- r0/r1
-#endif
 
-    @ Restore the registers we saved and return (restores lr into pc, and
-    @ the initial stack pointer into sp).
+    @ Restore the registers we saved and return.  On >= ARMv5TE we can
+    @ restore PC directly from the saved LR.
 #ifdef __ARM_HAVE_PC_INTERWORK
-    ldmdb   r4, {r4, r5, r6, r7, r8, r9, sp, pc}
+    ldmfd   sp!, {r6, r7, r8, r9, fp, pc}
 #else
-    ldmdb   r4, {r4, r5, r6, r7, r8, r9, sp, lr}
+    ldmfd   sp!, {r6, r7, r8, r9, fp, lr}
     bx      lr
 #endif
-    .fnend
 
 
 
@@ -308,17 +300,18 @@
      * the class file format allows up to 64K words (need to verify that).
      *
      * Currently:
-     *   r0  don't touch
-     *   r1  don't touch
-     *   r2  (available)
-     *   r3  argc
-     *   r4  original stack pointer
-     *   r5-r8 (available)
-     *   r9  argv
+     *  r0  don't touch
+     *  r1  don't touch
+     *  r2  (available)
+     *  r3  argc
+     *  r4-r5 don't touch (not saved)
+     *  r6-r8 (available)
+     *  r9  argv
+     *  fp  frame pointer
      */
 .Lno_arg_info:
-    mov     r5, r2, lsr #28             @ r5<- return type
-    ldr     r6, [r4, #4]                @ r6<- short signature
+    mov     ip, r2, lsr #28             @ ip<- return type
+    ldr     r6, [fp, #4+FP_ADJ]         @ r6<- short signature
     add     r6, r6, #1                  @ advance past return type
     mov     r2, #0                      @ r2<- word count, init to zero
 
@@ -359,7 +352,7 @@
     @ We need to copy words from [r7] to [r8].  We walk forward through
     @ the signature again, "copying" pad words when appropriate, storing
     @ upward into the stack.
-    ldr     r6, [r4, #4]                @ r6<- signature
+    ldr     r6, [fp, #4+FP_ADJ]         @ r6<- signature
     add     r6, r6, #1                  @ advance past return type
     add     r7, r7, #8                  @ r7<- r7+8 (assume argv 0/1 in r2/r3)
 
@@ -408,7 +401,8 @@
     str     r2, [r8], #4
     b       .Lstack_copy_loop
 
-
+    .fnend
+    .size   dvmPlatformInvoke, .-dvmPlatformInvoke
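
For readers following the "SRRRLLLL FFFFFFFF ..." hints word described at the top of this file, here is a C sketch of what the fast path extracts from it (helper names are illustrative; the assembly above does the same work with and/lsr):

    typedef unsigned int u4;

    /* S (bit 31): set means "do it the hard way" (scan the signature). */
    static int argInfoUseHardPath(u4 argInfo) { return (int) (argInfo >> 31) & 1; }

    /* R (bits 28-30): return-type enumeration.  On the fast path S is clear,
     * so the assembly's "lsr #28" yields the same value. */
    static int argInfoReturnType(u4 argInfo)  { return (int) ((argInfo >> 28) & 0x7); }

    /* L (bits 24-27): double-words of outgoing stack storage; the assembly
     * masks with 0x0f000000 and shifts to turn this into a byte count. */
    static int argInfoStackBytes(u4 argInfo)  { return (int) ((argInfo >> 24) & 0xf) * 8; }

    /* F (bits 0-23): per-argument pad flags, consumed LSB-first. */
    static u4 argInfoPadFlags(u4 argInfo)     { return argInfo & 0x00ffffff; }
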
 
 #if 0
 
diff --git a/vm/arch/generic/Call.c b/vm/arch/generic/Call.c
index a39b761..ae74415 100644
--- a/vm/arch/generic/Call.c
+++ b/vm/arch/generic/Call.c
@@ -13,6 +13,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 /*
  * This uses the FFI (Foreign Function Interface) library to abstract away
  * the system-dependent stuff.  The FFI code is slower than a custom
@@ -29,12 +30,20 @@
 {
     switch (sigType) {
     case 'V': return &ffi_type_void;
+    case 'Z': return &ffi_type_uint8;
+    case 'B': return &ffi_type_sint8;
+    case 'C': return &ffi_type_uint16;
+    case 'S': return &ffi_type_sint16;
+    case 'I': return &ffi_type_sint32;
     case 'F': return &ffi_type_float;
-    case 'D': return &ffi_type_double;
     case 'J': return &ffi_type_sint64;
+    case 'D': return &ffi_type_double;
     case '[':
     case 'L': return &ffi_type_pointer;
-    default:  return &ffi_type_uint32;
+    default:
+        LOGE("bad ffitype 0x%02x\n", sigType);
+        dvmAbort();
+        return NULL;
     }
 }
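
The mapping above feeds libffi's generic call machinery. A small self-contained illustration of how such ffi_type values are consumed by ffi_prep_cif()/ffi_call() (a hypothetical example, not the VM's call path):

    #include <ffi.h>
    #include <stdio.h>

    static int add(int a, int b) { return a + b; }

    int main(void)
    {
        ffi_cif cif;
        ffi_type* argTypes[2] = { &ffi_type_sint32, &ffi_type_sint32 };
        int a = 2, b = 3;
        void* argValues[2] = { &a, &b };
        ffi_arg result = 0;     /* integral returns are widened to ffi_arg */

        if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint32,
                argTypes) != FFI_OK)
            return 1;

        ffi_call(&cif, FFI_FN(add), &result, argValues);
        printf("2 + 3 = %d\n", (int) result);
        return 0;
    }
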
 
@@ -63,7 +72,6 @@
     ffi_type* types[kMaxArgs];
     void* values[kMaxArgs];
     ffi_type* retType;
-    const char* sig;
     char sigByte;
     int srcArg, dstArg;
 
diff --git a/vm/compiler/Compiler.c b/vm/compiler/Compiler.c
index 1ac6e97..817b7e6 100644
--- a/vm/compiler/Compiler.c
+++ b/vm/compiler/Compiler.c
@@ -51,9 +51,36 @@
 }
 
 /*
+ * Enqueue a work order - retrying until successful.  If attempt to enqueue
+ * is repeatedly unsuccessful, assume the JIT is in a bad state and force a
+ * code cache reset.
+ */
+#define ENQUEUE_MAX_RETRIES 20
+void dvmCompilerForceWorkEnqueue(const u2 *pc, WorkOrderKind kind, void* info)
+{
+    bool success;
+    int retries = 0;
+    do {
+        success = dvmCompilerWorkEnqueue(pc, kind, info);
+        if (!success) {
+            retries++;
+            if (retries > ENQUEUE_MAX_RETRIES) {
+                LOGE("JIT: compiler queue wedged - forcing reset");
+                gDvmJit.codeCacheFull = true;  // Force reset
+                success = true;  // Because we'll drop the order now anyway
+            } else {
+                dvmLockMutex(&gDvmJit.compilerLock);
+                pthread_cond_wait(&gDvmJit.compilerQueueActivity,
+                                  &gDvmJit.compilerLock);
+                dvmUnlockMutex(&gDvmJit.compilerLock);
+
+            }
+        }
+    } while (!success);
+}
+
+/*
  * Attempt to enqueue a work order, returning true if successful.
- * This routine will not block, but simply return if it couldn't
- * aquire the lock or if the queue is full.
  *
  * NOTE: Make sure that the caller frees the info pointer if the return value
  * is false.
@@ -65,9 +92,7 @@
     int numWork;
     bool result = true;
 
-    if (dvmTryLockMutex(&gDvmJit.compilerLock)) {
-        return false;  // Couldn't acquire the lock
-    }
+    dvmLockMutex(&gDvmJit.compilerLock);
 
     /*
      * Return if queue or code cache is full.
@@ -99,6 +124,7 @@
     newOrder->result.codeAddress = NULL;
     newOrder->result.discardResult =
         (kind == kWorkOrderTraceDebug) ? true : false;
+    newOrder->result.cacheVersion = gDvmJit.cacheVersion;
     newOrder->result.requestingThread = dvmThreadSelf();
 
     gDvmJit.compilerWorkEnqueueIndex++;
@@ -178,8 +204,8 @@
     gDvmJit.codeCacheByteUsed = templateSize;
 
     /* Only flush the part in the code cache that is being used now */
-    cacheflush((intptr_t) gDvmJit.codeCache,
-               (intptr_t) gDvmJit.codeCache + templateSize, 0);
+    dvmCompilerCacheFlush((intptr_t) gDvmJit.codeCache,
+                          (intptr_t) gDvmJit.codeCache + templateSize, 0);
 
     int result = mprotect(gDvmJit.codeCache, gDvmJit.codeCacheSize,
                           PROTECT_CODE_CACHE_ATTRS);
@@ -209,7 +235,7 @@
         saveArea = SAVEAREA_FROM_FP(fp);
 
         if (print) {
-            if (dvmIsBreakFrame(fp)) {
+            if (dvmIsBreakFrame((u4*)fp)) {
                 LOGD("  #%d: break frame (%p)",
                      stackLevel, saveArea->returnAddr);
             }
@@ -264,6 +290,9 @@
     /* Lock the mutex to clean up the work queue */
     dvmLockMutex(&gDvmJit.compilerLock);
 
+    /* Update the translation cache version */
+    gDvmJit.cacheVersion++;
+
     /* Drain the work queue to free the work orders */
     while (workQueueLength()) {
         CompilerWorkOrder work = workDequeue();
@@ -281,8 +310,9 @@
     memset((char *) gDvmJit.codeCache + gDvmJit.templateSize,
            0,
            gDvmJit.codeCacheByteUsed - gDvmJit.templateSize);
-    cacheflush((intptr_t) gDvmJit.codeCache,
-               (intptr_t) gDvmJit.codeCache + gDvmJit.codeCacheByteUsed, 0);
+    dvmCompilerCacheFlush((intptr_t) gDvmJit.codeCache,
+                          (intptr_t) gDvmJit.codeCache +
+                          gDvmJit.codeCacheByteUsed, 0);
 
     PROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);
 
@@ -301,6 +331,12 @@
     gDvmJit.compilerICPatchIndex = 0;
     dvmUnlockMutex(&gDvmJit.compilerICPatchLock);
 
+    /*
+     * Reset the inflight compilation address (can only be done in safe points
+     * or by the compiler thread when its thread state is RUNNING).
+     */
+    gDvmJit.inflightBaseAddr = NULL;
+
     /* All clear now */
     gDvmJit.codeCacheFull = false;
 
@@ -331,6 +367,7 @@
 {
     JitEntry *pJitTable = NULL;
     unsigned char *pJitProfTable = NULL;
+    JitTraceProfCounters *pJitTraceProfCounters = NULL;
     unsigned int i;
 
     if (!dvmCompilerArchInit())
@@ -350,6 +387,9 @@
         goto fail;
     }
 
+    /* Cache the thread pointer */
+    gDvmJit.compilerThread = dvmThreadSelf();
+
     dvmLockMutex(&gDvmJit.compilerLock);
 
     /* Track method-level compilation statistics */
@@ -397,6 +437,15 @@
     /* Is chain field wide enough for termination pattern? */
     assert(pJitTable[0].u.info.chain == gDvmJit.jitTableSize);
 
+    /* Allocate the trace profiling structure */
+    pJitTraceProfCounters = (JitTraceProfCounters*)
+                             calloc(1, sizeof(*pJitTraceProfCounters));
+    if (!pJitTraceProfCounters) {
+        LOGE("jit trace prof counters allocation failed\n");
+        dvmUnlockMutex(&gDvmJit.tableLock);
+        goto fail;
+    }
+
     gDvmJit.pJitEntryTable = pJitTable;
     gDvmJit.jitTableMask = gDvmJit.jitTableSize - 1;
     gDvmJit.jitTableEntriesUsed = 0;
@@ -408,6 +457,7 @@
      */
     gDvmJit.pProfTable = dvmDebuggerOrProfilerActive() ? NULL : pJitProfTable;
     gDvmJit.pProfTableCopy = pJitProfTable;
+    gDvmJit.pJitTraceProfCounters = pJitTraceProfCounters;
     dvmUnlockMutex(&gDvmJit.tableLock);
 
     /* Signal running threads to refresh their cached pJitTable pointers */
@@ -619,22 +669,20 @@
                 if (gDvmJit.haltCompilerThread) {
                     LOGD("Compiler shutdown in progress - discarding request");
                 } else if (!gDvmJit.codeCacheFull) {
-                    bool compileOK = false;
                     jmp_buf jmpBuf;
                     work.bailPtr = &jmpBuf;
                     bool aborted = setjmp(jmpBuf);
                     if (!aborted) {
-                        compileOK = dvmCompilerDoWork(&work);
+                        bool codeCompiled = dvmCompilerDoWork(&work);
+                        if (codeCompiled && !work.result.discardResult &&
+                                work.result.codeAddress) {
+                            dvmJitSetCodeAddr(work.pc, work.result.codeAddress,
+                                              work.result.instructionSet,
+                                              false, /* not method entry */
+                                              work.result.profileCodeSize);
+                        }
                     }
-                    if (aborted || !compileOK) {
-                        dvmCompilerArenaReset();
-                    } else if (!work.result.discardResult &&
-                               work.result.codeAddress) {
-                        /* Make sure that proper code addr is installed */
-                        assert(work.result.codeAddress != NULL);
-                        dvmJitSetCodeAddr(work.pc, work.result.codeAddress,
-                                          work.result.instructionSet);
-                    }
+                    dvmCompilerArenaReset();
                 }
                 free(work.info);
 #if defined(WITH_JIT_TUNING)
@@ -691,7 +739,8 @@
     gDvmJit.pProfTable = NULL;
     gDvmJit.pProfTableCopy = NULL;
 
-    if (gDvm.verboseShutdown) {
+    if (gDvm.verboseShutdown ||
+            gDvmJit.profileMode == kTraceProfilingContinuous) {
         dvmCompilerDumpStats();
         while (gDvmJit.compilerQueueLength)
           sleep(5);
@@ -739,13 +788,36 @@
         return;
     }
 
+    /*
+     * On the first enabling of method tracing, switch the compiler
+     * into a mode that includes trace support for invokes and returns.
+     * If there are any existing translations, flush them.  NOTE:  we
+     * can't blindly flush the translation cache because this code
+     * may be executed before the compiler thread has finished
+     * initialization.
+     */
+    if ((gDvm.interpBreak & kSubModeMethodTrace) &&
+        !gDvmJit.methodTraceSupport) {
+        bool resetRequired;
+        /*
+         * compilerLock will prevent new compilations from being
+         * installed while we are working.
+         */
+        dvmLockMutex(&gDvmJit.compilerLock);
+        gDvmJit.cacheVersion++; // invalidate compilations in flight
+        gDvmJit.methodTraceSupport = true;
+        resetRequired = (gDvmJit.numCompilations != 0);
+        dvmUnlockMutex(&gDvmJit.compilerLock);
+        if (resetRequired) {
+            dvmSuspendAllThreads(SUSPEND_FOR_CC_RESET);
+            resetCodeCache();
+            dvmResumeAllThreads(SUSPEND_FOR_CC_RESET);
+        }
+    }
+
     dvmLockMutex(&gDvmJit.tableLock);
     jitActive = gDvmJit.pProfTable != NULL;
-    bool disableJit = gDvm.debuggerActive;
-#if !defined(WITH_INLINE_PROFILING)
-    disableJit = disableJit || (gDvm.activeProfilers > 0);
-#endif
-    jitActivate = !disableJit;
+    jitActivate = !dvmDebuggerOrProfilerActive();
 
     if (jitActivate && !jitActive) {
         gDvmJit.pProfTable = gDvmJit.pProfTableCopy;
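
The new cacheVersion counter is stamped into each work order at enqueue time and bumped whenever the code cache is reset or method-trace support is switched on, so translations requested before a reset can be recognized as stale. A hypothetical helper showing the kind of check a consumer of a work order could perform (assumes the VM's headers; not taken from this change):

    /* Hypothetical: drop a result whose cacheVersion predates the current
     * code cache generation. */
    static bool workOrderIsStale(const CompilerWorkOrder* work)
    {
        return work->result.cacheVersion != gDvmJit.cacheVersion;
    }
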
diff --git a/vm/compiler/Compiler.h b/vm/compiler/Compiler.h
index 6dd9cbd..2a0eef4 100644
--- a/vm/compiler/Compiler.h
+++ b/vm/compiler/Compiler.h
@@ -25,7 +25,6 @@
  * #define SIGNATURE_BREAKPOINT
  */
 
-#define MAX_JIT_RUN_LEN                 64
 #define COMPILER_WORK_QUEUE_SIZE        100
 #define COMPILER_IC_PATCH_QUEUE_SIZE    64
 
@@ -45,15 +44,8 @@
 #define COMPILER_TRACE_CHAINING(X)
 
 /* Macro to change the permissions applied to a chunk of the code cache */
-#if !defined(WITH_JIT_TUNING)
 #define PROTECT_CODE_CACHE_ATTRS       (PROT_READ | PROT_EXEC)
 #define UNPROTECT_CODE_CACHE_ATTRS     (PROT_READ | PROT_EXEC | PROT_WRITE)
-#else
-/* When doing JIT profiling always grant the write permission */
-#define PROTECT_CODE_CACHE_ATTRS       (PROT_READ | PROT_EXEC |                \
-                                  (gDvmJit.profile ? PROT_WRITE : 0))
-#define UNPROTECT_CODE_CACHE_ATTRS     (PROT_READ | PROT_EXEC | PROT_WRITE)
-#endif
 
 /* Acquire the lock before removing PROT_WRITE from the specified mem region */
 #define UNPROTECT_CODE_CACHE(addr, size)                                       \
@@ -82,7 +74,6 @@
     DALVIK_JIT_ARM,
     DALVIK_JIT_THUMB,
     DALVIK_JIT_THUMB2,
-    DALVIK_JIT_THUMB2EE,
     DALVIK_JIT_IA32
 } JitInstructionSetType;
 
@@ -90,9 +81,11 @@
 typedef struct JitTranslationInfo {
     void *codeAddress;
     JitInstructionSetType instructionSet;
+    int profileCodeSize;
     bool discardResult;         // Used for debugging divergence and IC patching
     bool methodCompilationAborted;  // Cannot compile the whole method
     Thread *requestingThread;   // For debugging purpose
+    int cacheVersion;           // Used to identify stale trace requests
 } JitTranslationInfo;
 
 typedef enum WorkOrderKind {
@@ -100,6 +93,7 @@
     kWorkOrderMethod = 1,       // Work is to compile a whole method
     kWorkOrderTrace = 2,        // Work is to compile code fragment(s)
     kWorkOrderTraceDebug = 3,   // Work is to compile/debug code fragment(s)
+    kWorkOrderProfileMode = 4,  // Change profiling mode
 } WorkOrderKind;
 
 typedef struct CompilerWorkOrder {
@@ -122,84 +116,11 @@
 typedef struct ICPatchWorkOrder {
     PredictedChainingCell *cellAddr;    /* Address to be patched */
     PredictedChainingCell cellContent;  /* content of the new cell */
+    const char *classDescriptor;        /* Descriptor of the class object */
+    Object *classLoader;                /* Class loader */
+    u4 serialNumber;                    /* Serial # (for verification only) */
 } ICPatchWorkOrder;
 
-/* States of the dbg interpreter when serving a JIT-related request */
-typedef enum JitState {
-    /* Entering states in the debug interpreter */
-    kJitNot = 0,               // Non-JIT related reasons */
-    kJitTSelectRequest = 1,    // Request a trace (subject to filtering)
-    kJitTSelectRequestHot = 2, // Request a hot trace (bypass the filter)
-    kJitSelfVerification = 3,  // Self Verification Mode
-
-    /* Operational states in the debug interpreter */
-    kJitTSelect = 4,           // Actively selecting a trace
-    kJitTSelectEnd = 5,        // Done with the trace - wrap it up
-    kJitSingleStep = 6,        // Single step interpretation
-    kJitSingleStepEnd = 7,     // Done with single step, ready return to mterp
-    kJitDone = 8,              // Ready to leave the debug interpreter
-} JitState;
-
-#if defined(WITH_SELF_VERIFICATION)
-typedef enum SelfVerificationState {
-    kSVSIdle = 0,           // Idle
-    kSVSStart = 1,          // Shadow space set up, running compiled code
-    kSVSPunt = 2,           // Exiting compiled code by punting
-    kSVSSingleStep = 3,     // Exiting compiled code by single stepping
-    kSVSNoProfile = 4,      // Exiting compiled code and don't collect profiles
-    kSVSTraceSelect = 5,    // Exiting compiled code and compile the next pc
-    kSVSNormal = 6,         // Exiting compiled code normally
-    kSVSNoChain = 7,        // Exiting compiled code by no chain
-    kSVSBackwardBranch = 8, // Exiting compiled code with backward branch trace
-    kSVSDebugInterp = 9,    // Normal state restored, running debug interpreter
-} SelfVerificationState;
-#endif
-
-typedef enum JitHint {
-   kJitHintNone = 0,
-   kJitHintTaken = 1,         // Last inst in run was taken branch
-   kJitHintNotTaken = 2,      // Last inst in run was not taken branch
-   kJitHintNoBias = 3,        // Last inst in run was unbiased branch
-} jitHint;
-
-/*
- * Element of a Jit trace description. If the isCode bit is set, it describes
- * a contiguous sequence of Dalvik byte codes.
- */
-typedef struct {
-    unsigned isCode:1;       // If set denotes code fragments
-    unsigned numInsts:8;     // Number of Byte codes in run
-    unsigned runEnd:1;       // Run ends with last byte code
-    jitHint  hint:6;         // Hint to apply to final code of run
-    u2    startOffset;       // Starting offset for trace run
-} JitCodeDesc;
-
-/*
- * A complete list of trace runs passed to the compiler looks like the
- * following:
- *   frag1
- *   frag2
- *   frag3
- *   meta1
- *   meta2
- *   frag4
- *
- * frags 1-4 have the "isCode" field set, and metas 1-2 are plain pointers or
- * pointers to auxiliary data structures as long as the LSB is null.
- * The meaning of the meta content is loosely defined. It is usually the code
- * fragment right before the first meta field (frag3 in this case) to
- * understand and parse them. Frag4 could be a dummy one with 0 "numInsts" but
- * the "runEnd" field set.
- *
- * For example, if a trace run contains a method inlining target, the class
- * type of "this" and the currently resolved method pointer are two instances
- * of meta information stored there.
- */
-typedef union {
-    JitCodeDesc frag;
-    void*       meta;
-} JitTraceRun;
-
 /*
  * Trace description as will appear in the translation cache.  Note
  * flexible array at end, as these will be of variable size.  To
@@ -219,6 +140,7 @@
     kIsThrowFree,       /* Method doesn't throw */
     kIsGetter,          /* Method fits the getter pattern */
     kIsSetter,          /* Method fits the setter pattern */
+    kCannotCompile,     /* Method cannot be compiled */
 } JitMethodAttributes;
 
 #define METHOD_IS_CALLEE        (1 << kIsCallee)
@@ -228,6 +150,7 @@
 #define METHOD_IS_THROW_FREE    (1 << kIsThrowFree)
 #define METHOD_IS_GETTER        (1 << kIsGetter)
 #define METHOD_IS_SETTER        (1 << kIsSetter)
+#define METHOD_CANNOT_COMPILE   (1 << kCannotCompile)
 
 /* Vectors to provide optimization hints */
 typedef enum JitOptimizationHints {
@@ -236,6 +159,15 @@
 
 #define JIT_OPT_NO_LOOP         (1 << kJitOptNoLoop)
 
+/* Customized node traversal orders for different needs */
+typedef enum DataFlowAnalysisMode {
+    kAllNodes = 0,              // All nodes
+    kReachableNodes,            // All reachable nodes
+    kPreOrderDFSTraversal,      // Depth-First-Search / Pre-Order
+    kPostOrderDFSTraversal,     // Depth-First-Search / Post-Order
+    kPostOrderDOMTraversal,     // Dominator tree / Post-Order
+} DataFlowAnalysisMode;
+
 typedef struct CompilerMethodStats {
     const Method *method;       // Used as hash entry signature
     int dalvikSize;             // # of bytes for dalvik bytecodes
@@ -256,41 +188,54 @@
 void dvmCompilerArchDump(void);
 bool dvmCompilerStartup(void);
 void dvmCompilerShutdown(void);
+void dvmCompilerForceWorkEnqueue(const u2* pc, WorkOrderKind kind, void* info);
 bool dvmCompilerWorkEnqueue(const u2* pc, WorkOrderKind kind, void* info);
 void *dvmCheckCodeCache(void *method);
 CompilerMethodStats *dvmCompilerAnalyzeMethodBody(const Method *method,
                                                   bool isCallee);
 bool dvmCompilerCanIncludeThisInstruction(const Method *method,
                                           const DecodedInstruction *insn);
-bool dvmCompileMethod(struct CompilationUnit *cUnit, const Method *method,
-                      JitTranslationInfo *info);
+bool dvmCompileMethod(const Method *method, JitTranslationInfo *info);
 bool dvmCompileTrace(JitTraceDescription *trace, int numMaxInsts,
                      JitTranslationInfo *info, jmp_buf *bailPtr, int optHints);
 void dvmCompilerDumpStats(void);
 void dvmCompilerDrainQueue(void);
 void dvmJitUnchainAll(void);
+void dvmJitScanAllClassPointers(void (*callback)(void *ptr));
 void dvmCompilerSortAndPrintTraceProfiles(void);
 void dvmCompilerPerformSafePointChecks(void);
-void dvmCompilerInlineMIR(struct CompilationUnit *cUnit);
+void dvmCompilerInlineMIR(struct CompilationUnit *cUnit,
+                          JitTranslationInfo *info);
 void dvmInitializeSSAConversion(struct CompilationUnit *cUnit);
-int dvmConvertSSARegToDalvik(struct CompilationUnit *cUnit, int ssaReg);
+int dvmConvertSSARegToDalvik(const struct CompilationUnit *cUnit, int ssaReg);
 bool dvmCompilerLoopOpt(struct CompilationUnit *cUnit);
 void dvmCompilerNonLoopAnalysis(struct CompilationUnit *cUnit);
-void dvmCompilerFindLiveIn(struct CompilationUnit *cUnit,
-                           struct BasicBlock *bb);
-void dvmCompilerDoSSAConversion(struct CompilationUnit *cUnit,
+bool dvmCompilerFindLocalLiveIn(struct CompilationUnit *cUnit,
                                 struct BasicBlock *bb);
-void dvmCompilerDoConstantPropagation(struct CompilationUnit *cUnit,
+bool dvmCompilerDoSSAConversion(struct CompilationUnit *cUnit,
+                                struct BasicBlock *bb);
+bool dvmCompilerDoConstantPropagation(struct CompilationUnit *cUnit,
                                       struct BasicBlock *bb);
-void dvmCompilerFindInductionVariables(struct CompilationUnit *cUnit,
+bool dvmCompilerFindInductionVariables(struct CompilationUnit *cUnit,
                                        struct BasicBlock *bb);
-char *dvmCompilerGetDalvikDisassembly(DecodedInstruction *insn, char *note);
+/* Clear the visited flag for each BB */
+bool dvmCompilerClearVisitedFlag(struct CompilationUnit *cUnit,
+                                 struct BasicBlock *bb);
+char *dvmCompilerGetDalvikDisassembly(const DecodedInstruction *insn,
+                                      char *note);
+char *dvmCompilerFullDisassembler(const struct CompilationUnit *cUnit,
+                                  const struct MIR *mir);
 char *dvmCompilerGetSSAString(struct CompilationUnit *cUnit,
                               struct SSARepresentation *ssaRep);
 void dvmCompilerDataFlowAnalysisDispatcher(struct CompilationUnit *cUnit,
-                void (*func)(struct CompilationUnit *, struct BasicBlock *));
+                bool (*func)(struct CompilationUnit *, struct BasicBlock *),
+                DataFlowAnalysisMode dfaMode,
+                bool isIterative);
+void dvmCompilerMethodSSATransformation(struct CompilationUnit *cUnit);
 void dvmCompilerStateRefresh(void);
 JitTraceDescription *dvmCopyTraceDescriptor(const u2 *pc,
                                             const struct JitEntry *desc);
 void *dvmCompilerGetInterpretTemplate();
+JitInstructionSetType dvmCompilerGetInterpretTemplateSet();
+u8 dvmGetRegResourceMask(int reg);
 #endif /* _DALVIK_VM_COMPILER */
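A minimal sketch of how the traversal modes and the reworked dispatcher declared above are meant to be driven; the wrapper name is hypothetical, and dvmCompilerClearVisitedFlag simply resets each block's visited flag:

    /* Reset every block's visited flag before starting a new traversal. */
    static void resetVisitedFlags(struct CompilationUnit *cUnit)
    {
        /* One pass over all blocks; isIterative == false means a single sweep. */
        dvmCompilerDataFlowAnalysisDispatcher(cUnit, dvmCompilerClearVisitedFlag,
                                              kAllNodes,
                                              false /* isIterative */);
    }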
diff --git a/vm/compiler/CompilerIR.h b/vm/compiler/CompilerIR.h
index 712ca4c..c807877 100644
--- a/vm/compiler/CompilerIR.h
+++ b/vm/compiler/CompilerIR.h
@@ -61,6 +61,7 @@
     kMethodExitBlock,
     kPCReconstruction,
     kExceptionHandling,
+    kCatchEntry,
 } BBType;
 
 typedef struct ChainCellCounts {
@@ -98,6 +99,7 @@
     kMIRInlined,                        // Invoke is inlined (ie dead)
     kMIRInlinedPred,                    // Invoke is inlined via prediction
     kMIRCallee,                         // Instruction is inlined from callee
+    kMIRInvokeMethodJIT,                // Callee is JIT'ed as a whole method
 } MIROptimizationFlagPositons;
 
 #define MIR_IGNORE_NULL_CHECK           (1 << kMIRIgnoreNullCheck)
@@ -107,9 +109,11 @@
 #define MIR_INLINED                     (1 << kMIRInlined)
 #define MIR_INLINED_PRED                (1 << kMIRInlinedPred)
 #define MIR_CALLEE                      (1 << kMIRCallee)
+#define MIR_INVOKE_METHOD_JIT           (1 << kMIRInvokeMethodJIT)
 
 typedef struct CallsiteInfo {
-    const ClassObject *clazz;
+    const char *classDescriptor;
+    Object *classLoader;
     const Method *method;
     LIR *misPredBranchOver;
 } CallsiteInfo;
@@ -133,9 +137,17 @@
 
 struct BasicBlockDataFlow;
 
+/* For successorBlockList */
+typedef enum BlockListType {
+    kNotUsed = 0,
+    kCatch,
+    kPackedSwitch,
+    kSparseSwitch,
+} BlockListType;
+
 typedef struct BasicBlock {
     int id;
-    int visited;
+    bool visited;
     unsigned int startOffset;
     const Method *containingMethod;     // For blocks from the callee
     BBType blockType;
@@ -145,10 +157,29 @@
     MIR *lastMIRInsn;
     struct BasicBlock *fallThrough;
     struct BasicBlock *taken;
-    struct BasicBlock *next;            // Serial link for book keeping purposes
+    struct BasicBlock *iDom;            // Immediate dominator
     struct BasicBlockDataFlow *dataFlowInfo;
+    BitVector *predecessors;
+    BitVector *dominators;
+    BitVector *iDominated;              // Set nodes being immediately dominated
+    BitVector *domFrontier;             // Dominance frontier
+    struct {                            // For one-to-many successors like
+        BlockListType blockListType;    // switch and exception handling
+        GrowableList blocks;
+    } successorBlockList;
 } BasicBlock;
 
+/*
+ * The "blocks" field in "successorBlockList" points to an array of
+ * elements with the type "SuccessorBlockInfo".
+ * For catch blocks, the key is the type index of the exception.
+ * For switch blocks, the key is the case value.
+ */
+typedef struct SuccessorBlockInfo {
+    BasicBlock *block;
+    int key;
+} SuccessorBlockInfo;
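As an illustration of the new successor bookkeeping, a hedged sketch of resolving the handler block for a given exception type index; it assumes the GrowableListIterator API added in CompilerUtility.h further down, and the helper itself is hypothetical:

    static BasicBlock *findCatchTarget(BasicBlock *bb, int exceptionTypeIdx)
    {
        GrowableListIterator iterator;

        if (bb->successorBlockList.blockListType != kCatch)
            return NULL;

        dvmGrowableListIteratorInit(&bb->successorBlockList.blocks, &iterator);
        while (true) {
            SuccessorBlockInfo *successorBlockInfo = (SuccessorBlockInfo *)
                dvmGrowableListIteratorNext(&iterator);
            if (successorBlockInfo == NULL) break;            /* end of list */
            if (successorBlockInfo->key == exceptionTypeIdx)  /* type index for catch */
                return successorBlockInfo->block;
        }
        return NULL;
    }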
+
 struct LoopAnalysis;
 struct RegisterPool;
 
@@ -161,12 +192,14 @@
 typedef struct CompilationUnit {
     int numInsts;
     int numBlocks;
-    BasicBlock **blockList;
+    GrowableList blockList;
     const Method *method;
     const JitTraceDescription *traceDesc;
     LIR *firstLIRInsn;
     LIR *lastLIRInsn;
-    LIR *wordList;
+    LIR *literalList;                   // Constants
+    LIR *classPointerList;              // Relocatable
+    int numClassPointers;
     LIR *chainCellOffsetLIR;
     GrowableList pcReconstructionList;
     int headerSize;                     // bytes before the first code ptr
@@ -178,11 +211,12 @@
     void *baseAddr;
     bool printMe;
     bool allSingleStep;
-    bool executionCount;                // Add code to count trace executions
+    bool hasClassLiterals;              // Contains class ptrs used as literals
     bool hasLoop;                       // Contains a loop
     bool hasInvoke;                     // Contains an invoke instruction
     bool heapMemOp;                     // Mark mem ops for self verification
-    bool wholeMethod;
+    bool usesLinkRegister;              // For self-verification only
+    int profileCodeSize;                // Size of the profile prefix in bytes
     int numChainingCells[kChainingCellGap];
     LIR *firstChainingLIR[kChainingCellGap];
     LIR *chainingCellBottom;
@@ -213,6 +247,24 @@
      * MAX_CHAINED_SWITCH_CASES cases.
      */
     const u2 *switchOverflowPad;
+
+    /* New fields only for method-based compilation */
+    bool methodJitMode;
+    int numReachableBlocks;
+    int numDalvikRegisters;             // method->registersSize + inlined
+    BasicBlock *entryBlock;
+    BasicBlock *exitBlock;
+    BasicBlock *curBlock;
+    BasicBlock *nextCodegenBlock;       // for extended trace codegen
+    GrowableList dfsOrder;
+    GrowableList domPostOrderTraversal;
+    BitVector *tryBlockAddr;
+    BitVector **defBlockMatrix;         // numDalvikRegisters x numBlocks
+    BitVector *tempBlockV;
+    BitVector *tempDalvikRegisterV;
+    BitVector *tempSSARegisterV;        // numSSARegs
+    bool printSSANames;
+    void *blockLabelList;
 } CompilationUnit;
 
 #if defined(WITH_SELF_VERIFICATION)
@@ -221,7 +273,7 @@
 #define HEAP_ACCESS_SHADOW(_state)
 #endif
 
-BasicBlock *dvmCompilerNewBB(BBType blockType);
+BasicBlock *dvmCompilerNewBB(BBType blockType, int blockId);
 
 void dvmCompilerAppendMIR(BasicBlock *bb, MIR *mir);
 
diff --git a/vm/compiler/CompilerUtility.h b/vm/compiler/CompilerUtility.h
index 551edb8..5dd1faf 100644
--- a/vm/compiler/CompilerUtility.h
+++ b/vm/compiler/CompilerUtility.h
@@ -39,19 +39,41 @@
 typedef struct GrowableList {
     size_t numAllocated;
     size_t numUsed;
-    void **elemList;
+    intptr_t *elemList;
 } GrowableList;
 
+typedef struct GrowableListIterator {
+    GrowableList *list;
+    size_t idx;
+    size_t size;
+} GrowableListIterator;
+
 #define GET_ELEM_N(LIST, TYPE, N) (((TYPE*) LIST->elemList)[N])
 
+#define BLOCK_NAME_LEN 80
+
+/* Forward declarations */
 struct LIR;
+struct BasicBlock;
 
 void dvmInitGrowableList(GrowableList *gList, size_t initLength);
-void dvmInsertGrowableList(GrowableList *gList, void *elem);
-BitVector* dvmCompilerAllocBitVector(int startBits, bool expandable);
-bool dvmCompilerSetBit(BitVector* pBits, int num);
+void dvmInsertGrowableList(GrowableList *gList, intptr_t elem);
+void dvmGrowableListIteratorInit(GrowableList *gList,
+                                 GrowableListIterator *iterator);
+intptr_t dvmGrowableListIteratorNext(GrowableListIterator *iterator);
+intptr_t dvmGrowableListGetElement(const GrowableList *gList, size_t idx);
+
+BitVector* dvmCompilerAllocBitVector(unsigned int startBits, bool expandable);
+bool dvmCompilerSetBit(BitVector* pBits, unsigned int num);
+bool dvmCompilerClearBit(BitVector* pBits, unsigned int num);
+void dvmCompilerMarkAllBits(BitVector *pBits, bool set);
 void dvmDebugBitVector(char *msg, const BitVector *bv, int length);
 void dvmDumpLIRInsn(struct LIR *lir, unsigned char *baseAddr);
 void dvmDumpResourceMask(struct LIR *lir, u8 mask, const char *prefix);
+void dvmDumpBlockBitVector(const GrowableList *blocks, char *msg,
+                           const BitVector *bv, int length);
+void dvmGetBlockName(struct BasicBlock *bb, char *name);
+int dvmCompilerCacheFlush(long start, long end, long flags);
+
 
 #endif /* _DALVIK_COMPILER_UTILITY */
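A brief usage sketch of the revised list API (variable names are hypothetical); storing intptr_t lets one list hold both pointers and small integers without casting through void*:

    static void growableListExample(struct BasicBlock *bb)
    {
        GrowableList worklist;
        GrowableListIterator iterator;
        intptr_t elem;

        dvmInitGrowableList(&worklist, 4);
        dvmInsertGrowableList(&worklist, (intptr_t) bb);  /* a pointer element */
        dvmInsertGrowableList(&worklist, 42);             /* a plain integer   */

        dvmGrowableListIteratorInit(&worklist, &iterator);
        while ((elem = dvmGrowableListIteratorNext(&iterator)) != 0) {
            /* elem holds the stored intptr_t; cast back as appropriate */
        }
    }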
diff --git a/vm/compiler/Dataflow.c b/vm/compiler/Dataflow.c
index 82f52b9..76744bd 100644
--- a/vm/compiler/Dataflow.c
+++ b/vm/compiler/Dataflow.c
@@ -750,7 +750,7 @@
     // EF OP_EXECUTE_INLINE_RANGE
     DF_FORMAT_3RC,
 
-    // F0 OP_INVOKE_DIRECT_EMPTY
+    // F0 OP_INVOKE_OBJECT_INIT_RANGE
     DF_NOP,
 
     // F1 OP_RETURN_VOID_BARRIER
@@ -798,10 +798,777 @@
     // FF OP_DISPATCH_FF
     DF_NOP,
 
-    // Beginning of extended MIR opcodes
-    // 100 OP_MIR_PHI
-    DF_PHI | DF_DA,
+    // 100 OP_CONST_CLASS_JUMBO vAAAA, type@BBBBBBBB
+    DF_DA,
 
+    // 101 OP_CHECK_CAST_JUMBO vAAAA, type@BBBBBBBB
+    DF_UA,
+
+    // 102 OP_INSTANCE_OF_JUMBO vAAAA, vBBBB, type@CCCCCCCC
+    DF_DA | DF_UB,
+
+    // 103 OP_NEW_INSTANCE_JUMBO vAAAA, type@BBBBBBBB
+    DF_DA,
+
+    // 104 OP_NEW_ARRAY_JUMBO vAAAA, vBBBB, type@CCCCCCCC
+    DF_DA | DF_UB,
+
+    // 105 OP_FILLED_NEW_ARRAY_JUMBO {vCCCC .. vNNNN}, type@BBBBBBBB
+    DF_FORMAT_3RC,
+
+    // 106 OP_IGET_JUMBO vAAAA, vBBBB, field@CCCCCCCC
+    DF_DA | DF_UB | DF_IS_GETTER,
+
+    // 107 OP_IGET_WIDE_JUMBO vAAAA, vBBBB, field@CCCCCCCC
+    DF_DA_WIDE | DF_UB | DF_IS_GETTER,
+
+    // 108 OP_IGET_OBJECT_JUMBO vAAAA, vBBBB, field@CCCCCCCC
+    DF_DA | DF_UB | DF_IS_GETTER,
+
+    // 109 OP_IGET_BOOLEAN_JUMBO vAAAA, vBBBB, field@CCCCCCCC
+    DF_DA | DF_UB | DF_IS_GETTER,
+
+    // 10A OP_IGET_BYTE_JUMBO vAAAA, vBBBB, field@CCCCCCCC
+    DF_DA | DF_UB | DF_IS_GETTER,
+
+    // 10B OP_IGET_CHAR_JUMBO vAAAA, vBBBB, field@CCCCCCCC
+    DF_DA | DF_UB | DF_IS_GETTER,
+
+    // 10C OP_IGET_SHORT_JUMBO vAAAA, vBBBB, field@CCCCCCCC
+    DF_DA | DF_UB | DF_IS_GETTER,
+
+    // 10D OP_IPUT_JUMBO vAAAA, vBBBB, field@CCCCCCCC
+    DF_UA | DF_UB | DF_IS_SETTER,
+
+    // 10E OP_IPUT_WIDE_JUMBO vAAAA, vBBBB, field@CCCCCCCC
+    DF_UA_WIDE | DF_UB | DF_IS_SETTER,
+
+    // 10F OP_IPUT_OBJECT_JUMBO vAAAA, vBBBB, field@CCCCCCCC
+    DF_UA | DF_UB | DF_IS_SETTER,
+
+    // 110 OP_IPUT_BOOLEAN_JUMBO vAAAA, vBBBB, field@CCCCCCCC
+    DF_UA | DF_UB | DF_IS_SETTER,
+
+    // 111 OP_IPUT_BYTE_JUMBO vAAAA, vBBBB, field@CCCCCCCC
+    DF_UA | DF_UB | DF_IS_SETTER,
+
+    // 112 OP_IPUT_CHAR_JUMBO vAAAA, vBBBB, field@CCCCCCCC
+    DF_UA | DF_UB | DF_IS_SETTER,
+
+    // 113 OP_IPUT_SHORT_JUMBO vAAAA, vBBBB, field@CCCCCCCC
+    DF_UA | DF_UB | DF_IS_SETTER,
+
+    // 114 OP_SGET_JUMBO vAAAA, vBBBB, field@CCCCCCCC
+    DF_DA | DF_IS_GETTER,
+
+    // 115 OP_SGET_WIDE_JUMBO vAAAA, vBBBB, field@CCCCCCCC
+    DF_DA_WIDE | DF_IS_GETTER,
+
+    // 116 OP_SGET_OBJECT_JUMBO vAAAA, vBBBB, field@CCCCCCCC
+    DF_DA | DF_IS_GETTER,
+
+    // 117 OP_SGET_BOOLEAN_JUMBO vAAAA, vBBBB, field@CCCCCCCC
+    DF_DA | DF_IS_GETTER,
+
+    // 118 OP_SGET_BYTE_JUMBO vAAAA, vBBBB, field@CCCCCCCC
+    DF_DA | DF_IS_GETTER,
+
+    // 119 OP_SGET_CHAR_JUMBO vAAAA, vBBBB, field@CCCCCCCC
+    DF_DA | DF_IS_GETTER,
+
+    // 11A OP_SGET_SHORT_JUMBO vAAAA, vBBBB, field@CCCCCCCC
+    DF_DA | DF_IS_GETTER,
+
+    // 11B OP_SPUT_JUMBO vAAAA, vBBBB, field@CCCCCCCC
+    DF_UA | DF_IS_SETTER,
+
+    // 11C OP_SPUT_WIDE_JUMBO vAAAA, vBBBB, field@CCCCCCCC
+    DF_UA_WIDE | DF_IS_SETTER,
+
+    // 11D OP_SPUT_OBJECT_JUMBO vAAAA, vBBBB, field@CCCCCCCC
+    DF_UA | DF_IS_SETTER,
+
+    // 11E OP_SPUT_BOOLEAN_JUMBO vAAAA, vBBBB, field@CCCCCCCC
+    DF_UA | DF_IS_SETTER,
+
+    // 11F OP_SPUT_BYTE_JUMBO vAAAA, vBBBB, field@CCCCCCCC
+    DF_UA | DF_IS_SETTER,
+
+    // 120 OP_SPUT_CHAR_JUMBO vAAAA, vBBBB, field@CCCCCCCC
+    DF_UA | DF_IS_SETTER,
+
+    // 121 OP_SPUT_SHORT_JUMBO vAAAA, vBBBB, field@CCCCCCCC
+    DF_UA | DF_IS_SETTER,
+
+    // 122 OP_INVOKE_VIRTUAL_JUMBO {vCCCC .. vNNNN}, meth@BBBBBBBB
+    DF_FORMAT_3RC,
+
+    // 123 OP_INVOKE_SUPER_JUMBO {vCCCC .. vNNNN}, meth@BBBBBBBB
+    DF_FORMAT_3RC,
+
+    // 124 OP_INVOKE_DIRECT_JUMBO {vCCCC .. vNNNN}, meth@BBBBBBBB
+    DF_FORMAT_3RC,
+
+    // 125 OP_INVOKE_STATIC_JUMBO {vCCCC .. vNNNN}, meth@BBBBBBBB
+    DF_FORMAT_3RC,
+
+    // 126 OP_INVOKE_INTERFACE_JUMBO {vCCCC .. vNNNN}, meth@BBBBBBBB
+    DF_FORMAT_3RC,
+
+    // 127 OP_UNUSED_27FF
+    DF_NOP,
+
+    // 128 OP_UNUSED_28FF
+    DF_NOP,
+
+    // 129 OP_UNUSED_29FF
+    DF_NOP,
+
+    // 12A OP_UNUSED_2AFF
+    DF_NOP,
+
+    // 12B OP_UNUSED_2BFF
+    DF_NOP,
+
+    // 12C OP_UNUSED_2CFF
+    DF_NOP,
+
+    // 12D OP_UNUSED_2DFF
+    DF_NOP,
+
+    // 12E OP_UNUSED_2EFF
+    DF_NOP,
+
+    // 12F OP_UNUSED_2FFF
+    DF_NOP,
+
+    // 130 OP_UNUSED_30FF
+    DF_NOP,
+
+    // 131 OP_UNUSED_31FF
+    DF_NOP,
+
+    // 132 OP_UNUSED_32FF
+    DF_NOP,
+
+    // 133 OP_UNUSED_33FF
+    DF_NOP,
+
+    // 134 OP_UNUSED_34FF
+    DF_NOP,
+
+    // 135 OP_UNUSED_35FF
+    DF_NOP,
+
+    // 136 OP_UNUSED_36FF
+    DF_NOP,
+
+    // 137 OP_UNUSED_37FF
+    DF_NOP,
+
+    // 138 OP_UNUSED_38FF
+    DF_NOP,
+
+    // 139 OP_UNUSED_39FF
+    DF_NOP,
+
+    // 13A OP_UNUSED_3AFF
+    DF_NOP,
+
+    // 13B OP_UNUSED_3BFF
+    DF_NOP,
+
+    // 13C OP_UNUSED_3CFF
+    DF_NOP,
+
+    // 13D OP_UNUSED_3DFF
+    DF_NOP,
+
+    // 13E OP_UNUSED_3EFF
+    DF_NOP,
+
+    // 13F OP_UNUSED_3FFF
+    DF_NOP,
+
+    // 140 OP_UNUSED_40FF
+    DF_NOP,
+
+    // 141 OP_UNUSED_41FF
+    DF_NOP,
+
+    // 142 OP_UNUSED_42FF
+    DF_NOP,
+
+    // 143 OP_UNUSED_43FF
+    DF_NOP,
+
+    // 144 OP_UNUSED_44FF
+    DF_NOP,
+
+    // 145 OP_UNUSED_45FF
+    DF_NOP,
+
+    // 146 OP_UNUSED_46FF
+    DF_NOP,
+
+    // 147 OP_UNUSED_47FF
+    DF_NOP,
+
+    // 148 OP_UNUSED_48FF
+    DF_NOP,
+
+    // 149 OP_UNUSED_49FF
+    DF_NOP,
+
+    // 14A OP_UNUSED_4AFF
+    DF_NOP,
+
+    // 14B OP_UNUSED_4BFF
+    DF_NOP,
+
+    // 14C OP_UNUSED_4CFF
+    DF_NOP,
+
+    // 14D OP_UNUSED_4DFF
+    DF_NOP,
+
+    // 14E OP_UNUSED_4EFF
+    DF_NOP,
+
+    // 14F OP_UNUSED_4FFF
+    DF_NOP,
+
+    // 150 OP_UNUSED_50FF
+    DF_NOP,
+
+    // 151 OP_UNUSED_51FF
+    DF_NOP,
+
+    // 152 OP_UNUSED_52FF
+    DF_NOP,
+
+    // 153 OP_UNUSED_53FF
+    DF_NOP,
+
+    // 154 OP_UNUSED_54FF
+    DF_NOP,
+
+    // 155 OP_UNUSED_55FF
+    DF_NOP,
+
+    // 156 OP_UNUSED_56FF
+    DF_NOP,
+
+    // 157 OP_UNUSED_57FF
+    DF_NOP,
+
+    // 158 OP_UNUSED_58FF
+    DF_NOP,
+
+    // 159 OP_UNUSED_59FF
+    DF_NOP,
+
+    // 15A OP_UNUSED_5AFF
+    DF_NOP,
+
+    // 15B OP_UNUSED_5BFF
+    DF_NOP,
+
+    // 15C OP_UNUSED_5CFF
+    DF_NOP,
+
+    // 15D OP_UNUSED_5DFF
+    DF_NOP,
+
+    // 15E OP_UNUSED_5EFF
+    DF_NOP,
+
+    // 15F OP_UNUSED_5FFF
+    DF_NOP,
+
+    // 160 OP_UNUSED_60FF
+    DF_NOP,
+
+    // 161 OP_UNUSED_61FF
+    DF_NOP,
+
+    // 162 OP_UNUSED_62FF
+    DF_NOP,
+
+    // 163 OP_UNUSED_63FF
+    DF_NOP,
+
+    // 164 OP_UNUSED_64FF
+    DF_NOP,
+
+    // 165 OP_UNUSED_65FF
+    DF_NOP,
+
+    // 166 OP_UNUSED_66FF
+    DF_NOP,
+
+    // 167 OP_UNUSED_67FF
+    DF_NOP,
+
+    // 168 OP_UNUSED_68FF
+    DF_NOP,
+
+    // 169 OP_UNUSED_69FF
+    DF_NOP,
+
+    // 16A OP_UNUSED_6AFF
+    DF_NOP,
+
+    // 16B OP_UNUSED_6BFF
+    DF_NOP,
+
+    // 16C OP_UNUSED_6CFF
+    DF_NOP,
+
+    // 16D OP_UNUSED_6DFF
+    DF_NOP,
+
+    // 16E OP_UNUSED_6EFF
+    DF_NOP,
+
+    // 16F OP_UNUSED_6FFF
+    DF_NOP,
+
+    // 170 OP_UNUSED_70FF
+    DF_NOP,
+
+    // 171 OP_UNUSED_71FF
+    DF_NOP,
+
+    // 172 OP_UNUSED_72FF
+    DF_NOP,
+
+    // 173 OP_UNUSED_73FF
+    DF_NOP,
+
+    // 174 OP_UNUSED_74FF
+    DF_NOP,
+
+    // 175 OP_UNUSED_75FF
+    DF_NOP,
+
+    // 176 OP_UNUSED_76FF
+    DF_NOP,
+
+    // 177 OP_UNUSED_77FF
+    DF_NOP,
+
+    // 178 OP_UNUSED_78FF
+    DF_NOP,
+
+    // 179 OP_UNUSED_79FF
+    DF_NOP,
+
+    // 17A OP_UNUSED_7AFF
+    DF_NOP,
+
+    // 17B OP_UNUSED_7BFF
+    DF_NOP,
+
+    // 17C OP_UNUSED_7CFF
+    DF_NOP,
+
+    // 17D OP_UNUSED_7DFF
+    DF_NOP,
+
+    // 17E OP_UNUSED_7EFF
+    DF_NOP,
+
+    // 17F OP_UNUSED_7FFF
+    DF_NOP,
+
+    // 180 OP_UNUSED_80FF
+    DF_NOP,
+
+    // 181 OP_UNUSED_81FF
+    DF_NOP,
+
+    // 182 OP_UNUSED_82FF
+    DF_NOP,
+
+    // 183 OP_UNUSED_83FF
+    DF_NOP,
+
+    // 184 OP_UNUSED_84FF
+    DF_NOP,
+
+    // 185 OP_UNUSED_85FF
+    DF_NOP,
+
+    // 186 OP_UNUSED_86FF
+    DF_NOP,
+
+    // 187 OP_UNUSED_87FF
+    DF_NOP,
+
+    // 188 OP_UNUSED_88FF
+    DF_NOP,
+
+    // 189 OP_UNUSED_89FF
+    DF_NOP,
+
+    // 18A OP_UNUSED_8AFF
+    DF_NOP,
+
+    // 18B OP_UNUSED_8BFF
+    DF_NOP,
+
+    // 18C OP_UNUSED_8CFF
+    DF_NOP,
+
+    // 18D OP_UNUSED_8DFF
+    DF_NOP,
+
+    // 18E OP_UNUSED_8EFF
+    DF_NOP,
+
+    // 18F OP_UNUSED_8FFF
+    DF_NOP,
+
+    // 190 OP_UNUSED_90FF
+    DF_NOP,
+
+    // 191 OP_UNUSED_91FF
+    DF_NOP,
+
+    // 192 OP_UNUSED_92FF
+    DF_NOP,
+
+    // 193 OP_UNUSED_93FF
+    DF_NOP,
+
+    // 194 OP_UNUSED_94FF
+    DF_NOP,
+
+    // 195 OP_UNUSED_95FF
+    DF_NOP,
+
+    // 196 OP_UNUSED_96FF
+    DF_NOP,
+
+    // 197 OP_UNUSED_97FF
+    DF_NOP,
+
+    // 198 OP_UNUSED_98FF
+    DF_NOP,
+
+    // 199 OP_UNUSED_99FF
+    DF_NOP,
+
+    // 19A OP_UNUSED_9AFF
+    DF_NOP,
+
+    // 19B OP_UNUSED_9BFF
+    DF_NOP,
+
+    // 19C OP_UNUSED_9CFF
+    DF_NOP,
+
+    // 19D OP_UNUSED_9DFF
+    DF_NOP,
+
+    // 19E OP_UNUSED_9EFF
+    DF_NOP,
+
+    // 19F OP_UNUSED_9FFF
+    DF_NOP,
+
+    // 1A0 OP_UNUSED_A0FF
+    DF_NOP,
+
+    // 1A1 OP_UNUSED_A1FF
+    DF_NOP,
+
+    // 1A2 OP_UNUSED_A2FF
+    DF_NOP,
+
+    // 1A3 OP_UNUSED_A3FF
+    DF_NOP,
+
+    // 1A4 OP_UNUSED_A4FF
+    DF_NOP,
+
+    // 1A5 OP_UNUSED_A5FF
+    DF_NOP,
+
+    // 1A6 OP_UNUSED_A6FF
+    DF_NOP,
+
+    // 1A7 OP_UNUSED_A7FF
+    DF_NOP,
+
+    // 1A8 OP_UNUSED_A8FF
+    DF_NOP,
+
+    // 1A9 OP_UNUSED_A9FF
+    DF_NOP,
+
+    // 1AA OP_UNUSED_AAFF
+    DF_NOP,
+
+    // 1AB OP_UNUSED_ABFF
+    DF_NOP,
+
+    // 1AC OP_UNUSED_ACFF
+    DF_NOP,
+
+    // 1AD OP_UNUSED_ADFF
+    DF_NOP,
+
+    // 1AE OP_UNUSED_AEFF
+    DF_NOP,
+
+    // 1AF OP_UNUSED_AFFF
+    DF_NOP,
+
+    // 1B0 OP_UNUSED_B0FF
+    DF_NOP,
+
+    // 1B1 OP_UNUSED_B1FF
+    DF_NOP,
+
+    // 1B2 OP_UNUSED_B2FF
+    DF_NOP,
+
+    // 1B3 OP_UNUSED_B3FF
+    DF_NOP,
+
+    // 1B4 OP_UNUSED_B4FF
+    DF_NOP,
+
+    // 1B5 OP_UNUSED_B5FF
+    DF_NOP,
+
+    // 1B6 OP_UNUSED_B6FF
+    DF_NOP,
+
+    // 1B7 OP_UNUSED_B7FF
+    DF_NOP,
+
+    // 1B8 OP_UNUSED_B8FF
+    DF_NOP,
+
+    // 1B9 OP_UNUSED_B9FF
+    DF_NOP,
+
+    // 1BA OP_UNUSED_BAFF
+    DF_NOP,
+
+    // 1BB OP_UNUSED_BBFF
+    DF_NOP,
+
+    // 1BC OP_UNUSED_BCFF
+    DF_NOP,
+
+    // 1BD OP_UNUSED_BDFF
+    DF_NOP,
+
+    // 1BE OP_UNUSED_BEFF
+    DF_NOP,
+
+    // 1BF OP_UNUSED_BFFF
+    DF_NOP,
+
+    // 1C0 OP_UNUSED_C0FF
+    DF_NOP,
+
+    // 1C1 OP_UNUSED_C1FF
+    DF_NOP,
+
+    // 1C2 OP_UNUSED_C2FF
+    DF_NOP,
+
+    // 1C3 OP_UNUSED_C3FF
+    DF_NOP,
+
+    // 1C4 OP_UNUSED_C4FF
+    DF_NOP,
+
+    // 1C5 OP_UNUSED_C5FF
+    DF_NOP,
+
+    // 1C6 OP_UNUSED_C6FF
+    DF_NOP,
+
+    // 1C7 OP_UNUSED_C7FF
+    DF_NOP,
+
+    // 1C8 OP_UNUSED_C8FF
+    DF_NOP,
+
+    // 1C9 OP_UNUSED_C9FF
+    DF_NOP,
+
+    // 1CA OP_UNUSED_CAFF
+    DF_NOP,
+
+    // 1CB OP_UNUSED_CBFF
+    DF_NOP,
+
+    // 1CC OP_UNUSED_CCFF
+    DF_NOP,
+
+    // 1CD OP_UNUSED_CDFF
+    DF_NOP,
+
+    // 1CE OP_UNUSED_CEFF
+    DF_NOP,
+
+    // 1CF OP_UNUSED_CFFF
+    DF_NOP,
+
+    // 1D0 OP_UNUSED_D0FF
+    DF_NOP,
+
+    // 1D1 OP_UNUSED_D1FF
+    DF_NOP,
+
+    // 1D2 OP_UNUSED_D2FF
+    DF_NOP,
+
+    // 1D3 OP_UNUSED_D3FF
+    DF_NOP,
+
+    // 1D4 OP_UNUSED_D4FF
+    DF_NOP,
+
+    // 1D5 OP_UNUSED_D5FF
+    DF_NOP,
+
+    // 1D6 OP_UNUSED_D6FF
+    DF_NOP,
+
+    // 1D7 OP_UNUSED_D7FF
+    DF_NOP,
+
+    // 1D8 OP_UNUSED_D8FF
+    DF_NOP,
+
+    // 1D9 OP_UNUSED_D9FF
+    DF_NOP,
+
+    // 1DA OP_UNUSED_DAFF
+    DF_NOP,
+
+    // 1DB OP_UNUSED_DBFF
+    DF_NOP,
+
+    // 1DC OP_UNUSED_DCFF
+    DF_NOP,
+
+    // 1DD OP_UNUSED_DDFF
+    DF_NOP,
+
+    // 1DE OP_UNUSED_DEFF
+    DF_NOP,
+
+    // 1DF OP_UNUSED_DFFF
+    DF_NOP,
+
+    // 1E0 OP_UNUSED_E0FF
+    DF_NOP,
+
+    // 1E1 OP_UNUSED_E1FF
+    DF_NOP,
+
+    // 1E2 OP_UNUSED_E2FF
+    DF_NOP,
+
+    // 1E3 OP_UNUSED_E3FF
+    DF_NOP,
+
+    // 1E4 OP_UNUSED_E4FF
+    DF_NOP,
+
+    // 1E5 OP_UNUSED_E5FF
+    DF_NOP,
+
+    // 1E6 OP_UNUSED_E6FF
+    DF_NOP,
+
+    // 1E7 OP_UNUSED_E7FF
+    DF_NOP,
+
+    // 1E8 OP_UNUSED_E8FF
+    DF_NOP,
+
+    // 1E9 OP_UNUSED_E9FF
+    DF_NOP,
+
+    // 1EA OP_UNUSED_EAFF
+    DF_NOP,
+
+    // 1EB OP_UNUSED_EBFF
+    DF_NOP,
+
+    // 1EC OP_UNUSED_ECFF
+    DF_NOP,
+
+    // 1ED OP_UNUSED_EDFF
+    DF_NOP,
+
+    // 1EE OP_UNUSED_EEFF
+    DF_NOP,
+
+    // 1EF OP_UNUSED_EFFF
+    DF_NOP,
+
+    // 1F0 OP_UNUSED_F0FF
+    DF_NOP,
+
+    // 1F1 OP_UNUSED_F1FF
+    DF_NOP,
+
+    // 1F2 OP_INVOKE_OBJECT_INIT_JUMBO
+    DF_NOP,
+
+    // 1F3 OP_IGET_VOLATILE_JUMBO
+    DF_DA | DF_UB,
+
+    // 1F4 OP_IGET_WIDE_VOLATILE_JUMBO
+    DF_DA_WIDE | DF_UB,
+
+    // 1F5 OP_IGET_OBJECT_VOLATILE_JUMBO
+    DF_DA | DF_UB,
+
+    // 1F6 OP_IPUT_VOLATILE_JUMBO
+    DF_UA | DF_UB,
+
+    // 1F7 OP_IPUT_WIDE_VOLATILE_JUMBO
+    DF_UA_WIDE | DF_UB,
+
+    // 1F8 OP_IPUT_OBJECT_VOLATILE_JUMBO
+    DF_UA | DF_UB,
+
+    // 1F9 OP_SGET_VOLATILE_JUMBO
+    DF_DA,
+
+    // 1FA OP_SGET_WIDE_VOLATILE_JUMBO
+    DF_DA_WIDE,
+
+    // 1FB OP_SGET_OBJECT_VOLATILE_JUMBO
+    DF_DA,
+
+    // 1FC OP_SPUT_VOLATILE_JUMBO
+    DF_UA,
+
+    // 1FD OP_SPUT_WIDE_VOLATILE_JUMBO
+    DF_UA_WIDE,
+
+    // 1FE OP_SPUT_OBJECT_VOLATILE_JUMBO
+    DF_UA,
+
+    // 1FF OP_THROW_VERIFICATION_ERROR_JUMBO
+    DF_NOP,
+
+    // Beginning of extended MIR opcodes
+    // 200 OP_MIR_PHI
+    DF_PHI | DF_DA,
     /*
      * For extended MIR inserted at the MIR2LIR stage, it is okay to have
      * undefined values here.
@@ -809,7 +1576,7 @@
 };
 
 /* Return the Dalvik register/subscript pair of a given SSA register */
-int dvmConvertSSARegToDalvik(CompilationUnit *cUnit, int ssaReg)
+int dvmConvertSSARegToDalvik(const CompilationUnit *cUnit, int ssaReg)
 {
       return GET_ELEM_N(cUnit->ssaToDalvikMap, int, ssaReg);
 }
@@ -819,21 +1586,61 @@
  * and subscript pair. Each SSA register can be used to index the
  * ssaToDalvikMap list to get the subscript[31..16]/dalvik_reg[15..0] mapping.
  */
-char *dvmCompilerGetDalvikDisassembly(DecodedInstruction *insn,
+char *dvmCompilerGetDalvikDisassembly(const DecodedInstruction *insn,
                                       char *note)
 {
     char buffer[256];
     int opcode = insn->opcode;
     int dfAttributes = dvmCompilerDataFlowAttributes[opcode];
+    int flags;
     char *ret;
 
     buffer[0] = 0;
-    strcpy(buffer, dexGetOpcodeName(opcode));
+    if (opcode >= kMirOpFirst) {
+        if (opcode == kMirOpPhi) {
+            strcpy(buffer, "PHI");
+        }
+        else {
+            sprintf(buffer, "Opcode 0x%x", opcode);
+        }
+        flags = 0;
+    } else {
+        strcpy(buffer, dexGetOpcodeName(opcode));
+        flags = dexGetFlagsFromOpcode(insn->opcode);
+    }
 
     if (note)
         strcat(buffer, note);
 
-    if (dfAttributes & DF_FORMAT_35C) {
+    /* For branches, decode the instructions to print out the branch targets */
+    if (flags & kInstrCanBranch) {
+        InstructionFormat dalvikFormat = dexGetFormatFromOpcode(insn->opcode);
+        int offset = 0;
+        switch (dalvikFormat) {
+            case kFmt21t:
+                snprintf(buffer + strlen(buffer), 256, " v%d,", insn->vA);
+                offset = (int) insn->vB;
+                break;
+            case kFmt22t:
+                snprintf(buffer + strlen(buffer), 256, " v%d, v%d,",
+                         insn->vA, insn->vB);
+                offset = (int) insn->vC;
+                break;
+            case kFmt10t:
+            case kFmt20t:
+            case kFmt30t:
+                offset = (int) insn->vA;
+                break;
+            default:
+                LOGE("Unexpected branch format %d / opcode %#x", dalvikFormat,
+                     opcode);
+                dvmAbort();
+                break;
+        }
+        snprintf(buffer + strlen(buffer), 256, " (%c%x)",
+                 offset > 0 ? '+' : '-',
+                 offset > 0 ? offset : -offset);
+    } else if (dfAttributes & DF_FORMAT_35C) {
         unsigned int i;
         for (i = 0; i < insn->vA; i++) {
             if (i != 0) strcat(buffer, ",");
@@ -851,18 +1658,153 @@
         if (dfAttributes & DF_B_IS_REG) {
             snprintf(buffer + strlen(buffer), 256, ", v%d", insn->vB);
         }
-        else {
+        else if (opcode < kMirOpFirst) {
             snprintf(buffer + strlen(buffer), 256, ", (#%d)", insn->vB);
         }
         if (dfAttributes & DF_C_IS_REG) {
             snprintf(buffer + strlen(buffer), 256, ", v%d", insn->vC);
         }
-        else {
+        else if (opcode < kMirOpFirst) {
             snprintf(buffer + strlen(buffer), 256, ", (#%d)", insn->vC);
         }
     }
     int length = strlen(buffer) + 1;
-    ret = dvmCompilerNew(length, false);
+    ret = (char *)dvmCompilerNew(length, false);
+    memcpy(ret, buffer, length);
+    return ret;
+}
+
+char *getSSAName(const CompilationUnit *cUnit, int ssaReg, char *name)
+{
+    int ssa2DalvikValue = dvmConvertSSARegToDalvik(cUnit, ssaReg);
+
+    sprintf(name, "v%d_%d",
+            DECODE_REG(ssa2DalvikValue), DECODE_SUB(ssa2DalvikValue));
+    return name;
+}
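getSSAName relies on the subscript[31..16]/dalvik_reg[15..0] packing described earlier; the macros below are a sketch of that presumed encoding (their actual definitions live in Dataflow.h, which this patch does not show):

    /* Presumed packing, consistent with the "(0 << 16) | i" initialization below */
    #define ENCODE_REG_SUB(reg, sub)   (((sub) << 16) | (reg))
    #define DECODE_REG(value)          ((value) & 0xffff)
    #define DECODE_SUB(value)          (((value) >> 16) & 0xffff)

    /*
     * Example: Dalvik register v3 at its third SSA definition (subscript 2) is
     * stored as ENCODE_REG_SUB(3, 2) == 0x00020003 and printed by getSSAName()
     * as "v3_2".
     */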
+
+/*
+ * Dalvik instruction disassembler with optional SSA printing.
+ */
+char *dvmCompilerFullDisassembler(const CompilationUnit *cUnit,
+                                  const MIR *mir)
+{
+    char buffer[256];
+    char operand0[256], operand1[256];
+    const DecodedInstruction *insn = &mir->dalvikInsn;
+    int opcode = insn->opcode;
+    int dfAttributes = dvmCompilerDataFlowAttributes[opcode];
+    int flags = dexGetFlagsFromOpcode(insn->opcode);
+    char *ret;
+    int length;
+
+    buffer[0] = 0;
+    if (opcode >= kMirOpFirst) {
+        if (opcode == kMirOpPhi) {
+            snprintf(buffer, 256, "PHI %s = (%s",
+                     getSSAName(cUnit, mir->ssaRep->defs[0], operand0),
+                     getSSAName(cUnit, mir->ssaRep->uses[0], operand1));
+            int i;
+            for (i = 1; i < mir->ssaRep->numUses; i++) {
+                snprintf(buffer + strlen(buffer), 256, ", %s",
+                         getSSAName(cUnit, mir->ssaRep->uses[i], operand0));
+            }
+            snprintf(buffer + strlen(buffer), 256, ")");
+        }
+        else {
+            sprintf(buffer, "Opcode 0x%x", opcode);
+        }
+        goto done;
+    } else {
+        strcpy(buffer, dexGetOpcodeName(opcode));
+    }
+
+    /* For branches, decode the instructions to print out the branch targets */
+    if (flags & kInstrCanBranch) {
+        InstructionFormat dalvikFormat = dexGetFormatFromOpcode(insn->opcode);
+        int delta = 0;
+        switch (dalvikFormat) {
+            case kFmt21t:
+                snprintf(buffer + strlen(buffer), 256, " %s, ",
+                         getSSAName(cUnit, mir->ssaRep->uses[0], operand0));
+                delta = (int) insn->vB;
+                break;
+            case kFmt22t:
+                snprintf(buffer + strlen(buffer), 256, " %s, %s, ",
+                         getSSAName(cUnit, mir->ssaRep->uses[0], operand0),
+                         getSSAName(cUnit, mir->ssaRep->uses[1], operand1));
+                delta = (int) insn->vC;
+                break;
+            case kFmt10t:
+            case kFmt20t:
+            case kFmt30t:
+                delta = (int) insn->vA;
+                break;
+            default:
+                LOGE("Unexpected branch format: %d", dalvikFormat);
+                dvmAbort();
+                break;
+        }
+        snprintf(buffer + strlen(buffer), 256, " %04x",
+                 mir->offset + delta);
+    } else if (dfAttributes & (DF_FORMAT_35C | DF_FORMAT_3RC)) {
+        unsigned int i;
+        for (i = 0; i < insn->vA; i++) {
+            if (i != 0) strcat(buffer, ",");
+            snprintf(buffer + strlen(buffer), 256, " %s",
+                     getSSAName(cUnit, mir->ssaRep->uses[i], operand0));
+        }
+    } else {
+        int udIdx;
+        if (mir->ssaRep->numDefs) {
+
+            for (udIdx = 0; udIdx < mir->ssaRep->numDefs; udIdx++) {
+                snprintf(buffer + strlen(buffer), 256, " %s",
+                         getSSAName(cUnit, mir->ssaRep->defs[udIdx], operand0));
+            }
+            strcat(buffer, ",");
+        }
+        if (mir->ssaRep->numUses) {
+            /* No leading ',' for the first use */
+            snprintf(buffer + strlen(buffer), 256, " %s",
+                     getSSAName(cUnit, mir->ssaRep->uses[0], operand0));
+            for (udIdx = 1; udIdx < mir->ssaRep->numUses; udIdx++) {
+                snprintf(buffer + strlen(buffer), 256, ", %s",
+                         getSSAName(cUnit, mir->ssaRep->uses[udIdx], operand0));
+            }
+        }
+        if (opcode < kMirOpFirst) {
+            InstructionFormat dalvikFormat = dexGetFormatFromOpcode(opcode);
+            switch (dalvikFormat) {
+                case kFmt11n:        // op vA, #+B
+                case kFmt21s:        // op vAA, #+BBBB
+                case kFmt21h:        // op vAA, #+BBBB00000[00000000]
+                case kFmt31i:        // op vAA, #+BBBBBBBB
+                case kFmt51l:        // op vAA, #+BBBBBBBBBBBBBBBB
+                    snprintf(buffer + strlen(buffer), 256, " #%#x", insn->vB);
+                    break;
+                case kFmt21c:        // op vAA, thing@BBBB
+                case kFmt31c:        // op vAA, thing@BBBBBBBB
+                    snprintf(buffer + strlen(buffer), 256, " @%#x", insn->vB);
+                    break;
+                case kFmt22b:        // op vAA, vBB, #+CC
+                case kFmt22s:        // op vA, vB, #+CCCC
+                    snprintf(buffer + strlen(buffer), 256, " #%#x", insn->vC);
+                    break;
+                case kFmt22c:        // op vA, vB, thing@CCCC
+                case kFmt22cs:       // [opt] op vA, vB, field offset CCCC
+                    snprintf(buffer + strlen(buffer), 256, " @%#x", insn->vC);
+                    break;
+                    /* No need for special printing */
+                default:
+                    break;
+            }
+        }
+    }
+
+done:
+    length = strlen(buffer) + 1;
+    ret = (char *) dvmCompilerNew(length, false);
     memcpy(ret, buffer, length);
     return ret;
 }
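One caveat about both disassemblers above: snprintf(buffer + strlen(buffer), 256, ...) always passes the full buffer size rather than the space left past the current offset, so a very long operand list could in principle overrun buffer[256]. A bounded-append helper along these lines (hypothetical, not part of this patch) avoids that:

    #include <stdarg.h>

    /* Append formatted text, bounding the write to the space actually left. */
    static void appendToBuffer(char *buf, size_t bufLen, const char *format, ...)
    {
        size_t used = strlen(buf);
        va_list args;

        if (used >= bufLen)
            return;                         /* buffer already full */
        va_start(args, format);
        vsnprintf(buf + used, bufLen - used, format, args);
        va_end(args);
    }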
@@ -904,7 +1846,7 @@
     }
 
     int length = strlen(buffer) + 1;
-    ret = dvmCompilerNew(length, false);
+    ret = (char *)dvmCompilerNew(length, false);
     memcpy(ret, buffer, length);
     return ret;
 }
@@ -920,7 +1862,7 @@
 }
 
 /* Mark a reg as being defined */
-static inline void handleLiveInDef(BitVector *defV, int dalvikRegId)
+static inline void handleDef(BitVector *defV, int dalvikRegId)
 {
     dvmCompilerSetBit(defV, dalvikRegId);
 }
@@ -929,22 +1871,19 @@
  * Find out live-in variables for natural loops. Variables that are live-in in
  * the main loop body are considered to be defined in the entry block.
  */
-void dvmCompilerFindLiveIn(CompilationUnit *cUnit, BasicBlock *bb)
+bool dvmCompilerFindLocalLiveIn(CompilationUnit *cUnit, BasicBlock *bb)
 {
     MIR *mir;
     BitVector *useV, *defV, *liveInV;
 
-    if (bb->blockType != kDalvikByteCode &&
-        bb->blockType != kTraceEntryBlock) {
-        return;
-    }
+    if (bb->dataFlowInfo == NULL) return false;
 
     useV = bb->dataFlowInfo->useV =
-        dvmCompilerAllocBitVector(cUnit->method->registersSize, false);
+        dvmCompilerAllocBitVector(cUnit->numDalvikRegisters, false);
     defV = bb->dataFlowInfo->defV =
-        dvmCompilerAllocBitVector(cUnit->method->registersSize, false);
+        dvmCompilerAllocBitVector(cUnit->numDalvikRegisters, false);
     liveInV = bb->dataFlowInfo->liveInV =
-        dvmCompilerAllocBitVector(cUnit->method->registersSize, false);
+        dvmCompilerAllocBitVector(cUnit->numDalvikRegisters, false);
 
     for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
         int dfAttributes =
@@ -972,12 +1911,13 @@
             }
         }
         if (dfAttributes & DF_HAS_DEFS) {
-            handleLiveInDef(defV, dInsn->vA);
+            handleDef(defV, dInsn->vA);
             if (dfAttributes & DF_DA_WIDE) {
-                handleLiveInDef(defV, dInsn->vA+1);
+                handleDef(defV, dInsn->vA+1);
             }
         }
     }
+    return true;
 }
 
 /* Find out the latest SSA register for a given Dalvik register */
@@ -1002,7 +1942,7 @@
     cUnit->dalvikToSSAMap[dalvikReg] = newD2SMapping;
 
     int newS2DMapping = ENCODE_REG_SUB(dalvikReg, dalvikSub);
-    dvmInsertGrowableList(cUnit->ssaToDalvikMap, (void *) newS2DMapping);
+    dvmInsertGrowableList(cUnit->ssaToDalvikMap, newS2DMapping);
 
     defs[regIndex] = ssaReg;
 }
@@ -1015,7 +1955,7 @@
     int i;
 
     mir->ssaRep->numUses = numUses;
-    mir->ssaRep->uses = dvmCompilerNew(sizeof(int) * numUses, false);
+    mir->ssaRep->uses = (int *)dvmCompilerNew(sizeof(int) * numUses, false);
 
     for (i = 0; i < numUses; i++) {
         handleSSAUse(cUnit, mir->ssaRep->uses, dInsn->arg[i], i);
@@ -1030,7 +1970,7 @@
     int i;
 
     mir->ssaRep->numUses = numUses;
-    mir->ssaRep->uses = dvmCompilerNew(sizeof(int) * numUses, false);
+    mir->ssaRep->uses = (int *)dvmCompilerNew(sizeof(int) * numUses, false);
 
     for (i = 0; i < numUses; i++) {
         handleSSAUse(cUnit, mir->ssaRep->uses, dInsn->vC+i, i);
@@ -1038,16 +1978,15 @@
 }
 
 /* Entry function to convert a block into SSA representation */
-void dvmCompilerDoSSAConversion(CompilationUnit *cUnit, BasicBlock *bb)
+bool dvmCompilerDoSSAConversion(CompilationUnit *cUnit, BasicBlock *bb)
 {
     MIR *mir;
 
-    if (bb->blockType != kDalvikByteCode && bb->blockType != kTraceEntryBlock) {
-        return;
-    }
+    if (bb->dataFlowInfo == NULL) return false;
 
     for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
-        mir->ssaRep = dvmCompilerNew(sizeof(SSARepresentation), true);
+        mir->ssaRep = (struct SSARepresentation *)
+            dvmCompilerNew(sizeof(SSARepresentation), true);
 
         int dfAttributes =
             dvmCompilerDataFlowAttributes[mir->dalvikInsn.opcode];
@@ -1084,8 +2023,10 @@
 
         if (numUses) {
             mir->ssaRep->numUses = numUses;
-            mir->ssaRep->uses = dvmCompilerNew(sizeof(int) * numUses, false);
-            mir->ssaRep->fpUse = dvmCompilerNew(sizeof(bool) * numUses, false);
+            mir->ssaRep->uses = (int *)dvmCompilerNew(sizeof(int) * numUses,
+                                                      false);
+            mir->ssaRep->fpUse = (bool *)dvmCompilerNew(sizeof(bool) * numUses,
+                                                false);
         }
 
         int numDefs = 0;
@@ -1099,8 +2040,10 @@
 
         if (numDefs) {
             mir->ssaRep->numDefs = numDefs;
-            mir->ssaRep->defs = dvmCompilerNew(sizeof(int) * numDefs, false);
-            mir->ssaRep->fpDef = dvmCompilerNew(sizeof(bool) * numDefs, false);
+            mir->ssaRep->defs = (int *)dvmCompilerNew(sizeof(int) * numDefs,
+                                                      false);
+            mir->ssaRep->fpDef = (bool *)dvmCompilerNew(sizeof(bool) * numDefs,
+                                                        false);
         }
 
         DecodedInstruction *dInsn = &mir->dalvikInsn;
@@ -1145,12 +2088,18 @@
         }
     }
 
+    /*
+     * Take a snapshot of Dalvik->SSA mapping at the end of each block. The
+     * input to PHI nodes can be derived from the snapshot of all predecessor
+     * blocks.
+     */
     bb->dataFlowInfo->dalvikToSSAMap =
-        dvmCompilerNew(sizeof(int) * cUnit->method->registersSize, false);
+        (int *)dvmCompilerNew(sizeof(int) * cUnit->method->registersSize,
+                              false);
 
-    /* Take a snapshot of Dalvik->SSA mapping at the end of each block */
     memcpy(bb->dataFlowInfo->dalvikToSSAMap, cUnit->dalvikToSSAMap,
            sizeof(int) * cUnit->method->registersSize);
+    return true;
 }
 
 /* Set up a constant value for opcodes that have the DF_SETS_CONST attribute */
@@ -1160,7 +2109,7 @@
     cUnit->constantValues[ssaReg] = value;
 }
 
-void dvmCompilerDoConstantPropagation(CompilationUnit *cUnit, BasicBlock *bb)
+bool dvmCompilerDoConstantPropagation(CompilationUnit *cUnit, BasicBlock *bb)
 {
     MIR *mir;
     BitVector *isConstantV = cUnit->isConstantV;
@@ -1230,9 +2179,10 @@
         }
     }
     /* TODO: implement code to handle arithmetic operations */
+    return true;
 }
 
-void dvmCompilerFindInductionVariables(struct CompilationUnit *cUnit,
+bool dvmCompilerFindInductionVariables(struct CompilationUnit *cUnit,
                                        struct BasicBlock *bb)
 {
     BitVector *isIndVarV = cUnit->loopAnalysis->isIndVarV;
@@ -1242,13 +2192,13 @@
 
     if (bb->blockType != kDalvikByteCode &&
         bb->blockType != kTraceEntryBlock) {
-        return;
+        return false;
     }
 
     /* If the bb doesn't have a phi it cannot contain an induction variable */
     if (bb->firstMIRInsn == NULL ||
         bb->firstMIRInsn->dalvikInsn.opcode != kMirOpPhi) {
-        return;
+        return false;
     }
 
     /* Find basic induction variable first */
@@ -1299,7 +2249,7 @@
                 }
                 if (deltaIsConstant) {
                     dvmSetBit(isIndVarV, mir->ssaRep->uses[0]);
-                    InductionVariableInfo *ivInfo =
+                    InductionVariableInfo *ivInfo = (InductionVariableInfo *)
                         dvmCompilerNew(sizeof(InductionVariableInfo),
                                        false);
 
@@ -1308,7 +2258,7 @@
                     ivInfo->m = 1;         // always 1 to basic iv
                     ivInfo->c = 0;         // N/A to basic iv
                     ivInfo->inc = deltaValue;
-                    dvmInsertGrowableList(ivList, (void *) ivInfo);
+                    dvmInsertGrowableList(ivList, (intptr_t) ivInfo);
                     cUnit->loopAnalysis->numBasicIV++;
                     break;
                 }
@@ -1372,13 +2322,13 @@
             if (cIsConstant) {
                 unsigned int i;
                 dvmSetBit(isIndVarV, mir->ssaRep->defs[0]);
-                InductionVariableInfo *ivInfo =
+                InductionVariableInfo *ivInfo = (InductionVariableInfo *)
                     dvmCompilerNew(sizeof(InductionVariableInfo),
                                    false);
                 InductionVariableInfo *ivInfoOld = NULL ;
 
                 for (i = 0; i < ivList->numUsed; i++) {
-                    ivInfoOld = ivList->elemList[i];
+                    ivInfoOld = (InductionVariableInfo *) ivList->elemList[i];
                     if (ivInfoOld->ssaReg == mir->ssaRep->uses[0]) break;
                 }
 
@@ -1390,10 +2340,11 @@
                 ivInfo->m = ivInfoOld->m;
                 ivInfo->c = c + ivInfoOld->c;
                 ivInfo->inc = ivInfoOld->inc;
-                dvmInsertGrowableList(ivList, (void *) ivInfo);
+                dvmInsertGrowableList(ivList, (intptr_t) ivInfo);
             }
         }
     }
+    return true;
 }
 
 /* Setup the basic data structures for SSA conversion */
@@ -1402,7 +2353,8 @@
     int i;
     int numDalvikReg = cUnit->method->registersSize;
 
-    cUnit->ssaToDalvikMap = dvmCompilerNew(sizeof(GrowableList), false);
+    cUnit->ssaToDalvikMap = (GrowableList *)dvmCompilerNew(sizeof(GrowableList),
+                                                           false);
     dvmInitGrowableList(cUnit->ssaToDalvikMap, numDalvikReg);
 
     /*
@@ -1417,8 +2369,7 @@
      * into "(0 << 16) | i"
      */
     for (i = 0; i < numDalvikReg; i++) {
-        dvmInsertGrowableList(cUnit->ssaToDalvikMap,
-                              (void *) ENCODE_REG_SUB(i, 0));
+        dvmInsertGrowableList(cUnit->ssaToDalvikMap, ENCODE_REG_SUB(i, 0));
     }
 
     /*
@@ -1426,7 +2377,8 @@
      * while the high 16 bit is the current subscript. The original Dalvik
      * register N is mapped to SSA register N with subscript 0.
      */
-    cUnit->dalvikToSSAMap = dvmCompilerNew(sizeof(int) * numDalvikReg, false);
+    cUnit->dalvikToSSAMap = (int *)dvmCompilerNew(sizeof(int) * numDalvikReg,
+                                                  false);
     for (i = 0; i < numDalvikReg; i++) {
         cUnit->dalvikToSSAMap[i] = i;
     }
@@ -1434,27 +2386,127 @@
     /*
      * Allocate the BasicBlockDataFlow structure for the entry and code blocks
      */
-    for (i = 0; i < cUnit->numBlocks; i++) {
-        BasicBlock *bb = cUnit->blockList[i];
+    GrowableListIterator iterator;
+
+    dvmGrowableListIteratorInit(&cUnit->blockList, &iterator);
+
+    while (true) {
+        BasicBlock *bb = (BasicBlock *) dvmGrowableListIteratorNext(&iterator);
+        if (bb == NULL) break;
         if (bb->blockType == kDalvikByteCode ||
-            bb->blockType == kTraceEntryBlock) {
-            bb->dataFlowInfo = dvmCompilerNew(sizeof(BasicBlockDataFlow), true);
+            bb->blockType == kTraceEntryBlock ||
+            bb->blockType == kMethodEntryBlock ||
+            bb->blockType == kMethodExitBlock) {
+            bb->dataFlowInfo = (BasicBlockDataFlow *)
+                dvmCompilerNew(sizeof(BasicBlockDataFlow),
+                               true);
         }
     }
 }
 
-void dvmCompilerDataFlowAnalysisDispatcher(CompilationUnit *cUnit,
-                void (*func)(CompilationUnit *, BasicBlock *))
+/* Clear the visited flag for each BB */
+bool dvmCompilerClearVisitedFlag(struct CompilationUnit *cUnit,
+                                 struct BasicBlock *bb)
 {
-    int i;
-    for (i = 0; i < cUnit->numBlocks; i++) {
-        BasicBlock *bb = cUnit->blockList[i];
-        (*func)(cUnit, bb);
+    bb->visited = false;
+    return true;
+}
+
+void dvmCompilerDataFlowAnalysisDispatcher(CompilationUnit *cUnit,
+                bool (*func)(CompilationUnit *, BasicBlock *),
+                DataFlowAnalysisMode dfaMode,
+                bool isIterative)
+{
+    bool change = true;
+
+    while (change) {
+        change = false;
+
+        /* Scan all blocks and perform the operations specified in func */
+        if (dfaMode == kAllNodes) {
+            GrowableListIterator iterator;
+            dvmGrowableListIteratorInit(&cUnit->blockList, &iterator);
+            while (true) {
+                BasicBlock *bb =
+                    (BasicBlock *) dvmGrowableListIteratorNext(&iterator);
+                if (bb == NULL) break;
+                change |= (*func)(cUnit, bb);
+            }
+        }
+        /*
+         * Scan all reachable blocks and perform the operations specified in
+         * func.
+         */
+        else if (dfaMode == kReachableNodes) {
+            int numReachableBlocks = cUnit->numReachableBlocks;
+            int idx;
+            const GrowableList *blockList = &cUnit->blockList;
+
+            for (idx = 0; idx < numReachableBlocks; idx++) {
+                int blockIdx = cUnit->dfsOrder.elemList[idx];
+                BasicBlock *bb =
+                    (BasicBlock *) dvmGrowableListGetElement(blockList,
+                                                             blockIdx);
+                change |= (*func)(cUnit, bb);
+            }
+        }
+        /*
+         * Scan all reachable blocks by the pre-order in the depth-first-search
+         * CFG and perform the operations specified in func.
+         */
+        else if (dfaMode == kPreOrderDFSTraversal) {
+            int numReachableBlocks = cUnit->numReachableBlocks;
+            int idx;
+            const GrowableList *blockList = &cUnit->blockList;
+
+            for (idx = 0; idx < numReachableBlocks; idx++) {
+                int dfsIdx = cUnit->dfsOrder.elemList[idx];
+                BasicBlock *bb =
+                    (BasicBlock *) dvmGrowableListGetElement(blockList, dfsIdx);
+                change |= (*func)(cUnit, bb);
+            }
+        }
+        /*
+         * Scan all reachable blocks by the post-order in the depth-first-search
+         * CFG and perform the operations specified in func.
+         */
+        else if (dfaMode == kPostOrderDFSTraversal) {
+            int numReachableBlocks = cUnit->numReachableBlocks;
+            int idx;
+            const GrowableList *blockList = &cUnit->blockList;
+
+            for (idx = numReachableBlocks - 1; idx >= 0; idx--) {
+                int dfsIdx = cUnit->dfsOrder.elemList[idx];
+                BasicBlock *bb =
+                    (BasicBlock *) dvmGrowableListGetElement(blockList, dfsIdx);
+                change |= (*func)(cUnit, bb);
+            }
+        }
+        /*
+         * Scan all reachable blocks by the post-order in the dominator tree
+         * and perform the operations specified in func.
+         */
+        else if (dfaMode == kPostOrderDOMTraversal) {
+            int numReachableBlocks = cUnit->numReachableBlocks;
+            int idx;
+            const GrowableList *blockList = &cUnit->blockList;
+
+            for (idx = 0; idx < numReachableBlocks; idx++) {
+                int domIdx = cUnit->domPostOrderTraversal.elemList[idx];
+                BasicBlock *bb =
+                    (BasicBlock *) dvmGrowableListGetElement(blockList, domIdx);
+                change |= (*func)(cUnit, bb);
+            }
+        }
+        /* If isIterative is false, exit the loop after the first iteration */
+        change &= isIterative;
     }
 }
 
 /* Main entry point to do SSA conversion for non-loop traces */
 void dvmCompilerNonLoopAnalysis(CompilationUnit *cUnit)
 {
-    dvmCompilerDataFlowAnalysisDispatcher(cUnit, dvmCompilerDoSSAConversion);
+    dvmCompilerDataFlowAnalysisDispatcher(cUnit, dvmCompilerDoSSAConversion,
+                                          kAllNodes,
+                                          false /* isIterative */);
 }
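The call above runs a single sweep; for contrast, a hedged sketch of an iterative invocation, where sweeps repeat until no callback reports a change (the pass below is a hypothetical stand-in that a real analysis would replace):

    /* Hypothetical pass: would return true only while bb's dataflow sets grow. */
    static bool growLiveSets(CompilationUnit *cUnit, BasicBlock *bb)
    {
        (void) cUnit;
        (void) bb;
        return false;             /* placeholder: report "no change" */
    }

    static void runToFixedPoint(CompilationUnit *cUnit)
    {
        /* change &= isIterative keeps the dispatcher looping until a sweep
         * completes with no block reporting a change. */
        dvmCompilerDataFlowAnalysisDispatcher(cUnit, growLiveSets,
                                              kPostOrderDFSTraversal,
                                              true /* isIterative */);
    }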
diff --git a/vm/compiler/Frontend.c b/vm/compiler/Frontend.c
index d146e22..8663c5c 100644
--- a/vm/compiler/Frontend.c
+++ b/vm/compiler/Frontend.c
@@ -16,36 +16,42 @@
 
 #include "Dalvik.h"
 #include "libdex/DexOpcodes.h"
+#include "libdex/DexCatch.h"
 #include "interp/Jit.h"
 #include "CompilerInternals.h"
 #include "Dataflow.h"
 
+static inline bool contentIsInsn(const u2 *codePtr) {
+    u2 instr = *codePtr;
+    Opcode opcode = instr & 0xff;
+
+    /*
+     * Since the low 8 bits of instruction-stream data may look like OP_NOP, we
+     * need to check both the low byte and the whole 16-bit code unit to
+     * determine whether it is code or data.
+     */
+    return (opcode != OP_NOP || instr == 0);
+}
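For reference, the code units that motivate the check: each data payload starts with an ident whose low byte equals OP_NOP (0x00) but whose full 16-bit value is non-zero, while a real nop is exactly 0x0000:

    /* Illustrative code units as seen by contentIsInsn() */
    static const u2 kExampleUnits[] = {
        0x0000,     /* nop                            -> code */
        0x0100,     /* packed-switch-payload ident    -> data */
        0x0200,     /* sparse-switch-payload ident    -> data */
        0x0300,     /* fill-array-data-payload ident  -> data */
    };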
+
 /*
  * Parse an instruction, return the length of the instruction
  */
 static inline int parseInsn(const u2 *codePtr, DecodedInstruction *decInsn,
                             bool printMe)
 {
+    // Don't parse instruction data
+    if (!contentIsInsn(codePtr)) {
+        return 0;
+    }
+
     u2 instr = *codePtr;
     Opcode opcode = dexOpcodeFromCodeUnit(instr);
-    int insnWidth;
-
-    // Don't parse instruction data
-    if (opcode == OP_NOP && instr != 0) {
-        return 0;
-    } else {
-        insnWidth = dexGetWidthFromOpcode(opcode);
-        if (insnWidth < 0) {
-            insnWidth = -insnWidth;
-        }
-    }
 
     dexDecodeInstruction(codePtr, decInsn);
     if (printMe) {
         char *decodedString = dvmCompilerGetDalvikDisassembly(decInsn, NULL);
         LOGD("%p: %#06x %s\n", codePtr, opcode, decodedString);
     }
-    return insnWidth;
+    return dexGetWidthFromOpcode(opcode);
 }
 
 #define UNKNOWN_TARGET 0xffffffff
@@ -70,14 +76,17 @@
           break;
         case OP_INVOKE_VIRTUAL:
         case OP_INVOKE_VIRTUAL_RANGE:
+        case OP_INVOKE_VIRTUAL_JUMBO:
         case OP_INVOKE_INTERFACE:
         case OP_INVOKE_INTERFACE_RANGE:
+        case OP_INVOKE_INTERFACE_JUMBO:
         case OP_INVOKE_VIRTUAL_QUICK:
         case OP_INVOKE_VIRTUAL_QUICK_RANGE:
             *isInvoke = true;
             break;
         case OP_INVOKE_SUPER:
-        case OP_INVOKE_SUPER_RANGE: {
+        case OP_INVOKE_SUPER_RANGE:
+        case OP_INVOKE_SUPER_JUMBO: {
             int mIndex = caller->clazz->pDvmDex->
                 pResMethods[insn->dalvikInsn.vB]->methodIndex;
             const Method *calleeMethod =
@@ -91,7 +100,8 @@
             break;
         }
         case OP_INVOKE_STATIC:
-        case OP_INVOKE_STATIC_RANGE: {
+        case OP_INVOKE_STATIC_RANGE:
+        case OP_INVOKE_STATIC_JUMBO: {
             const Method *calleeMethod =
                 caller->clazz->pDvmDex->pResMethods[insn->dalvikInsn.vB];
 
@@ -115,7 +125,8 @@
             break;
         }
         case OP_INVOKE_DIRECT:
-        case OP_INVOKE_DIRECT_RANGE: {
+        case OP_INVOKE_DIRECT_RANGE:
+        case OP_INVOKE_DIRECT_JUMBO: {
             const Method *calleeMethod =
                 caller->clazz->pDvmDex->pResMethods[insn->dalvikInsn.vB];
             if (calleeMethod && !dvmIsNativeMethod(calleeMethod)) {
@@ -277,10 +288,12 @@
 
     /* For lookup only */
     dummyMethodEntry.method = method;
-    realMethodEntry = dvmHashTableLookup(gDvmJit.methodStatsTable, hashValue,
-                                         &dummyMethodEntry,
-                                         (HashCompareFunc) compareMethod,
-                                         false);
+    realMethodEntry = (CompilerMethodStats *)
+        dvmHashTableLookup(gDvmJit.methodStatsTable,
+                           hashValue,
+                           &dummyMethodEntry,
+                           (HashCompareFunc) compareMethod,
+                           false);
 
     /* This method has never been analyzed before - create an entry */
     if (realMethodEntry == NULL) {
@@ -413,21 +426,22 @@
 {
     const DexCode *dexCode = dvmGetMethodCode(desc->method);
     const JitTraceRun* currRun = &desc->trace[0];
-    unsigned int curOffset = currRun->frag.startOffset;
-    unsigned int numInsts = currRun->frag.numInsts;
+    unsigned int curOffset = currRun->info.frag.startOffset;
+    unsigned int numInsts = currRun->info.frag.numInsts;
     const u2 *codePtr = dexCode->insns + curOffset;
     int traceSize = 0;  // # of half-words
     const u2 *startCodePtr = codePtr;
-    BasicBlock *startBB, *curBB, *lastBB;
+    BasicBlock *curBB, *entryCodeBB;
     int numBlocks = 0;
     static int compilationId;
     CompilationUnit cUnit;
+    GrowableList *blockList;
 #if defined(WITH_JIT_TUNING)
     CompilerMethodStats *methodStats;
 #endif
 
     /* If we've already compiled this trace, just return success */
-    if (dvmJitGetCodeAddr(startCodePtr) && !info->discardResult) {
+    if (dvmJitGetTraceAddr(startCodePtr) && !info->discardResult) {
         /*
          * Make sure the codeAddress is NULL so that it won't clobber the
          * existing entry.
@@ -436,6 +450,11 @@
         return true;
     }
 
+    /* If the work order is stale, discard it */
+    if (info->cacheVersion != gDvmJit.cacheVersion) {
+        return false;
+    }
+
     compilationId++;
     memset(&cUnit, 0, sizeof(CompilationUnit));
 
@@ -450,20 +469,21 @@
     /* Initialize the printMe flag */
     cUnit.printMe = gDvmJit.printMe;
 
-    /* Initialize the profile flag */
-    cUnit.executionCount = gDvmJit.profile;
-
     /* Setup the method */
     cUnit.method = desc->method;
 
     /* Initialize the PC reconstruction list */
     dvmInitGrowableList(&cUnit.pcReconstructionList, 8);
 
+    /* Initialize the basic block list */
+    blockList = &cUnit.blockList;
+    dvmInitGrowableList(blockList, 8);
+
     /* Identify traces that we don't want to compile */
     if (gDvmJit.methodTable) {
         int len = strlen(desc->method->clazz->descriptor) +
                   strlen(desc->method->name) + 1;
-        char *fullSignature = dvmCompilerNew(len, true);
+        char *fullSignature = (char *)dvmCompilerNew(len, true);
         strcpy(fullSignature, desc->method->clazz->descriptor);
         strcat(fullSignature, desc->method->name);
 
@@ -533,18 +553,15 @@
     }
 
     /* Allocate the entry block */
-    lastBB = startBB = curBB = dvmCompilerNewBB(kTraceEntryBlock);
+    curBB = dvmCompilerNewBB(kTraceEntryBlock, numBlocks++);
+    dvmInsertGrowableList(blockList, (intptr_t) curBB);
     curBB->startOffset = curOffset;
-    curBB->id = numBlocks++;
 
-    curBB = dvmCompilerNewBB(kDalvikByteCode);
-    curBB->startOffset = curOffset;
-    curBB->id = numBlocks++;
-
-    /* Make the first real dalvik block the fallthrough of the entry block */
-    startBB->fallThrough = curBB;
-    lastBB->next = curBB;
-    lastBB = curBB;
+    entryCodeBB = dvmCompilerNewBB(kDalvikByteCode, numBlocks++);
+    dvmInsertGrowableList(blockList, (intptr_t) entryCodeBB);
+    entryCodeBB->startOffset = curOffset;
+    curBB->fallThrough = entryCodeBB;
+    curBB = entryCodeBB;
 
     if (cUnit.printMe) {
         LOGD("--------\nCompiler: Building trace for %s, offset 0x%x\n",
@@ -558,7 +575,7 @@
     while (1) {
         MIR *insn;
         int width;
-        insn = dvmCompilerNew(sizeof(MIR), true);
+        insn = (MIR *)dvmCompilerNew(sizeof(MIR), true);
         insn->offset = curOffset;
         width = parseInsn(codePtr, &insn->dalvikInsn, cUnit.printMe);
 
@@ -572,11 +589,16 @@
         int flags = dexGetFlagsFromOpcode(insn->dalvikInsn.opcode);
 
         if (flags & kInstrInvoke) {
+            const Method *calleeMethod = (const Method *)
+                currRun[JIT_TRACE_CUR_METHOD].info.meta;
             assert(numInsts == 1);
             CallsiteInfo *callsiteInfo =
-                dvmCompilerNew(sizeof(CallsiteInfo), true);
-            callsiteInfo->clazz = currRun[1].meta;
-            callsiteInfo->method = currRun[2].meta;
+                (CallsiteInfo *)dvmCompilerNew(sizeof(CallsiteInfo), true);
+            callsiteInfo->classDescriptor = (const char *)
+                currRun[JIT_TRACE_CLASS_DESC].info.meta;
+            callsiteInfo->classLoader = (Object *)
+                currRun[JIT_TRACE_CLASS_LOADER].info.meta;
+            callsiteInfo->method = calleeMethod;
             insn->meta.callsiteInfo = callsiteInfo;
         }
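The accesses above (currRun->isCode, currRun->info.frag, currRun->info.meta, and the JIT_TRACE_* meta indices) imply that JitTraceRun grew from the old bare union into roughly the shape below; this is inferred from usage in this patch and not shown in it:

    /* Presumed new layout, reconstructed from the field accesses in this file */
    typedef struct {
        union {
            JitCodeDesc frag;      /* valid when isCode is set */
            void*       meta;      /* class descriptor / loader / method pointer */
        } info;
        u4 isCode:1;               /* distinguishes code runs from meta entries */
        u4 unused:31;
    } JitTraceRun;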
 
@@ -585,25 +607,23 @@
             break;
         }
         if (--numInsts == 0) {
-            if (currRun->frag.runEnd) {
+            if (currRun->info.frag.runEnd) {
                 break;
             } else {
                 /* Advance to the next trace description (ie non-meta info) */
                 do {
                     currRun++;
-                } while (!currRun->frag.isCode);
+                } while (!currRun->isCode);
 
                 /* Dummy end-of-run marker seen */
-                if (currRun->frag.numInsts == 0) {
+                if (currRun->info.frag.numInsts == 0) {
                     break;
                 }
 
-                curBB = dvmCompilerNewBB(kDalvikByteCode);
-                lastBB->next = curBB;
-                lastBB = curBB;
-                curBB->id = numBlocks++;
-                curOffset = currRun->frag.startOffset;
-                numInsts = currRun->frag.numInsts;
+                curBB = dvmCompilerNewBB(kDalvikByteCode, numBlocks++);
+                dvmInsertGrowableList(blockList, (intptr_t) curBB);
+                curOffset = currRun->info.frag.startOffset;
+                numInsts = currRun->info.frag.numInsts;
                 curBB->startOffset = curOffset;
                 codePtr = dexCode->insns + curOffset;
             }
@@ -623,8 +643,11 @@
      * taken/fallthrough links. Also create chaining cells for code not included
      * in the trace.
      */
-    for (curBB = startBB; curBB; curBB = curBB->next) {
+    size_t blockId;
+    for (blockId = 0; blockId < blockList->numUsed; blockId++) {
+        curBB = (BasicBlock *) dvmGrowableListGetElement(blockList, blockId);
         MIR *lastInsn = curBB->lastMIRInsn;
+        BasicBlock *backwardCell;
         /* Skip empty blocks */
         if (lastInsn == NULL) {
             continue;
@@ -648,12 +671,18 @@
         }
 
         /* No backward branch in the trace - start searching the next BB */
-        for (searchBB = curBB->next; searchBB; searchBB = searchBB->next) {
+        size_t searchBlockId;
+        for (searchBlockId = blockId+1; searchBlockId < blockList->numUsed;
+             searchBlockId++) {
+            searchBB = (BasicBlock *) dvmGrowableListGetElement(blockList,
+                                                                searchBlockId);
             if (targetOffset == searchBB->startOffset) {
                 curBB->taken = searchBB;
+                dvmCompilerSetBit(searchBB->predecessors, curBB->id);
             }
             if (fallThroughOffset == searchBB->startOffset) {
                 curBB->fallThrough = searchBB;
+                dvmCompilerSetBit(searchBB->predecessors, curBB->id);
 
                 /*
                  * Fallthrough block of an invoke instruction needs to be
@@ -680,7 +709,7 @@
         if (curBB->taken == NULL &&
             curBB->fallThrough == NULL &&
             flags == (kInstrCanBranch | kInstrCanContinue) &&
-            fallThroughOffset == startBB->startOffset &&
+            fallThroughOffset == entryCodeBB->startOffset &&
             JIT_OPT_NO_LOOP != (optHints & JIT_OPT_NO_LOOP)) {
             BasicBlock *loopBranch = curBB;
             BasicBlock *exitBB;
@@ -689,50 +718,28 @@
             if (cUnit.printMe) {
                 LOGD("Natural loop detected!");
             }
-            exitBB = dvmCompilerNewBB(kTraceExitBlock);
-            lastBB->next = exitBB;
-            lastBB = exitBB;
-
+            exitBB = dvmCompilerNewBB(kTraceExitBlock, numBlocks++);
+            dvmInsertGrowableList(blockList, (intptr_t) exitBB);
             exitBB->startOffset = targetOffset;
-            exitBB->id = numBlocks++;
             exitBB->needFallThroughBranch = true;
 
             loopBranch->taken = exitBB;
-#if defined(WITH_SELF_VERIFICATION)
-            BasicBlock *backwardCell =
-                dvmCompilerNewBB(kChainingCellBackwardBranch);
-            lastBB->next = backwardCell;
-            lastBB = backwardCell;
-
-            backwardCell->startOffset = startBB->startOffset;
-            backwardCell->id = numBlocks++;
+            dvmCompilerSetBit(exitBB->predecessors, loopBranch->id);
+            backwardCell =
+                dvmCompilerNewBB(kChainingCellBackwardBranch, numBlocks++);
+            dvmInsertGrowableList(blockList, (intptr_t) backwardCell);
+            backwardCell->startOffset = entryCodeBB->startOffset;
             loopBranch->fallThrough = backwardCell;
-#elif defined(WITH_JIT_TUNING)
-            if (gDvmJit.profile) {
-                BasicBlock *backwardCell =
-                    dvmCompilerNewBB(kChainingCellBackwardBranch);
-                lastBB->next = backwardCell;
-                lastBB = backwardCell;
-
-                backwardCell->startOffset = startBB->startOffset;
-                backwardCell->id = numBlocks++;
-                loopBranch->fallThrough = backwardCell;
-            } else {
-                loopBranch->fallThrough = startBB->next;
-            }
-#else
-            loopBranch->fallThrough = startBB->next;
-#endif
+            dvmCompilerSetBit(backwardCell->predecessors, loopBranch->id);
 
             /* Create the chaining cell as the fallthrough of the exit block */
-            exitChainingCell = dvmCompilerNewBB(kChainingCellNormal);
-            lastBB->next = exitChainingCell;
-            lastBB = exitChainingCell;
-
+            exitChainingCell = dvmCompilerNewBB(kChainingCellNormal,
+                                                numBlocks++);
+            dvmInsertGrowableList(blockList, (intptr_t) exitChainingCell);
             exitChainingCell->startOffset = targetOffset;
-            exitChainingCell->id = numBlocks++;
 
             exitBB->fallThrough = exitChainingCell;
+            dvmCompilerSetBit(exitChainingCell->predecessors, exitBB->id);
 
             cUnit.hasLoop = true;
         }
@@ -761,38 +768,36 @@
 
             /* One chaining cell for the first MAX_CHAINED_SWITCH_CASES cases */
             for (i = 0; i < maxChains; i++) {
-                BasicBlock *caseChain = dvmCompilerNewBB(kChainingCellNormal);
-                lastBB->next = caseChain;
-                lastBB = caseChain;
-
+                BasicBlock *caseChain = dvmCompilerNewBB(kChainingCellNormal,
+                                                         numBlocks++);
+                dvmInsertGrowableList(blockList, (intptr_t) caseChain);
                 caseChain->startOffset = lastInsn->offset + targets[i];
-                caseChain->id = numBlocks++;
             }
 
             /* One more chaining cell for the default case */
-            BasicBlock *caseChain = dvmCompilerNewBB(kChainingCellNormal);
-            lastBB->next = caseChain;
-            lastBB = caseChain;
-
+            BasicBlock *caseChain = dvmCompilerNewBB(kChainingCellNormal,
+                                                     numBlocks++);
+            dvmInsertGrowableList(blockList, (intptr_t) caseChain);
             caseChain->startOffset = lastInsn->offset + lastInsn->width;
-            caseChain->id = numBlocks++;
         /* Fallthrough block not included in the trace */
         } else if (!isUnconditionalBranch(lastInsn) &&
                    curBB->fallThrough == NULL) {
+            BasicBlock *fallThroughBB;
             /*
              * If the chaining cell is after an invoke or
              * instruction that cannot change the control flow, request a hot
              * chaining cell.
              */
             if (isInvoke || curBB->needFallThroughBranch) {
-                lastBB->next = dvmCompilerNewBB(kChainingCellHot);
+                fallThroughBB = dvmCompilerNewBB(kChainingCellHot, numBlocks++);
             } else {
-                lastBB->next = dvmCompilerNewBB(kChainingCellNormal);
+                fallThroughBB = dvmCompilerNewBB(kChainingCellNormal,
+                                                 numBlocks++);
             }
-            lastBB = lastBB->next;
-            lastBB->id = numBlocks++;
-            lastBB->startOffset = fallThroughOffset;
-            curBB->fallThrough = lastBB;
+            dvmInsertGrowableList(blockList, (intptr_t) fallThroughBB);
+            fallThroughBB->startOffset = fallThroughOffset;
+            curBB->fallThrough = fallThroughBB;
+            dvmCompilerSetBit(fallThroughBB->predecessors, curBB->id);
         }
         /* Target block not included in the trace */
         if (curBB->taken == NULL &&
@@ -804,13 +809,15 @@
                 if (callee) {
                     /* JNI call doesn't need a chaining cell */
                     if (!dvmIsNativeMethod(callee)) {
-                        newBB = dvmCompilerNewBB(kChainingCellInvokeSingleton);
+                        newBB = dvmCompilerNewBB(kChainingCellInvokeSingleton,
+                                                 numBlocks++);
                         newBB->startOffset = 0;
                         newBB->containingMethod = callee;
                     }
                 /* Will resolve at runtime */
                 } else {
-                    newBB = dvmCompilerNewBB(kChainingCellInvokePredicted);
+                    newBB = dvmCompilerNewBB(kChainingCellInvokePredicted,
+                                             numBlocks++);
                     newBB->startOffset = 0;
                 }
             /* For unconditional branches, request a hot chaining cell */
@@ -818,79 +825,69 @@
 #if !defined(WITH_SELF_VERIFICATION)
                 newBB = dvmCompilerNewBB(dexIsGoto(flags) ?
                                                   kChainingCellHot :
-                                                  kChainingCellNormal);
+                                                  kChainingCellNormal,
+                                         numBlocks++);
                 newBB->startOffset = targetOffset;
 #else
                 /* Handle branches that branch back into the block */
                 if (targetOffset >= curBB->firstMIRInsn->offset &&
                     targetOffset <= curBB->lastMIRInsn->offset) {
-                    newBB = dvmCompilerNewBB(kChainingCellBackwardBranch);
+                    newBB = dvmCompilerNewBB(kChainingCellBackwardBranch,
+                                             numBlocks++);
                 } else {
                     newBB = dvmCompilerNewBB(dexIsGoto(flags) ?
                                                       kChainingCellHot :
-                                                      kChainingCellNormal);
+                                                      kChainingCellNormal,
+                                             numBlocks++);
                 }
                 newBB->startOffset = targetOffset;
 #endif
             }
             if (newBB) {
-                newBB->id = numBlocks++;
                 curBB->taken = newBB;
-                lastBB->next = newBB;
-                lastBB = newBB;
+                dvmCompilerSetBit(newBB->predecessors, curBB->id);
+                dvmInsertGrowableList(blockList, (intptr_t) newBB);
             }
         }
     }
 
     /* Now create a special block to host PC reconstruction code */
-    lastBB->next = dvmCompilerNewBB(kPCReconstruction);
-    lastBB = lastBB->next;
-    lastBB->id = numBlocks++;
+    curBB = dvmCompilerNewBB(kPCReconstruction, numBlocks++);
+    dvmInsertGrowableList(blockList, (intptr_t) curBB);
 
     /* And one final block that publishes the PC and raise the exception */
-    lastBB->next = dvmCompilerNewBB(kExceptionHandling);
-    lastBB = lastBB->next;
-    lastBB->id = numBlocks++;
+    curBB = dvmCompilerNewBB(kExceptionHandling, numBlocks++);
+    dvmInsertGrowableList(blockList, (intptr_t) curBB);
 
     if (cUnit.printMe) {
-        char* signature = dexProtoCopyMethodDescriptor(&desc->method->prototype);
+        char* signature =
+            dexProtoCopyMethodDescriptor(&desc->method->prototype);
         LOGD("TRACEINFO (%d): 0x%08x %s%s.%s 0x%x %d of %d, %d blocks",
             compilationId,
             (intptr_t) desc->method->insns,
             desc->method->clazz->descriptor,
             desc->method->name,
             signature,
-            desc->trace[0].frag.startOffset,
+            desc->trace[0].info.frag.startOffset,
             traceSize,
             dexCode->insnsSize,
             numBlocks);
         free(signature);
     }
 
-    BasicBlock **blockList;
-
     cUnit.traceDesc = desc;
     cUnit.numBlocks = numBlocks;
-    blockList = cUnit.blockList =
-        dvmCompilerNew(sizeof(BasicBlock *) * numBlocks, true);
-
-    int i;
-
-    for (i = 0, curBB = startBB; i < numBlocks; i++) {
-        blockList[i] = curBB;
-        curBB = curBB->next;
-    }
-    /* Make sure all blocks are added to the cUnit */
-    assert(curBB == NULL);
 
     /* Set the instruction set to use (NOTE: later components may change it) */
     cUnit.instructionSet = dvmCompilerInstructionSet();
 
     /* Inline transformation @ the MIR level */
     if (cUnit.hasInvoke && !(gDvmJit.disableOpt & (1 << kMethodInlining))) {
-        dvmCompilerInlineMIR(&cUnit);
+        dvmCompilerInlineMIR(&cUnit, info);
     }
 
+    cUnit.numDalvikRegisters = cUnit.method->registersSize;
+
     /* Preparation for SSA conversion */
     dvmInitializeSSAConversion(&cUnit);
 
@@ -921,8 +918,8 @@
         dvmCompilerDumpCompilationUnit(&cUnit);
     }
 
-    /* Allocate Registers */
-    dvmCompilerRegAlloc(&cUnit);
+    /* Allocate Registers using simple local allocation scheme */
+    dvmCompilerLocalRegAlloc(&cUnit);
 
     /* Convert MIR to LIR, etc. */
     dvmCompilerMIR2LIR(&cUnit);
@@ -937,25 +934,44 @@
     } while (cUnit.assemblerStatus == kRetryAll);
 
     if (cUnit.printMe) {
+        LOGD("Trace Dalvik PC: %p", startCodePtr);
         dvmCompilerCodegenDump(&cUnit);
         LOGD("End %s%s, %d Dalvik instructions",
              desc->method->clazz->descriptor, desc->method->name,
              cUnit.numInsts);
     }
 
-    /* Reset the compiler resource pool */
-    dvmCompilerArenaReset();
-
     if (cUnit.assemblerStatus == kRetryHalve) {
+        /* Reset the compiler resource pool before retry */
+        dvmCompilerArenaReset();
+
         /* Halve the instruction count and start from the top */
         return dvmCompileTrace(desc, cUnit.numInsts / 2, info, bailPtr,
                                optHints);
     }
 
+    /*
+     * If this trace uses class objects as constants,
+     * dvmJitInstallClassObjectPointers will switch the thread state
+     * to running and look up the class pointers using the descriptor/loader
+     * tuple stored in the callsite info structure. We need to make this window
+     * as short as possible since it is blocking GC.
+     */
+    if (cUnit.hasClassLiterals && info->codeAddress) {
+        dvmJitInstallClassObjectPointers(&cUnit, (char *) info->codeAddress);
+    }
+
+    /*
+     * Since callsiteinfo is allocated from the arena, delay the reset until
+     * class pointers are resolved.
+     */
+    dvmCompilerArenaReset();
+
     assert(cUnit.assemblerStatus == kSuccess);
 #if defined(WITH_JIT_TUNING)
     methodStats->nativeSize += cUnit.totalSize;
 #endif
+
     return info->codeAddress != NULL;
 }
 
@@ -972,8 +988,10 @@
 {
     switch (insn->opcode) {
         case OP_NEW_INSTANCE:
-        case OP_CHECK_CAST: {
-            ClassObject *classPtr = (void*)
+        case OP_NEW_INSTANCE_JUMBO:
+        case OP_CHECK_CAST:
+        case OP_CHECK_CAST_JUMBO: {
+            ClassObject *classPtr = (ClassObject *)(void*)
               (method->clazz->pDvmDex->pResClasses[insn->vB]);
 
             /* Class hasn't been initialized yet */
@@ -982,20 +1000,34 @@
             }
             return true;
         }
-        case OP_SGET_OBJECT:
-        case OP_SGET_BOOLEAN:
-        case OP_SGET_CHAR:
-        case OP_SGET_BYTE:
-        case OP_SGET_SHORT:
         case OP_SGET:
+        case OP_SGET_JUMBO:
         case OP_SGET_WIDE:
-        case OP_SPUT_OBJECT:
-        case OP_SPUT_BOOLEAN:
-        case OP_SPUT_CHAR:
-        case OP_SPUT_BYTE:
-        case OP_SPUT_SHORT:
+        case OP_SGET_WIDE_JUMBO:
+        case OP_SGET_OBJECT:
+        case OP_SGET_OBJECT_JUMBO:
+        case OP_SGET_BOOLEAN:
+        case OP_SGET_BOOLEAN_JUMBO:
+        case OP_SGET_BYTE:
+        case OP_SGET_BYTE_JUMBO:
+        case OP_SGET_CHAR:
+        case OP_SGET_CHAR_JUMBO:
+        case OP_SGET_SHORT:
+        case OP_SGET_SHORT_JUMBO:
         case OP_SPUT:
-        case OP_SPUT_WIDE: {
+        case OP_SPUT_JUMBO:
+        case OP_SPUT_WIDE:
+        case OP_SPUT_WIDE_JUMBO:
+        case OP_SPUT_OBJECT:
+        case OP_SPUT_OBJECT_JUMBO:
+        case OP_SPUT_BOOLEAN:
+        case OP_SPUT_BOOLEAN_JUMBO:
+        case OP_SPUT_BYTE:
+        case OP_SPUT_BYTE_JUMBO:
+        case OP_SPUT_CHAR:
+        case OP_SPUT_CHAR_JUMBO:
+        case OP_SPUT_SHORT:
+        case OP_SPUT_SHORT_JUMBO: {
             void *fieldPtr = (void*)
               (method->clazz->pDvmDex->pResFields[insn->vB]);
 
@@ -1005,7 +1037,8 @@
             return true;
         }
         case OP_INVOKE_SUPER:
-        case OP_INVOKE_SUPER_RANGE: {
+        case OP_INVOKE_SUPER_RANGE:
+        case OP_INVOKE_SUPER_JUMBO: {
             int mIndex = method->clazz->pDvmDex->
                 pResMethods[insn->vB]->methodIndex;
             const Method *calleeMethod = method->clazz->super->vtable[mIndex];
@@ -1024,8 +1057,10 @@
         }
         case OP_INVOKE_STATIC:
         case OP_INVOKE_STATIC_RANGE:
+        case OP_INVOKE_STATIC_JUMBO:
         case OP_INVOKE_DIRECT:
-        case OP_INVOKE_DIRECT_RANGE: {
+        case OP_INVOKE_DIRECT_RANGE:
+        case OP_INVOKE_DIRECT_JUMBO: {
             const Method *calleeMethod =
                 method->clazz->pDvmDex->pResMethods[insn->vB];
             if (calleeMethod == NULL) {
@@ -1033,7 +1068,8 @@
             }
             return true;
         }
-        case OP_CONST_CLASS: {
+        case OP_CONST_CLASS:
+        case OP_CONST_CLASS_JUMBO: {
             void *classPtr = (void*)
                 (method->clazz->pDvmDex->pResClasses[insn->vB]);
 
@@ -1057,6 +1093,600 @@
     }
 }
 
+/* Split an existing block from the specified code offset into two */
+static BasicBlock *splitBlock(CompilationUnit *cUnit,
+                              unsigned int codeOffset,
+                              BasicBlock *origBlock)
+{
+    MIR *insn = origBlock->firstMIRInsn;
+    while (insn) {
+        if (insn->offset == codeOffset) break;
+        insn = insn->next;
+    }
+    if (insn == NULL) {
+        LOGE("Break split failed");
+        dvmAbort();
+    }
+    BasicBlock *bottomBlock = dvmCompilerNewBB(kDalvikByteCode,
+                                               cUnit->numBlocks++);
+    dvmInsertGrowableList(&cUnit->blockList, (intptr_t) bottomBlock);
+
+    bottomBlock->startOffset = codeOffset;
+    bottomBlock->firstMIRInsn = insn;
+    bottomBlock->lastMIRInsn = origBlock->lastMIRInsn;
+
+    /* Handle the taken path */
+    bottomBlock->taken = origBlock->taken;
+    if (bottomBlock->taken) {
+        origBlock->taken = NULL;
+        dvmCompilerClearBit(bottomBlock->taken->predecessors, origBlock->id);
+        dvmCompilerSetBit(bottomBlock->taken->predecessors, bottomBlock->id);
+    }
+
+    /* Handle the fallthrough path */
+    bottomBlock->fallThrough = origBlock->fallThrough;
+    origBlock->fallThrough = bottomBlock;
+    dvmCompilerSetBit(bottomBlock->predecessors, origBlock->id);
+    if (bottomBlock->fallThrough) {
+        dvmCompilerClearBit(bottomBlock->fallThrough->predecessors,
+                            origBlock->id);
+        dvmCompilerSetBit(bottomBlock->fallThrough->predecessors,
+                          bottomBlock->id);
+    }
+
+    /* Handle the successor list */
+    if (origBlock->successorBlockList.blockListType != kNotUsed) {
+        bottomBlock->successorBlockList = origBlock->successorBlockList;
+        origBlock->successorBlockList.blockListType = kNotUsed;
+        GrowableListIterator iterator;
+
+        dvmGrowableListIteratorInit(&bottomBlock->successorBlockList.blocks,
+                                    &iterator);
+        while (true) {
+            SuccessorBlockInfo *successorBlockInfo =
+                (SuccessorBlockInfo *) dvmGrowableListIteratorNext(&iterator);
+            if (successorBlockInfo == NULL) break;
+            BasicBlock *bb = successorBlockInfo->block;
+            dvmCompilerClearBit(bb->predecessors, origBlock->id);
+            dvmCompilerSetBit(bb->predecessors, bottomBlock->id);
+        }
+    }
+
+    origBlock->lastMIRInsn = insn->prev;
+
+    insn->prev->next = NULL;
+    insn->prev = NULL;
+    return bottomBlock;
+}
+
+/*
+ * Given a code offset, find out the block that starts with it. If the offset
+ * is in the middle of an existing block, split it into two.
+ */
+static BasicBlock *findBlock(CompilationUnit *cUnit,
+                             unsigned int codeOffset,
+                             bool split, bool create)
+{
+    GrowableList *blockList = &cUnit->blockList;
+    BasicBlock *bb;
+    unsigned int i;
+
+    for (i = 0; i < blockList->numUsed; i++) {
+        bb = (BasicBlock *) blockList->elemList[i];
+        if (bb->blockType != kDalvikByteCode) continue;
+        if (bb->startOffset == codeOffset) return bb;
+        /* Check if a branch jumps into the middle of an existing block */
+        if ((split == true) && (codeOffset > bb->startOffset) &&
+            (bb->lastMIRInsn != NULL) &&
+            (codeOffset <= bb->lastMIRInsn->offset)) {
+            BasicBlock *newBB = splitBlock(cUnit, codeOffset, bb);
+            return newBB;
+        }
+    }
+    if (create) {
+        bb = dvmCompilerNewBB(kDalvikByteCode, cUnit->numBlocks++);
+        dvmInsertGrowableList(&cUnit->blockList, (intptr_t) bb);
+        bb->startOffset = codeOffset;
+        return bb;
+    }
+    return NULL;
+}
+
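
The split/create flags above encode the whole block-map policy: an exact match on startOffset returns the existing block, an offset that lands inside an existing block optionally splits it, and a miss optionally creates a new block. A minimal standalone sketch of that policy, using toy types rather than the compiler's BasicBlock/GrowableList structures (everything below is hypothetical illustration, not part of this patch):

    #include <stdio.h>

    /* Toy block: a half-open range [start, end) of 16-bit code units. */
    typedef struct { unsigned start, end; } ToyBlock;

    static ToyBlock blocks[16] = { {0x00, 0x07}, {0x08, 0x0f} };
    static unsigned numBlocks = 2;

    /* Same policy as findBlock: exact match, optional split, optional create. */
    static int findToyBlock(unsigned offset, int split, int create)
    {
        unsigned i;
        for (i = 0; i < numBlocks; i++) {
            if (blocks[i].start == offset)
                return (int) i;                       /* exact match */
            if (split && offset > blocks[i].start && offset < blocks[i].end) {
                blocks[numBlocks].start = offset;     /* tail becomes a new block */
                blocks[numBlocks].end = blocks[i].end;
                blocks[i].end = offset;
                return (int) numBlocks++;
            }
        }
        if (create) {
            blocks[numBlocks].start = offset;
            blocks[numBlocks].end = offset + 1;
            return (int) numBlocks++;
        }
        return -1;                                    /* probe only, no match */
    }

    int main(void)
    {
        printf("branch into 0x04 -> block %d\n", findToyBlock(0x04, 1, 1)); /* splits block 0 */
        printf("probe 0x20       -> block %d\n", findToyBlock(0x20, 0, 0)); /* -1, not created */
        return 0;
    }
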
+/* Dump the CFG into a DOT graph */
+void dumpCFG(CompilationUnit *cUnit, const char *dirPrefix)
+{
+    const Method *method = cUnit->method;
+    FILE *file;
+    char* signature = dexProtoCopyMethodDescriptor(&method->prototype);
+    char *fileName = (char *) dvmCompilerNew(
+                                  strlen(dirPrefix) +
+                                  strlen(method->clazz->descriptor) +
+                                  strlen(method->name) +
+                                  strlen(signature) +
+                                  strlen(".dot") + 1, true);
+    sprintf(fileName, "%s%s%s%s.dot", dirPrefix,
+            method->clazz->descriptor, method->name, signature);
+    free(signature);
+
+    /*
+     * Convert the special characters into a filesystem- and shell-friendly
+     * format.
+     */
+    int i;
+    for (i = strlen(dirPrefix); fileName[i]; i++) {
+        if (fileName[i] == '/') {
+            fileName[i] = '_';
+        } else if (fileName[i] == ';') {
+            fileName[i] = '#';
+        } else if (fileName[i] == '$') {
+            fileName[i] = '+';
+        } else if (fileName[i] == '(' || fileName[i] == ')') {
+            fileName[i] = '@';
+        } else if (fileName[i] == '<' || fileName[i] == '>') {
+            fileName[i] = '=';
+        }
+    }
+    file = fopen(fileName, "w");
+    if (file == NULL) {
+        return;
+    }
+    fprintf(file, "digraph G {\n");
+
+    fprintf(file, "  rankdir=TB\n");
+
+    int numReachableBlocks = cUnit->numReachableBlocks;
+    int idx;
+    const GrowableList *blockList = &cUnit->blockList;
+
+    for (idx = 0; idx < numReachableBlocks; idx++) {
+        int blockIdx = cUnit->dfsOrder.elemList[idx];
+        BasicBlock *bb = (BasicBlock *) dvmGrowableListGetElement(blockList,
+                                                                  blockIdx);
+        if (bb == NULL) break;
+        if (bb->blockType == kMethodEntryBlock) {
+            fprintf(file, "  entry [shape=Mdiamond];\n");
+        } else if (bb->blockType == kMethodExitBlock) {
+            fprintf(file, "  exit [shape=Mdiamond];\n");
+        } else if (bb->blockType == kDalvikByteCode) {
+            fprintf(file, "  block%04x [shape=record,label = \"{ \\\n",
+                    bb->startOffset);
+            const MIR *mir;
+            for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
+                fprintf(file, "    {%04x %s\\l}%s\\\n", mir->offset,
+                        dvmCompilerFullDisassembler(cUnit, mir),
+                        mir->next ? " | " : " ");
+            }
+            fprintf(file, "  }\"];\n\n");
+        } else if (bb->blockType == kExceptionHandling) {
+            char blockName[BLOCK_NAME_LEN];
+
+            dvmGetBlockName(bb, blockName);
+            fprintf(file, "  %s [shape=invhouse];\n", blockName);
+        }
+
+        char blockName1[BLOCK_NAME_LEN], blockName2[BLOCK_NAME_LEN];
+
+        if (bb->taken) {
+            dvmGetBlockName(bb, blockName1);
+            dvmGetBlockName(bb->taken, blockName2);
+            fprintf(file, "  %s:s -> %s:n [style=dotted]\n",
+                    blockName1, blockName2);
+        }
+        if (bb->fallThrough) {
+            dvmGetBlockName(bb, blockName1);
+            dvmGetBlockName(bb->fallThrough, blockName2);
+            fprintf(file, "  %s:s -> %s:n\n", blockName1, blockName2);
+        }
+
+        if (bb->successorBlockList.blockListType != kNotUsed) {
+            fprintf(file, "  succ%04x [shape=%s,label = \"{ \\\n",
+                    bb->startOffset,
+                    (bb->successorBlockList.blockListType == kCatch) ?
+                        "Mrecord" : "record");
+            GrowableListIterator iterator;
+            dvmGrowableListIteratorInit(&bb->successorBlockList.blocks,
+                                        &iterator);
+            SuccessorBlockInfo *successorBlockInfo =
+                (SuccessorBlockInfo *) dvmGrowableListIteratorNext(&iterator);
+
+            int succId = 0;
+            while (true) {
+                if (successorBlockInfo == NULL) break;
+
+                BasicBlock *destBlock = successorBlockInfo->block;
+                SuccessorBlockInfo *nextSuccessorBlockInfo =
+                  (SuccessorBlockInfo *) dvmGrowableListIteratorNext(&iterator);
+
+                fprintf(file, "    {<f%d> %04x: %04x\\l}%s\\\n",
+                        succId++,
+                        successorBlockInfo->key,
+                        destBlock->startOffset,
+                        (nextSuccessorBlockInfo != NULL) ? " | " : " ");
+
+                successorBlockInfo = nextSuccessorBlockInfo;
+            }
+            fprintf(file, "  }\"];\n\n");
+
+            dvmGetBlockName(bb, blockName1);
+            fprintf(file, "  %s:s -> succ%04x:n [style=dashed]\n",
+                    blockName1, bb->startOffset);
+
+            if (bb->successorBlockList.blockListType == kPackedSwitch ||
+                bb->successorBlockList.blockListType == kSparseSwitch) {
+
+                dvmGrowableListIteratorInit(&bb->successorBlockList.blocks,
+                                            &iterator);
+
+                succId = 0;
+                while (true) {
+                    SuccessorBlockInfo *successorBlockInfo =
+                        (SuccessorBlockInfo *)
+                            dvmGrowableListIteratorNext(&iterator);
+                    if (successorBlockInfo == NULL) break;
+
+                    BasicBlock *destBlock = successorBlockInfo->block;
+
+                    dvmGetBlockName(destBlock, blockName2);
+                    fprintf(file, "  succ%04x:f%d:e -> %s:n\n",
+                            bb->startOffset, succId++,
+                            blockName2);
+                }
+            }
+        }
+        fprintf(file, "\n");
+
+        /*
+         * If we need to debug the dominator tree, uncomment the following code
+         */
+#if 0
+        dvmGetBlockName(bb, blockName1);
+        fprintf(file, "  cfg%s [label=\"%s\", shape=none];\n",
+                blockName1, blockName1);
+        if (bb->iDom) {
+            dvmGetBlockName(bb->iDom, blockName2);
+            fprintf(file, "  cfg%s:s -> cfg%s:n\n\n",
+                    blockName2, blockName1);
+        }
+#endif
+    }
+    fprintf(file, "}\n");
+    fclose(file);
+}
+
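
dumpCFG writes one DOT file per compiled method, named after its full signature, so for ad-hoc debugging it is usually gated on a single method of interest and the output rendered with Graphviz (for example `dot -Tpng <file>.dot -o cfg.png`). A sketch of such a gate; the method name checked here is hypothetical and the caller would need <string.h> for strcmp:

    /* Hypothetical debugging gate: dump only one method's CFG instead of
     * flooding /data/tombstones/ with a file per compiled method. */
    if (strcmp(cUnit->method->name, "hotMethodUnderStudy") == 0) {
        dumpCFG(cUnit, "/data/tombstones/");
    }
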
+/* Verify that every claimed predecessor of this block has an edge to it */
+static bool verifyPredInfo(CompilationUnit *cUnit, BasicBlock *bb)
+{
+    BitVectorIterator bvIterator;
+
+    dvmBitVectorIteratorInit(bb->predecessors, &bvIterator);
+    while (true) {
+        int blockIdx = dvmBitVectorIteratorNext(&bvIterator);
+        if (blockIdx == -1) break;
+        BasicBlock *predBB = (BasicBlock *)
+            dvmGrowableListGetElement(&cUnit->blockList, blockIdx);
+        bool found = false;
+        if (predBB->taken == bb) {
+            found = true;
+        } else if (predBB->fallThrough == bb) {
+            found = true;
+        } else if (predBB->successorBlockList.blockListType != kNotUsed) {
+            GrowableListIterator iterator;
+            dvmGrowableListIteratorInit(&predBB->successorBlockList.blocks,
+                                        &iterator);
+            while (true) {
+                SuccessorBlockInfo *successorBlockInfo =
+                    (SuccessorBlockInfo *)
+                        dvmGrowableListIteratorNext(&iterator);
+                if (successorBlockInfo == NULL) break;
+                BasicBlock *succBB = successorBlockInfo->block;
+                if (succBB == bb) {
+                    found = true;
+                    break;
+                }
+            }
+        }
+        if (found == false) {
+            char blockName1[BLOCK_NAME_LEN], blockName2[BLOCK_NAME_LEN];
+            dvmGetBlockName(bb, blockName1);
+            dvmGetBlockName(predBB, blockName2);
+            dumpCFG(cUnit, "/data/tombstones/");
+            LOGE("Successor %s not found from %s",
+                 blockName1, blockName2);
+            dvmAbort();
+        }
+    }
+    return true;
+}
+
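
verifyPredInfo checks the invariant maintained throughout this patch: whenever an edge A->B is recorded (taken, fallThrough, or a successor-list entry), bit A->id must be set in B->predecessors. Small helpers like the following hypothetical pair (not part of this patch) keep the two halves of that update together:

    /* Hypothetical helpers: record an edge and update the predecessor
     * bit-vector in one place, which is exactly what verifyPredInfo asserts. */
    static void linkTaken(BasicBlock *from, BasicBlock *to)
    {
        from->taken = to;
        dvmCompilerSetBit(to->predecessors, from->id);
    }

    static void linkFallThrough(BasicBlock *from, BasicBlock *to)
    {
        from->fallThrough = to;
        dvmCompilerSetBit(to->predecessors, from->id);
    }
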
+/* Identify code range in try blocks and set up the empty catch blocks */
+static void processTryCatchBlocks(CompilationUnit *cUnit)
+{
+    const Method *meth = cUnit->method;
+    const DexCode *pCode = dvmGetMethodCode(meth);
+    int triesSize = pCode->triesSize;
+    int i;
+    int offset;
+
+    if (triesSize == 0) {
+        return;
+    }
+
+    const DexTry *pTries = dexGetTries(pCode);
+    BitVector *tryBlockAddr = cUnit->tryBlockAddr;
+
+    /* Mark all the insn offsets in Try blocks */
+    for (i = 0; i < triesSize; i++) {
+        const DexTry* pTry = &pTries[i];
+        /* all in 16-bit units */
+        int startOffset = pTry->startAddr;
+        int endOffset = startOffset + pTry->insnCount;
+
+        for (offset = startOffset; offset < endOffset; offset++) {
+            dvmCompilerSetBit(tryBlockAddr, offset);
+        }
+    }
+
+    /* Iterate over each of the handlers to enqueue the empty Catch blocks */
+    offset = dexGetFirstHandlerOffset(pCode);
+    int handlersSize = dexGetHandlersSize(pCode);
+
+    for (i = 0; i < handlersSize; i++) {
+        DexCatchIterator iterator;
+        dexCatchIteratorInit(&iterator, pCode, offset);
+
+        for (;;) {
+            DexCatchHandler* handler = dexCatchIteratorNext(&iterator);
+
+            if (handler == NULL) {
+                break;
+            }
+
+            /*
+             * Create dummy catch blocks first. Since these are created before
+             * other blocks are processed, "split" is specified as false.
+             */
+            findBlock(cUnit, handler->address,
+                      /* split */
+                      false,
+                      /* create */
+                      true);
+        }
+
+        offset = dexCatchIteratorGetEndOffset(&iterator, pCode);
+    }
+}
+
+/* Process instructions with the kInstrCanBranch flag */
+static void processCanBranch(CompilationUnit *cUnit, BasicBlock *curBlock,
+                             MIR *insn, int curOffset, int width, int flags,
+                             const u2* codePtr, const u2* codeEnd)
+{
+    int target = curOffset;
+    switch (insn->dalvikInsn.opcode) {
+        case OP_GOTO:
+        case OP_GOTO_16:
+        case OP_GOTO_32:
+            target += (int) insn->dalvikInsn.vA;
+            break;
+        case OP_IF_EQ:
+        case OP_IF_NE:
+        case OP_IF_LT:
+        case OP_IF_GE:
+        case OP_IF_GT:
+        case OP_IF_LE:
+            target += (int) insn->dalvikInsn.vC;
+            break;
+        case OP_IF_EQZ:
+        case OP_IF_NEZ:
+        case OP_IF_LTZ:
+        case OP_IF_GEZ:
+        case OP_IF_GTZ:
+        case OP_IF_LEZ:
+            target += (int) insn->dalvikInsn.vB;
+            break;
+        default:
+            LOGE("Unexpected opcode(%d) with kInstrCanBranch set",
+                 insn->dalvikInsn.opcode);
+            dvmAbort();
+    }
+    BasicBlock *takenBlock = findBlock(cUnit, target,
+                                       /* split */
+                                       true,
+                                       /* create */
+                                       true);
+    curBlock->taken = takenBlock;
+    dvmCompilerSetBit(takenBlock->predecessors, curBlock->id);
+
+    /* Always terminate the current block for conditional branches */
+    if (flags & kInstrCanContinue) {
+        BasicBlock *fallthroughBlock = findBlock(cUnit,
+                                                 curOffset +  width,
+                                                 /* split */
+                                                 false,
+                                                 /* create */
+                                                 true);
+        curBlock->fallThrough = fallthroughBlock;
+        dvmCompilerSetBit(fallthroughBlock->predecessors, curBlock->id);
+    } else if (codePtr < codeEnd) {
+        /* Create a fallthrough block for real instructions (incl. OP_NOP) */
+        if (contentIsInsn(codePtr)) {
+            findBlock(cUnit, curOffset + width,
+                      /* split */
+                      false,
+                      /* create */
+                      true);
+        }
+    }
+}
+
+/* Process instructions with the kInstrCanSwitch flag */
+static void processCanSwitch(CompilationUnit *cUnit, BasicBlock *curBlock,
+                             MIR *insn, int curOffset, int width, int flags)
+{
+    u2 *switchData = (u2 *) (cUnit->method->insns + curOffset +
+                            insn->dalvikInsn.vB);
+    int size;
+    int *keyTable;
+    int *targetTable;
+    int i;
+    int firstKey;
+
+    /*
+     * Packed switch data format:
+     *  ushort ident = 0x0100   magic value
+     *  ushort size             number of entries in the table
+     *  int first_key           first (and lowest) switch case value
+     *  int targets[size]       branch targets, relative to switch opcode
+     *
+     * Total size is (4+size*2) 16-bit code units.
+     */
+    if (insn->dalvikInsn.opcode == OP_PACKED_SWITCH) {
+        assert(switchData[0] == kPackedSwitchSignature);
+        size = switchData[1];
+        firstKey = switchData[2] | (switchData[3] << 16);
+        targetTable = (int *) &switchData[4];
+        keyTable = NULL;        // Make the compiler happy
+    /*
+     * Sparse switch data format:
+     *  ushort ident = 0x0200   magic value
+     *  ushort size             number of entries in the table; > 0
+     *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
+     *  int targets[size]       branch targets, relative to switch opcode
+     *
+     * Total size is (2+size*4) 16-bit code units.
+     */
+    } else {
+        assert(switchData[0] == kSparseSwitchSignature);
+        size = switchData[1];
+        keyTable = (int *) &switchData[2];
+        targetTable = (int *) &switchData[2 + size*2];
+        firstKey = 0;   // To make the compiler happy
+    }
+
+    if (curBlock->successorBlockList.blockListType != kNotUsed) {
+        LOGE("Successor block list already in use: %d",
+             curBlock->successorBlockList.blockListType);
+        dvmAbort();
+    }
+    curBlock->successorBlockList.blockListType =
+        (insn->dalvikInsn.opcode == OP_PACKED_SWITCH) ?
+        kPackedSwitch : kSparseSwitch;
+    dvmInitGrowableList(&curBlock->successorBlockList.blocks, size);
+
+    for (i = 0; i < size; i++) {
+        BasicBlock *caseBlock = findBlock(cUnit, curOffset + targetTable[i],
+                                          /* split */
+                                          true,
+                                          /* create */
+                                          true);
+        SuccessorBlockInfo *successorBlockInfo =
+            (SuccessorBlockInfo *) dvmCompilerNew(sizeof(SuccessorBlockInfo),
+                                                  false);
+        successorBlockInfo->block = caseBlock;
+        successorBlockInfo->key = (insn->dalvikInsn.opcode == OP_PACKED_SWITCH)?
+                                  firstKey + i : keyTable[i];
+        dvmInsertGrowableList(&curBlock->successorBlockList.blocks,
+                              (intptr_t) successorBlockInfo);
+        dvmCompilerSetBit(caseBlock->predecessors, curBlock->id);
+    }
+
+    /* Fall-through case */
+    BasicBlock *fallthroughBlock = findBlock(cUnit,
+                                             curOffset +  width,
+                                             /* split */
+                                             false,
+                                             /* create */
+                                             true);
+    curBlock->fallThrough = fallthroughBlock;
+    dvmCompilerSetBit(fallthroughBlock->predecessors, curBlock->id);
+}
+
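
The two payload layouts described above are easy to get wrong by a code unit, so here is a worked decode of a small, hypothetical packed-switch payload. This standalone sketch reassembles the 32-bit targets from 16-bit halves the same way the code above assembles first_key; the compiler itself simply casts &switchData[4] to int *, relying on the payload's 4-byte alignment in the dex file.

    #include <stdio.h>

    typedef unsigned short u2;      /* 16-bit code unit, matching the VM typedef */

    int main(void)
    {
        /* ident, size, first_key (low/high), then size 32-bit targets, each
         * stored as two 16-bit units with the low half first. */
        u2 payload[] = {
            0x0100,                 /* kPackedSwitchSignature */
            0x0002,                 /* size = 2 cases */
            0x000a, 0x0000,         /* first_key = 10 */
            0x0010, 0x0000,         /* targets[0] = +0x10 code units */
            0x0030, 0x0000,         /* targets[1] = +0x30 code units */
        };                          /* total: 4 + size*2 = 8 code units */

        int size = payload[1];
        int firstKey = payload[2] | (payload[3] << 16);
        int i;

        for (i = 0; i < size; i++) {
            int target = payload[4 + i * 2] | (payload[5 + i * 2] << 16);
            printf("case %d -> switch opcode offset %+d\n", firstKey + i, target);
        }
        return 0;
    }
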
+/* Process instructions with the kInstrCanThrow flag */
+static void processCanThrow(CompilationUnit *cUnit, BasicBlock *curBlock,
+                            MIR *insn, int curOffset, int width, int flags,
+                            BitVector *tryBlockAddr, const u2 *codePtr,
+                            const u2* codeEnd)
+{
+    const Method *method = cUnit->method;
+    const DexCode *dexCode = dvmGetMethodCode(method);
+
+    /* In try block */
+    if (dvmIsBitSet(tryBlockAddr, curOffset)) {
+        DexCatchIterator iterator;
+
+        if (!dexFindCatchHandler(&iterator, dexCode, curOffset)) {
+            LOGE("Catch block not found in dexfile for insn %x in %s",
+                 curOffset, method->name);
+            dvmAbort();
+
+        }
+        if (curBlock->successorBlockList.blockListType != kNotUsed) {
+            LOGE("Successor block list already in use: %d",
+                 curBlock->successorBlockList.blockListType);
+            dvmAbort();
+        }
+        curBlock->successorBlockList.blockListType = kCatch;
+        dvmInitGrowableList(&curBlock->successorBlockList.blocks, 2);
+
+        for (;;) {
+            DexCatchHandler* handler = dexCatchIteratorNext(&iterator);
+
+            if (handler == NULL) {
+                break;
+            }
+
+            BasicBlock *catchBlock = findBlock(cUnit, handler->address,
+                                               /* split */
+                                               false,
+                                               /* create */
+                                               false);
+
+            SuccessorBlockInfo *successorBlockInfo =
+              (SuccessorBlockInfo *) dvmCompilerNew(sizeof(SuccessorBlockInfo),
+                                                    false);
+            successorBlockInfo->block = catchBlock;
+            successorBlockInfo->key = handler->typeIdx;
+            dvmInsertGrowableList(&curBlock->successorBlockList.blocks,
+                                  (intptr_t) successorBlockInfo);
+            dvmCompilerSetBit(catchBlock->predecessors, curBlock->id);
+        }
+    } else {
+        BasicBlock *ehBlock = dvmCompilerNewBB(kExceptionHandling,
+                                               cUnit->numBlocks++);
+        curBlock->taken = ehBlock;
+        dvmInsertGrowableList(&cUnit->blockList, (intptr_t) ehBlock);
+        ehBlock->startOffset = curOffset;
+        dvmCompilerSetBit(ehBlock->predecessors, curBlock->id);
+    }
+
+    /*
+     * Force the current block to terminate.
+     *
+     * Data may be present before codeEnd, so we need to parse it to know
+     * whether it is code or data.
+     */
+    if (codePtr < codeEnd) {
+        /* Create a fallthrough block for real instructions (incl. OP_NOP) */
+        if (contentIsInsn(codePtr)) {
+            BasicBlock *fallthroughBlock = findBlock(cUnit,
+                                                     curOffset + width,
+                                                     /* split */
+                                                     false,
+                                                     /* create */
+                                                     true);
+            /*
+             * OP_THROW and OP_THROW_VERIFICATION_ERROR are unconditional
+             * branches.
+             */
+            if (insn->dalvikInsn.opcode != OP_THROW_VERIFICATION_ERROR &&
+                insn->dalvikInsn.opcode != OP_THROW) {
+                curBlock->fallThrough = fallthroughBlock;
+                dvmCompilerSetBit(fallthroughBlock->predecessors, curBlock->id);
+            }
+        }
+    }
+}
+
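
For a throwing instruction inside a try range, the catch edges end up in the block's successorBlockList with blockListType kCatch and the handler's type index as the key. A hypothetical debug helper (not in this patch) that walks that list with the same iterator API used above:

    /* Hypothetical debug helper: print every catch edge recorded on a block. */
    static void dumpCatchEdges(BasicBlock *bb)
    {
        GrowableListIterator iterator;

        if (bb->successorBlockList.blockListType != kCatch)
            return;
        dvmGrowableListIteratorInit(&bb->successorBlockList.blocks, &iterator);
        while (true) {
            SuccessorBlockInfo *successorBlockInfo =
                (SuccessorBlockInfo *) dvmGrowableListIteratorNext(&iterator);
            if (successorBlockInfo == NULL) break;
            LOGD("catch type index %d -> handler block at 0x%x",
                 successorBlockInfo->key,
                 successorBlockInfo->block->startOffset);
        }
    }
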
 /*
  * Similar to dvmCompileTrace, but the entity processed here is the whole
  * method.
@@ -1064,248 +1694,190 @@
  * TODO: implementation will be revisited when the trace builder can provide
  * whole-method traces.
  */
-bool dvmCompileMethod(CompilationUnit *cUnit, const Method *method,
-                      JitTranslationInfo *info)
+bool dvmCompileMethod(const Method *method, JitTranslationInfo *info)
 {
+    CompilationUnit cUnit;
     const DexCode *dexCode = dvmGetMethodCode(method);
     const u2 *codePtr = dexCode->insns;
     const u2 *codeEnd = dexCode->insns + dexCode->insnsSize;
-    int blockID = 0;
+    int numBlocks = 0;
     unsigned int curOffset = 0;
 
-    /* If we've already compiled this trace, just return success */
-    if (dvmJitGetCodeAddr(codePtr) && !info->discardResult) {
-        return true;
+    /* Method already compiled */
+    if (dvmJitGetMethodAddr(codePtr)) {
+        info->codeAddress = NULL;
+        return false;
     }
 
-    /* Doing method-based compilation */
-    cUnit->wholeMethod = true;
+    memset(&cUnit, 0, sizeof(cUnit));
+    cUnit.method = method;
 
-    BasicBlock *firstBlock = dvmCompilerNewBB(kDalvikByteCode);
-    firstBlock->id = blockID++;
+    cUnit.methodJitMode = true;
 
-    /* Allocate the bit-vector to track the beginning of basic blocks */
-    BitVector *bbStartAddr = dvmCompilerAllocBitVector(dexCode->insnsSize+1,
-                                                       false);
-    dvmCompilerSetBit(bbStartAddr, 0);
-
-    int numInvokeTargets = 0;
+    /* Initialize the block list */
+    dvmInitGrowableList(&cUnit.blockList, 4);
 
     /*
-     * Sequentially go through every instruction first and put them in a single
-     * basic block. Identify block boundaries at the mean time.
+     * FIXME - PC reconstruction list won't be needed after the codegen routines
+     * are enhanced to true method mode.
      */
+    /* Initialize the PC reconstruction list */
+    dvmInitGrowableList(&cUnit.pcReconstructionList, 8);
+
+    /* Allocate the bit-vector to track code offsets covered by try blocks */
+    BitVector *tryBlockAddr = dvmCompilerAllocBitVector(dexCode->insnsSize,
+                                                        true /* expandable */);
+    cUnit.tryBlockAddr = tryBlockAddr;
+
+    /* Create the default entry and exit blocks and add them to the list */
+    BasicBlock *entryBlock = dvmCompilerNewBB(kMethodEntryBlock, numBlocks++);
+    BasicBlock *exitBlock = dvmCompilerNewBB(kMethodExitBlock, numBlocks++);
+
+    cUnit.entryBlock = entryBlock;
+    cUnit.exitBlock = exitBlock;
+
+    dvmInsertGrowableList(&cUnit.blockList, (intptr_t) entryBlock);
+    dvmInsertGrowableList(&cUnit.blockList, (intptr_t) exitBlock);
+
+    /* Current block to record parsed instructions */
+    BasicBlock *curBlock = dvmCompilerNewBB(kDalvikByteCode, numBlocks++);
+    curBlock->startOffset = 0;
+    dvmInsertGrowableList(&cUnit.blockList, (intptr_t) curBlock);
+    entryBlock->fallThrough = curBlock;
+    dvmCompilerSetBit(curBlock->predecessors, entryBlock->id);
+
+    /*
+     * Store back the number of blocks since new blocks may be created when
+     * accessing cUnit.
+     */
+    cUnit.numBlocks = numBlocks;
+
+    /* Identify code range in try blocks and set up the empty catch blocks */
+    processTryCatchBlocks(&cUnit);
+
+    /* Parse all instructions and put them into containing basic blocks */
     while (codePtr < codeEnd) {
-        MIR *insn = dvmCompilerNew(sizeof(MIR), true);
+        MIR *insn = (MIR *) dvmCompilerNew(sizeof(MIR), true);
         insn->offset = curOffset;
         int width = parseInsn(codePtr, &insn->dalvikInsn, false);
-        bool isInvoke = false;
-        const Method *callee;
         insn->width = width;
 
         /* Terminate when the data section is seen */
         if (width == 0)
             break;
 
-        if (!dvmCompilerCanIncludeThisInstruction(cUnit->method,
-						  &insn->dalvikInsn)) {
-            return false;
-        }
-
-        dvmCompilerAppendMIR(firstBlock, insn);
-        /*
-         * Check whether this is a block ending instruction and whether it
-         * suggests the start of a new block
-         */
-        unsigned int target = curOffset;
-
-        /*
-         * If findBlockBoundary returns true, it means the current instruction
-         * is terminating the current block. If it is a branch, the target
-         * address will be recorded in target.
-         */
-        if (findBlockBoundary(method, insn, curOffset, &target, &isInvoke,
-                              &callee)) {
-            dvmCompilerSetBit(bbStartAddr, curOffset + width);
-            /* Each invoke needs a chaining cell block */
-            if (isInvoke) {
-                numInvokeTargets++;
-            }
-            /* A branch will end the current block */
-            else if (target != curOffset && target != UNKNOWN_TARGET) {
-                dvmCompilerSetBit(bbStartAddr, target);
-            }
-        }
+        dvmCompilerAppendMIR(curBlock, insn);
 
         codePtr += width;
-        /* each bit represents 16-bit quantity */
+        int flags = dexGetFlagsFromOpcode(insn->dalvikInsn.opcode);
+
+        if (flags & kInstrCanBranch) {
+            processCanBranch(&cUnit, curBlock, insn, curOffset, width, flags,
+                             codePtr, codeEnd);
+        } else if (flags & kInstrCanReturn) {
+            curBlock->fallThrough = exitBlock;
+            dvmCompilerSetBit(exitBlock->predecessors, curBlock->id);
+            /*
+             * Terminate the current block if there are instructions
+             * afterwards.
+             */
+            if (codePtr < codeEnd) {
+                /*
+                 * Create a fallthrough block for real instructions
+                 * (incl. OP_NOP).
+                 */
+                if (contentIsInsn(codePtr)) {
+                    findBlock(&cUnit, curOffset + width,
+                              /* split */
+                              false,
+                              /* create */
+                              true);
+                }
+            }
+        } else if (flags & kInstrCanThrow) {
+            processCanThrow(&cUnit, curBlock, insn, curOffset, width, flags,
+                            tryBlockAddr, codePtr, codeEnd);
+        } else if (flags & kInstrCanSwitch) {
+            processCanSwitch(&cUnit, curBlock, insn, curOffset, width, flags);
+        }
         curOffset += width;
-    }
+        BasicBlock *nextBlock = findBlock(&cUnit, curOffset,
+                                          /* split */
+                                          false,
+                                          /* create */
+                                          false);
+        if (nextBlock) {
+            /*
+             * The next instruction could be the target of a previously parsed
+             * forward branch so a block is already created. If the current
+             * instruction is not an unconditional branch, connect them through
+             * the fall-through link.
+             */
+            assert(curBlock->fallThrough == NULL ||
+                   curBlock->fallThrough == nextBlock ||
+                   curBlock->fallThrough == exitBlock);
 
-    /*
-     * The number of blocks will be equal to the number of bits set to 1 in the
-     * bit vector minus 1, because the bit representing the location after the
-     * last instruction is set to one.
-     *
-     * We also add additional blocks for invoke chaining and the number is
-     * denoted by numInvokeTargets.
-     */
-    int numBlocks = dvmCountSetBits(bbStartAddr);
-    if (dvmIsBitSet(bbStartAddr, dexCode->insnsSize)) {
-        numBlocks--;
-    }
-
-    BasicBlock **blockList;
-    blockList = cUnit->blockList =
-        dvmCompilerNew(sizeof(BasicBlock *) * (numBlocks + numInvokeTargets),
-                       true);
-
-    /*
-     * Register the first block onto the list and start splitting it into
-     * sub-blocks.
-     */
-    blockList[0] = firstBlock;
-    cUnit->numBlocks = 1;
-
-    int i;
-    for (i = 0; i < numBlocks; i++) {
-        MIR *insn;
-        BasicBlock *curBB = blockList[i];
-        curOffset = curBB->lastMIRInsn->offset;
-
-        for (insn = curBB->firstMIRInsn->next; insn; insn = insn->next) {
-            /* Found the beginning of a new block, see if it is created yet */
-            if (dvmIsBitSet(bbStartAddr, insn->offset)) {
-                int j;
-                for (j = 0; j < cUnit->numBlocks; j++) {
-                    if (blockList[j]->firstMIRInsn->offset == insn->offset)
-                        break;
-                }
-
-                /* Block not split yet - do it now */
-                if (j == cUnit->numBlocks) {
-                    BasicBlock *newBB = dvmCompilerNewBB(kDalvikByteCode);
-                    newBB->id = blockID++;
-                    newBB->firstMIRInsn = insn;
-                    newBB->startOffset = insn->offset;
-                    newBB->lastMIRInsn = curBB->lastMIRInsn;
-                    curBB->lastMIRInsn = insn->prev;
-                    insn->prev->next = NULL;
-                    insn->prev = NULL;
-
-                    /*
-                     * If the insn is not an unconditional branch, set up the
-                     * fallthrough link.
-                     */
-                    if (!isUnconditionalBranch(curBB->lastMIRInsn)) {
-                        curBB->fallThrough = newBB;
-                    }
-
-                    /*
-                     * Fallthrough block of an invoke instruction needs to be
-                     * aligned to 4-byte boundary (alignment instruction to be
-                     * inserted later.
-                     */
-                    if (dexGetFlagsFromOpcode(curBB->lastMIRInsn->dalvikInsn.opcode)
-                            & kInstrInvoke) {
-                        newBB->isFallThroughFromInvoke = true;
-                    }
-
-                    /* enqueue the new block */
-                    blockList[cUnit->numBlocks++] = newBB;
-                    break;
-                }
+            if ((curBlock->fallThrough == NULL) &&
+                (flags & kInstrCanContinue)) {
+                curBlock->fallThrough = nextBlock;
+                dvmCompilerSetBit(nextBlock->predecessors, curBlock->id);
             }
+            curBlock = nextBlock;
         }
     }
 
-    if (numBlocks != cUnit->numBlocks) {
-        LOGE("Expect %d vs %d basic blocks\n", numBlocks, cUnit->numBlocks);
-        dvmCompilerAbort(cUnit);
+    if (cUnit.printMe) {
+        dvmCompilerDumpCompilationUnit(&cUnit);
     }
 
-    /* Connect the basic blocks through the taken links */
-    for (i = 0; i < numBlocks; i++) {
-        BasicBlock *curBB = blockList[i];
-        MIR *insn = curBB->lastMIRInsn;
-        unsigned int target = insn->offset;
-        bool isInvoke = false;
-        const Method *callee = NULL;
+    /* Adjust this value accordingly once inlining is performed */
+    cUnit.numDalvikRegisters = cUnit.method->registersSize;
 
-        findBlockBoundary(method, insn, target, &target, &isInvoke, &callee);
+    /* Verify if all blocks are connected as claimed */
+    /* FIXME - to be disabled in the future */
+    dvmCompilerDataFlowAnalysisDispatcher(&cUnit, verifyPredInfo,
+                                          kAllNodes,
+                                          false /* isIterative */);
 
-        /* Found a block ended on a branch (not invoke) */
-        if (isInvoke == false && target != insn->offset) {
-            int j;
-            /* Forward branch */
-            if (target > insn->offset) {
-                j = i + 1;
-            } else {
-                /* Backward branch */
-                j = 0;
-            }
-            for (; j < numBlocks; j++) {
-                if (blockList[j]->firstMIRInsn->offset == target) {
-                    curBB->taken = blockList[j];
-                    break;
-                }
-            }
-        }
 
-        if (isInvoke) {
-            BasicBlock *newBB;
-            /* Monomorphic callee */
-            if (callee) {
-                newBB = dvmCompilerNewBB(kChainingCellInvokeSingleton);
-                newBB->startOffset = 0;
-                newBB->containingMethod = callee;
-            /* Will resolve at runtime */
-            } else {
-                newBB = dvmCompilerNewBB(kChainingCellInvokePredicted);
-                newBB->startOffset = 0;
-            }
-            newBB->id = blockID++;
-            curBB->taken = newBB;
-            /* enqueue the new block */
-            blockList[cUnit->numBlocks++] = newBB;
-        }
-    }
+    /* Perform SSA transformation for the whole method */
+    dvmCompilerMethodSSATransformation(&cUnit);
 
-    if (cUnit->numBlocks != numBlocks + numInvokeTargets) {
-        LOGE("Expect %d vs %d total blocks\n", numBlocks + numInvokeTargets,
-             cUnit->numBlocks);
-        dvmCompilerDumpCompilationUnit(cUnit);
-        dvmCompilerAbort(cUnit);
-    }
+    dvmCompilerInitializeRegAlloc(&cUnit);  // Needs to happen after SSA naming
 
-    /* Set the instruction set to use (NOTE: later components may change it) */
-    cUnit->instructionSet = dvmCompilerInstructionSet();
-
-    /* Preparation for SSA conversion */
-    dvmInitializeSSAConversion(cUnit);
-
-    /* SSA analysis */
-    dvmCompilerNonLoopAnalysis(cUnit);
-
-    /* Needs to happen after SSA naming */
-    dvmCompilerInitializeRegAlloc(cUnit);
-
-    /* Allocate Registers */
-    dvmCompilerRegAlloc(cUnit);
+    /* Allocate Registers using simple local allocation scheme */
+    dvmCompilerLocalRegAlloc(&cUnit);
 
     /* Convert MIR to LIR, etc. */
-    dvmCompilerMIR2LIR(cUnit);
+    dvmCompilerMethodMIR2LIR(&cUnit);
 
-    /* Convert LIR into machine code. */
-    dvmCompilerAssembleLIR(cUnit, info);
+    // Debugging only
+    //dumpCFG(&cUnit, "/data/tombstones/");
 
-    if (cUnit->assemblerStatus != kSuccess) {
-        return false;
+    /* Method is not empty */
+    if (cUnit.firstLIRInsn) {
+        /* Convert LIR into machine code. Loop for recoverable retries */
+        do {
+            dvmCompilerAssembleLIR(&cUnit, info);
+            cUnit.assemblerRetries++;
+            if (cUnit.printMe && cUnit.assemblerStatus != kSuccess)
+                LOGD("Assembler abort #%d on %d",cUnit.assemblerRetries,
+                      cUnit.assemblerStatus);
+        } while (cUnit.assemblerStatus == kRetryAll);
+
+        if (cUnit.printMe) {
+            dvmCompilerCodegenDump(&cUnit);
+        }
+
+        if (info->codeAddress) {
+            dvmJitSetCodeAddr(dexCode->insns, info->codeAddress,
+                              info->instructionSet, true, 0);
+            /*
+             * Clear the codeAddress for the enclosing trace to reuse the info
+             */
+            info->codeAddress = NULL;
+        }
     }
 
-    dvmCompilerDumpCompilationUnit(cUnit);
-
-    dvmCompilerArenaReset();
-
-    return info->codeAddress != NULL;
+    return false;
 }
diff --git a/vm/compiler/InlineTransformation.c b/vm/compiler/InlineTransformation.c
index 0b1330f..6cf2d43 100644
--- a/vm/compiler/InlineTransformation.c
+++ b/vm/compiler/InlineTransformation.c
@@ -34,7 +34,7 @@
     }
 }
 
-static void inlineGetter(CompilationUnit *cUnit,
+static bool inlineGetter(CompilationUnit *cUnit,
                          const Method *calleeMethod,
                          MIR *invokeMIR,
                          BasicBlock *invokeBB,
@@ -43,13 +43,13 @@
 {
     BasicBlock *moveResultBB = invokeBB->fallThrough;
     MIR *moveResultMIR = moveResultBB->firstMIRInsn;
-    MIR *newGetterMIR = dvmCompilerNew(sizeof(MIR), true);
+    MIR *newGetterMIR = (MIR *)dvmCompilerNew(sizeof(MIR), true);
     DecodedInstruction getterInsn;
 
     dexDecodeInstruction(calleeMethod->insns, &getterInsn);
 
     if (!dvmCompilerCanIncludeThisInstruction(calleeMethod, &getterInsn))
-        return;
+        return false;
 
     /*
      * Some getters (especially invoked through interface) are not followed
@@ -59,7 +59,7 @@
         (moveResultMIR->dalvikInsn.opcode != OP_MOVE_RESULT &&
          moveResultMIR->dalvikInsn.opcode != OP_MOVE_RESULT_OBJECT &&
          moveResultMIR->dalvikInsn.opcode != OP_MOVE_RESULT_WIDE)) {
-        return;
+        return false;
     }
 
     int dfFlags = dvmCompilerDataFlowAttributes[getterInsn.opcode];
@@ -100,7 +100,7 @@
     dvmCompilerInsertMIRAfter(invokeBB, invokeMIR, newGetterMIR);
 
     if (isPredicted) {
-        MIR *invokeMIRSlow = dvmCompilerNew(sizeof(MIR), true);
+        MIR *invokeMIRSlow = (MIR *)dvmCompilerNew(sizeof(MIR), true);
         *invokeMIRSlow = *invokeMIR;
         invokeMIR->dalvikInsn.opcode = kMirOpCheckInlinePrediction;
 
@@ -124,23 +124,23 @@
 #endif
     }
 
-    return;
+    return true;
 }
 
-static void inlineSetter(CompilationUnit *cUnit,
+static bool inlineSetter(CompilationUnit *cUnit,
                          const Method *calleeMethod,
                          MIR *invokeMIR,
                          BasicBlock *invokeBB,
                          bool isPredicted,
                          bool isRange)
 {
-    MIR *newSetterMIR = dvmCompilerNew(sizeof(MIR), true);
+    MIR *newSetterMIR = (MIR *)dvmCompilerNew(sizeof(MIR), true);
     DecodedInstruction setterInsn;
 
     dexDecodeInstruction(calleeMethod->insns, &setterInsn);
 
     if (!dvmCompilerCanIncludeThisInstruction(calleeMethod, &setterInsn))
-        return;
+        return false;
 
     int dfFlags = dvmCompilerDataFlowAttributes[setterInsn.opcode];
 
@@ -179,7 +179,7 @@
     dvmCompilerInsertMIRAfter(invokeBB, invokeMIR, newSetterMIR);
 
     if (isPredicted) {
-        MIR *invokeMIRSlow = dvmCompilerNew(sizeof(MIR), true);
+        MIR *invokeMIRSlow = (MIR *)dvmCompilerNew(sizeof(MIR), true);
         *invokeMIRSlow = *invokeMIR;
         invokeMIR->dalvikInsn.opcode = kMirOpCheckInlinePrediction;
 
@@ -205,17 +205,17 @@
 #endif
     }
 
-    return;
+    return true;
 }
 
-static void tryInlineSingletonCallsite(CompilationUnit *cUnit,
+static bool tryInlineSingletonCallsite(CompilationUnit *cUnit,
                                        const Method *calleeMethod,
                                        MIR *invokeMIR,
                                        BasicBlock *invokeBB,
                                        bool isRange)
 {
     /* Not a Java method */
-    if (dvmIsNativeMethod(calleeMethod)) return;
+    if (dvmIsNativeMethod(calleeMethod)) return false;
 
     CompilerMethodStats *methodStats =
         dvmCompilerAnalyzeMethodBody(calleeMethod, true);
@@ -229,69 +229,74 @@
          * the PC reconstruction or chaining cell).
          */
         invokeBB->needFallThroughBranch = true;
-        return;
+        return true;
     }
 
     if (methodStats->attributes & METHOD_IS_GETTER) {
-        inlineGetter(cUnit, calleeMethod, invokeMIR, invokeBB, false, isRange);
-        return;
+        return inlineGetter(cUnit, calleeMethod, invokeMIR, invokeBB, false,
+                            isRange);
     } else if (methodStats->attributes & METHOD_IS_SETTER) {
-        inlineSetter(cUnit, calleeMethod, invokeMIR, invokeBB, false, isRange);
-        return;
+        return inlineSetter(cUnit, calleeMethod, invokeMIR, invokeBB, false,
+                            isRange);
     }
+    return false;
 }
 
-static void inlineEmptyVirtualCallee(CompilationUnit *cUnit,
+static bool inlineEmptyVirtualCallee(CompilationUnit *cUnit,
                                      const Method *calleeMethod,
                                      MIR *invokeMIR,
                                      BasicBlock *invokeBB)
 {
-    MIR *invokeMIRSlow = dvmCompilerNew(sizeof(MIR), true);
+    MIR *invokeMIRSlow = (MIR *)dvmCompilerNew(sizeof(MIR), true);
     *invokeMIRSlow = *invokeMIR;
     invokeMIR->dalvikInsn.opcode = kMirOpCheckInlinePrediction;
 
     dvmCompilerInsertMIRAfter(invokeBB, invokeMIR, invokeMIRSlow);
     invokeMIRSlow->OptimizationFlags |= MIR_INLINED_PRED;
+    return true;
 }
 
-static void tryInlineVirtualCallsite(CompilationUnit *cUnit,
+static bool tryInlineVirtualCallsite(CompilationUnit *cUnit,
                                      const Method *calleeMethod,
                                      MIR *invokeMIR,
                                      BasicBlock *invokeBB,
                                      bool isRange)
 {
     /* Not a Java method */
-    if (dvmIsNativeMethod(calleeMethod)) return;
+    if (dvmIsNativeMethod(calleeMethod)) return false;
 
     CompilerMethodStats *methodStats =
         dvmCompilerAnalyzeMethodBody(calleeMethod, true);
 
     /* Empty callee - do nothing by checking the clazz pointer */
     if (methodStats->attributes & METHOD_IS_EMPTY) {
-        inlineEmptyVirtualCallee(cUnit, calleeMethod, invokeMIR, invokeBB);
-        return;
+        return inlineEmptyVirtualCallee(cUnit, calleeMethod, invokeMIR,
+                                        invokeBB);
     }
 
     if (methodStats->attributes & METHOD_IS_GETTER) {
-        inlineGetter(cUnit, calleeMethod, invokeMIR, invokeBB, true, isRange);
-        return;
+        return inlineGetter(cUnit, calleeMethod, invokeMIR, invokeBB, true,
+                            isRange);
     } else if (methodStats->attributes & METHOD_IS_SETTER) {
-        inlineSetter(cUnit, calleeMethod, invokeMIR, invokeBB, true, isRange);
-        return;
+        return inlineSetter(cUnit, calleeMethod, invokeMIR, invokeBB, true,
+                            isRange);
     }
+    return false;
 }
 
 
-void dvmCompilerInlineMIR(CompilationUnit *cUnit)
+void dvmCompilerInlineMIR(CompilationUnit *cUnit, JitTranslationInfo *info)
 {
-    int i;
     bool isRange = false;
+    GrowableListIterator iterator;
 
+    dvmGrowableListIteratorInit(&cUnit->blockList, &iterator);
     /*
      * Analyze the basic block containing an invoke to see if it can be inlined
      */
-    for (i = 0; i < cUnit->numBlocks; i++) {
-        BasicBlock *bb = cUnit->blockList[i];
+    while (true) {
+        BasicBlock *bb = (BasicBlock *) dvmGrowableListIteratorNext(&iterator);
+        if (bb == NULL) break;
         if (bb->blockType != kDalvikByteCode)
             continue;
         MIR *lastMIRInsn = bb->lastMIRInsn;
@@ -302,6 +307,10 @@
         if ((flags & kInstrInvoke) == 0)
             continue;
 
+        /* Disable inlining when doing method tracing */
+        if (gDvmJit.methodTraceSupport)
+            continue;
+
         /*
          * If the invoke itself is selected for single stepping, don't bother
          * to inline it.
@@ -331,8 +340,30 @@
         }
 
         if (calleeMethod) {
-            tryInlineSingletonCallsite(cUnit, calleeMethod, lastMIRInsn, bb,
-                                       isRange);
+            bool inlined = tryInlineSingletonCallsite(cUnit, calleeMethod,
+                                                      lastMIRInsn, bb, isRange);
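+            /*
+             * Fallback when inlining is not possible: if the callee is a
+             * compilable leaf method, tag the invoke with
+             * MIR_INVOKE_METHOD_JIT so codegen can dispatch straight to the
+             * method-JIT'ed code; if compilation fails, remember
+             * METHOD_CANNOT_COMPILE so we do not retry.
+             */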
+            if (!inlined &&
+                !(gDvmJit.disableOpt & (1 << kMethodJit)) &&
+                !dvmIsNativeMethod(calleeMethod)) {
+                CompilerMethodStats *methodStats =
+                    dvmCompilerAnalyzeMethodBody(calleeMethod, true);
+                if ((methodStats->attributes & METHOD_IS_LEAF) &&
+                    !(methodStats->attributes & METHOD_CANNOT_COMPILE)) {
+                    /* Callee has been previously compiled */
+                    if (dvmJitGetMethodAddr(calleeMethod->insns)) {
+                        lastMIRInsn->OptimizationFlags |= MIR_INVOKE_METHOD_JIT;
+                    } else {
+                        /* Compile the callee first */
+                        dvmCompileMethod(calleeMethod, info);
+                        if (dvmJitGetMethodAddr(calleeMethod->insns)) {
+                            lastMIRInsn->OptimizationFlags |=
+                                MIR_INVOKE_METHOD_JIT;
+                        } else {
+                            methodStats->attributes |= METHOD_CANNOT_COMPILE;
+                        }
+                    }
+                }
+            }
             return;
         }
 
@@ -354,8 +385,30 @@
         }
 
         if (calleeMethod) {
-            tryInlineVirtualCallsite(cUnit, calleeMethod, lastMIRInsn, bb,
-                                     isRange);
+            bool inlined = tryInlineVirtualCallsite(cUnit, calleeMethod,
+                                                    lastMIRInsn, bb, isRange);
+            if (!inlined &&
+                !(gDvmJit.disableOpt & (1 << kMethodJit)) &&
+                !dvmIsNativeMethod(calleeMethod)) {
+                CompilerMethodStats *methodStats =
+                    dvmCompilerAnalyzeMethodBody(calleeMethod, true);
+                if ((methodStats->attributes & METHOD_IS_LEAF) &&
+                    !(methodStats->attributes & METHOD_CANNOT_COMPILE)) {
+                    /* Callee has been previously compiled */
+                    if (dvmJitGetMethodAddr(calleeMethod->insns)) {
+                        lastMIRInsn->OptimizationFlags |= MIR_INVOKE_METHOD_JIT;
+                    } else {
+                        /* Compile the callee first */
+                        dvmCompileMethod(calleeMethod, info);
+                        if (dvmJitGetMethodAddr(calleeMethod->insns)) {
+                            lastMIRInsn->OptimizationFlags |=
+                                MIR_INVOKE_METHOD_JIT;
+                        } else {
+                            methodStats->attributes |= METHOD_CANNOT_COMPILE;
+                        }
+                    }
+                }
+            }
             return;
         }
     }
diff --git a/vm/compiler/IntermediateRep.c b/vm/compiler/IntermediateRep.c
index 825a690..db68c3c 100644
--- a/vm/compiler/IntermediateRep.c
+++ b/vm/compiler/IntermediateRep.c
@@ -18,10 +18,13 @@
 #include "CompilerInternals.h"
 
 /* Allocate a new basic block */
-BasicBlock *dvmCompilerNewBB(BBType blockType)
+BasicBlock *dvmCompilerNewBB(BBType blockType, int blockId)
 {
-    BasicBlock *bb = dvmCompilerNew(sizeof(BasicBlock), true);
+    BasicBlock *bb = (BasicBlock *)dvmCompilerNew(sizeof(BasicBlock), true);
     bb->blockType = blockType;
+    bb->id = blockId;
+    bb->predecessors = dvmCompilerAllocBitVector(blockId > 32 ? blockId : 32,
+                                                 true /* expandable */);
     return bb;
 }
 
diff --git a/vm/compiler/Loop.c b/vm/compiler/Loop.c
index 031464c..9ee430d 100644
--- a/vm/compiler/Loop.c
+++ b/vm/compiler/Loop.c
@@ -38,18 +38,24 @@
  */
 static void handlePhiPlacement(CompilationUnit *cUnit)
 {
-    BasicBlock *entry = cUnit->blockList[0];
-    BasicBlock *loopBody = cUnit->blockList[1];
-    BasicBlock *loopBranch = cUnit->blockList[2];
+    BasicBlock *entry =
+        (BasicBlock *) dvmGrowableListGetElement(&cUnit->blockList, 0);
+    BasicBlock *loopBody =
+        (BasicBlock *) dvmGrowableListGetElement(&cUnit->blockList, 1);
+    BasicBlock *loopBranch =
+        (BasicBlock *) dvmGrowableListGetElement(&cUnit->blockList, 2);
     dvmCopyBitVector(entry->dataFlowInfo->defV,
                      loopBody->dataFlowInfo->liveInV);
 
     BitVector *phiV = dvmCompilerAllocBitVector(cUnit->method->registersSize,
                                                 false);
+    BitVector *phi2V = dvmCompilerAllocBitVector(cUnit->method->registersSize,
+                                                 false);
     dvmIntersectBitVectors(phiV, entry->dataFlowInfo->defV,
                            loopBody->dataFlowInfo->defV);
-    dvmIntersectBitVectors(phiV, entry->dataFlowInfo->defV,
+    dvmIntersectBitVectors(phi2V, entry->dataFlowInfo->defV,
                            loopBranch->dataFlowInfo->defV);
+    dvmUnifyBitVectors(phiV, phiV, phi2V);
 
     /* Insert the PHI MIRs */
     int i;
@@ -57,7 +63,7 @@
         if (!dvmIsBitSet(phiV, i)) {
             continue;
         }
-        MIR *phi = dvmCompilerNew(sizeof(MIR), true);
+        MIR *phi = (MIR *)dvmCompilerNew(sizeof(MIR), true);
         phi->dalvikInsn.opcode = kMirOpPhi;
         phi->dalvikInsn.vA = i;
         dvmCompilerPrependMIR(loopBody, phi);
@@ -66,9 +72,12 @@
 
 static void fillPhiNodeContents(CompilationUnit *cUnit)
 {
-    BasicBlock *entry = cUnit->blockList[0];
-    BasicBlock *loopBody = cUnit->blockList[1];
-    BasicBlock *loopBranch = cUnit->blockList[2];
+    BasicBlock *entry =
+        (BasicBlock *) dvmGrowableListGetElement(&cUnit->blockList, 0);
+    BasicBlock *loopBody =
+        (BasicBlock *) dvmGrowableListGetElement(&cUnit->blockList, 1);
+    BasicBlock *loopBranch =
+        (BasicBlock *) dvmGrowableListGetElement(&cUnit->blockList, 2);
     MIR *mir;
 
     for (mir = loopBody->firstMIRInsn; mir; mir = mir->next) {
@@ -76,7 +85,7 @@
         int dalvikReg = mir->dalvikInsn.vA;
 
         mir->ssaRep->numUses = 2;
-        mir->ssaRep->uses = dvmCompilerNew(sizeof(int) * 2, false);
+        mir->ssaRep->uses = (int *)dvmCompilerNew(sizeof(int) * 2, false);
         mir->ssaRep->uses[0] =
             DECODE_REG(entry->dataFlowInfo->dalvikToSSAMap[dalvikReg]);
         mir->ssaRep->uses[1] =
@@ -165,7 +174,8 @@
 static bool isLoopOptimizable(CompilationUnit *cUnit)
 {
     unsigned int i;
-    BasicBlock *loopBranch = cUnit->blockList[2];
+    BasicBlock *loopBranch =
+        (BasicBlock *) dvmGrowableListGetElement(&cUnit->blockList, 2);
     LoopAnalysis *loopAnalysis = cUnit->loopAnalysis;
 
     if (loopAnalysis->numBasicIV != 1) return false;
@@ -283,13 +293,14 @@
             }
             if (arrayAccessInfo == NULL) {
                 arrayAccessInfo =
-                    dvmCompilerNew(sizeof(ArrayAccessInfo), false);
+                    (ArrayAccessInfo *)dvmCompilerNew(sizeof(ArrayAccessInfo),
+                                                      false);
                 arrayAccessInfo->ivReg = ivInfo->basicSSAReg;
                 arrayAccessInfo->arrayReg = arrayReg;
                 arrayAccessInfo->maxC = (ivInfo->c > 0) ? ivInfo->c : 0;
                 arrayAccessInfo->minC = (ivInfo->c < 0) ? ivInfo->c : 0;
                 dvmInsertGrowableList(loopAnalysis->arrayAccessInfo,
-                                      arrayAccessInfo);
+                                      (intptr_t) arrayAccessInfo);
             }
             break;
         }
@@ -299,7 +310,8 @@
 /* Returns true if the loop body cannot throw any exceptions */
 static bool doLoopBodyCodeMotion(CompilationUnit *cUnit)
 {
-    BasicBlock *loopBody = cUnit->blockList[1];
+    BasicBlock *loopBody =
+        (BasicBlock *) dvmGrowableListGetElement(&cUnit->blockList, 1);
     MIR *mir;
     bool loopBodyCanThrow = false;
 
@@ -386,7 +398,8 @@
 static void genHoistedChecks(CompilationUnit *cUnit)
 {
     unsigned int i;
-    BasicBlock *entry = cUnit->blockList[0];
+    BasicBlock *entry =
+        (BasicBlock *) dvmGrowableListGetElement(&cUnit->blockList, 0);
     LoopAnalysis *loopAnalysis = cUnit->loopAnalysis;
     int globalMaxC = 0;
     int globalMinC = 0;
@@ -402,7 +415,7 @@
         idxReg = DECODE_REG(
             dvmConvertSSARegToDalvik(cUnit, arrayAccessInfo->ivReg));
 
-        MIR *rangeCheckMIR = dvmCompilerNew(sizeof(MIR), true);
+        MIR *rangeCheckMIR = (MIR *)dvmCompilerNew(sizeof(MIR), true);
         rangeCheckMIR->dalvikInsn.opcode = (loopAnalysis->isCountUpLoop) ?
             kMirOpNullNRangeUpCheck : kMirOpNullNRangeDownCheck;
         rangeCheckMIR->dalvikInsn.vA = arrayReg;
@@ -422,7 +435,7 @@
 
     if (loopAnalysis->arrayAccessInfo->numUsed != 0) {
         if (loopAnalysis->isCountUpLoop) {
-            MIR *boundCheckMIR = dvmCompilerNew(sizeof(MIR), true);
+            MIR *boundCheckMIR = (MIR *)dvmCompilerNew(sizeof(MIR), true);
             boundCheckMIR->dalvikInsn.opcode = kMirOpLowerBound;
             boundCheckMIR->dalvikInsn.vA = idxReg;
             boundCheckMIR->dalvikInsn.vB = globalMinC;
@@ -430,7 +443,7 @@
         } else {
             if (loopAnalysis->loopBranchOpcode == OP_IF_LT ||
                 loopAnalysis->loopBranchOpcode == OP_IF_LE) {
-                MIR *boundCheckMIR = dvmCompilerNew(sizeof(MIR), true);
+                MIR *boundCheckMIR = (MIR *)dvmCompilerNew(sizeof(MIR), true);
                 boundCheckMIR->dalvikInsn.opcode = kMirOpLowerBound;
                 boundCheckMIR->dalvikInsn.vA = loopAnalysis->endConditionReg;
                 boundCheckMIR->dalvikInsn.vB = globalMinC;
@@ -447,14 +460,14 @@
             } else if (loopAnalysis->loopBranchOpcode == OP_IF_LTZ) {
                 /* Array index will fall below 0 */
                 if (globalMinC < 0) {
-                    MIR *boundCheckMIR = dvmCompilerNew(sizeof(MIR), true);
+                    MIR *boundCheckMIR = (MIR *)dvmCompilerNew(sizeof(MIR), true);
                     boundCheckMIR->dalvikInsn.opcode = kMirOpPunt;
                     dvmCompilerAppendMIR(entry, boundCheckMIR);
                 }
             } else if (loopAnalysis->loopBranchOpcode == OP_IF_LEZ) {
                 /* Array index will fall below 0 */
                 if (globalMinC < -1) {
-                    MIR *boundCheckMIR = dvmCompilerNew(sizeof(MIR), true);
+                    MIR *boundCheckMIR = (MIR *)dvmCompilerNew(sizeof(MIR), true);
                     boundCheckMIR->dalvikInsn.opcode = kMirOpPunt;
                     dvmCompilerAppendMIR(entry, boundCheckMIR);
                 }
@@ -473,46 +486,61 @@
  */
 bool dvmCompilerLoopOpt(CompilationUnit *cUnit)
 {
-    LoopAnalysis *loopAnalysis = dvmCompilerNew(sizeof(LoopAnalysis), true);
+    LoopAnalysis *loopAnalysis =
+        (LoopAnalysis *)dvmCompilerNew(sizeof(LoopAnalysis), true);
 
-    assert(cUnit->blockList[0]->blockType == kTraceEntryBlock);
-    assert(cUnit->blockList[2]->blockType == kDalvikByteCode);
-    assert(cUnit->blockList[3]->blockType == kTraceExitBlock);
+    assert(((BasicBlock *) dvmGrowableListGetElement(&cUnit->blockList, 0))
+                               ->blockType == kTraceEntryBlock);
+    assert(((BasicBlock *) dvmGrowableListGetElement(&cUnit->blockList, 2))
+                               ->blockType == kDalvikByteCode);
+    assert(((BasicBlock *) dvmGrowableListGetElement(&cUnit->blockList, 3))
+                               ->blockType == kTraceExitBlock);
 
     cUnit->loopAnalysis = loopAnalysis;
     /*
      * Find live-in variables to the loop body so that we can fake their
      * definitions in the entry block.
      */
-    dvmCompilerDataFlowAnalysisDispatcher(cUnit, dvmCompilerFindLiveIn);
+    dvmCompilerDataFlowAnalysisDispatcher(cUnit, dvmCompilerFindLocalLiveIn,
+                                          kAllNodes,
+                                          false /* isIterative */);
 
     /* Insert phi nodes to the loop body */
     handlePhiPlacement(cUnit);
 
-    dvmCompilerDataFlowAnalysisDispatcher(cUnit, dvmCompilerDoSSAConversion);
+    dvmCompilerDataFlowAnalysisDispatcher(cUnit, dvmCompilerDoSSAConversion,
+                                          kAllNodes,
+                                          false /* isIterative */);
     fillPhiNodeContents(cUnit);
 
     /* Constant propagation */
     cUnit->isConstantV = dvmAllocBitVector(cUnit->numSSARegs, false);
-    cUnit->constantValues = dvmCompilerNew(sizeof(int) * cUnit->numSSARegs,
-                                           true);
+    cUnit->constantValues =
+        (int *)dvmCompilerNew(sizeof(int) * cUnit->numSSARegs,
+                              true);
     dvmCompilerDataFlowAnalysisDispatcher(cUnit,
-                                          dvmCompilerDoConstantPropagation);
+                                          dvmCompilerDoConstantPropagation,
+                                          kAllNodes,
+                                          false /* isIterative */);
     DEBUG_LOOP(dumpConstants(cUnit);)
 
     /* Find induction variables - basic and dependent */
-    loopAnalysis->ivList = dvmCompilerNew(sizeof(GrowableList), true);
+    loopAnalysis->ivList =
+        (GrowableList *)dvmCompilerNew(sizeof(GrowableList), true);
     dvmInitGrowableList(loopAnalysis->ivList, 4);
     loopAnalysis->isIndVarV = dvmAllocBitVector(cUnit->numSSARegs, false);
     dvmCompilerDataFlowAnalysisDispatcher(cUnit,
-                                          dvmCompilerFindInductionVariables);
+                                          dvmCompilerFindInductionVariables,
+                                          kAllNodes,
+                                          false /* isIterative */);
     DEBUG_LOOP(dumpIVList(cUnit);)
 
     /* If the loop turns out to be non-optimizable, return early */
     if (!isLoopOptimizable(cUnit))
         return false;
 
-    loopAnalysis->arrayAccessInfo = dvmCompilerNew(sizeof(GrowableList), true);
+    loopAnalysis->arrayAccessInfo =
+        (GrowableList *)dvmCompilerNew(sizeof(GrowableList), true);
     dvmInitGrowableList(loopAnalysis->arrayAccessInfo, 4);
     loopAnalysis->bodyIsClean = doLoopBodyCodeMotion(cUnit);
     DEBUG_LOOP(dumpHoistedChecks(cUnit);)
diff --git a/vm/compiler/MethodSSATransformation.c b/vm/compiler/MethodSSATransformation.c
new file mode 100644
index 0000000..eaee24a
--- /dev/null
+++ b/vm/compiler/MethodSSATransformation.c
@@ -0,0 +1,555 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Dalvik.h"
+#include "Dataflow.h"
+#include "Loop.h"
+#include "libdex/DexOpcodes.h"
+
+/* Add the node to the dfsOrder list, then visit its successors */
+static void recordDFSPreOrder(CompilationUnit *cUnit, BasicBlock *block)
+{
+
+    if (block->visited) return;
+    block->visited = true;
+
+    /* Enqueue the block id */
+    dvmInsertGrowableList(&cUnit->dfsOrder, block->id);
+
+    if (block->fallThrough) recordDFSPreOrder(cUnit, block->fallThrough);
+    if (block->taken) recordDFSPreOrder(cUnit, block->taken);
+    if (block->successorBlockList.blockListType != kNotUsed) {
+        GrowableListIterator iterator;
+        dvmGrowableListIteratorInit(&block->successorBlockList.blocks,
+                                    &iterator);
+        while (true) {
+            SuccessorBlockInfo *successorBlockInfo =
+                (SuccessorBlockInfo *) dvmGrowableListIteratorNext(&iterator);
+            if (successorBlockInfo == NULL) break;
+            BasicBlock *succBB = successorBlockInfo->block;
+            recordDFSPreOrder(cUnit, succBB);
+        }
+    }
+    return;
+}
+
+/* Sort the blocks by the Depth-First-Search pre-order */
+static void computeDFSOrder(CompilationUnit *cUnit)
+{
+    /* Initialize the DFS order list */
+    dvmInitGrowableList(&cUnit->dfsOrder, cUnit->numBlocks);
+
+
+    dvmCompilerDataFlowAnalysisDispatcher(cUnit, dvmCompilerClearVisitedFlag,
+                                          kAllNodes,
+                                          false /* isIterative */);
+
+    recordDFSPreOrder(cUnit, cUnit->entryBlock);
+    cUnit->numReachableBlocks = cUnit->dfsOrder.numUsed;
+}
+
+/*
+ * Set the bit for this block in the per-Dalvik-register def vector to record
+ * that Dalvik register idx is defined in BasicBlock bb.
+ */
+static bool fillDefBlockMatrix(CompilationUnit *cUnit, BasicBlock *bb)
+{
+    if (bb->dataFlowInfo == NULL) return false;
+
+    BitVectorIterator iterator;
+
+    dvmBitVectorIteratorInit(bb->dataFlowInfo->defV, &iterator);
+    while (true) {
+        int idx = dvmBitVectorIteratorNext(&iterator);
+        if (idx == -1) break;
+        /* Block bb defines register idx */
+        dvmCompilerSetBit(cUnit->defBlockMatrix[idx], bb->id);
+    }
+    return true;
+}
+
+static void computeDefBlockMatrix(CompilationUnit *cUnit)
+{
+    int numRegisters = cUnit->numDalvikRegisters;
+    /* Allocate numDalvikRegisters bit vector pointers */
+    cUnit->defBlockMatrix = (BitVector **)
+        dvmCompilerNew(sizeof(BitVector *) * numRegisters, true);
+    int i;
+
+    /* Initialize numRegisters bit vectors with numBlocks bits each */
+    for (i = 0; i < numRegisters; i++) {
+        cUnit->defBlockMatrix[i] = dvmCompilerAllocBitVector(cUnit->numBlocks,
+                                                             false);
+    }
+    dvmCompilerDataFlowAnalysisDispatcher(cUnit, dvmCompilerFindLocalLiveIn,
+                                          kAllNodes,
+                                          false /* isIterative */);
+    dvmCompilerDataFlowAnalysisDispatcher(cUnit, fillDefBlockMatrix,
+                                          kAllNodes,
+                                          false /* isIterative */);
+
+    /*
+     * Also set the incoming parameters as defs in the entry block.
+     * Only need to handle the parameters for the outer method.
+     */
+    int inReg = cUnit->method->registersSize - cUnit->method->insSize;
+    for (; inReg < cUnit->method->registersSize; inReg++) {
+        dvmCompilerSetBit(cUnit->defBlockMatrix[inReg],
+                          cUnit->entryBlock->id);
+    }
+}
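+
+/*
+ * Example: for a method with registersSize == 5 and insSize == 2 the incoming
+ * arguments live in v3..v4, so the loop above marks the entry block as a def
+ * site for those two registers.
+ */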
+
+/* Compute the post-order traversal of the CFG */
+static void computeDomPostOrderTraversal(CompilationUnit *cUnit, BasicBlock *bb)
+{
+    BitVectorIterator bvIterator;
+    dvmBitVectorIteratorInit(bb->iDominated, &bvIterator);
+    GrowableList *blockList = &cUnit->blockList;
+
+    /* Iterate through the dominated blocks first */
+    while (true) {
+        int bbIdx = dvmBitVectorIteratorNext(&bvIterator);
+        if (bbIdx == -1) break;
+        BasicBlock *dominatedBB =
+            (BasicBlock *) dvmGrowableListGetElement(blockList, bbIdx);
+        computeDomPostOrderTraversal(cUnit, dominatedBB);
+    }
+
+    /* Enter the current block id */
+    dvmInsertGrowableList(&cUnit->domPostOrderTraversal, bb->id);
+
+    /* hacky loop detection */
+    if (bb->taken && dvmIsBitSet(bb->dominators, bb->taken->id)) {
+        cUnit->hasLoop = true;
+    }
+}
+
+/* Worker function to compute the dominance frontier */
+static bool computeDominanceFrontier(CompilationUnit *cUnit, BasicBlock *bb)
+{
+    GrowableList *blockList = &cUnit->blockList;
+
+    /* Calculate DF_local */
+    if (bb->taken && !dvmIsBitSet(bb->taken->dominators, bb->id)) {
+        dvmSetBit(bb->domFrontier, bb->taken->id);
+    }
+    if (bb->fallThrough &&
+        !dvmIsBitSet(bb->fallThrough->dominators, bb->id)) {
+        dvmSetBit(bb->domFrontier, bb->fallThrough->id);
+    }
+    if (bb->successorBlockList.blockListType != kNotUsed) {
+        GrowableListIterator iterator;
+        dvmGrowableListIteratorInit(&bb->successorBlockList.blocks,
+                                    &iterator);
+        while (true) {
+            SuccessorBlockInfo *successorBlockInfo =
+                (SuccessorBlockInfo *) dvmGrowableListIteratorNext(&iterator);
+            if (successorBlockInfo == NULL) break;
+            BasicBlock *succBB = successorBlockInfo->block;
+            if (!dvmIsBitSet(succBB->dominators, bb->id)) {
+                dvmSetBit(bb->domFrontier, succBB->id);
+            }
+        }
+    }
+
+    /* Calculate DF_up */
+    BitVectorIterator bvIterator;
+    dvmBitVectorIteratorInit(bb->iDominated, &bvIterator);
+    while (true) {
+        int dominatedIdx = dvmBitVectorIteratorNext(&bvIterator);
+        if (dominatedIdx == -1) break;
+        BasicBlock *dominatedBB = (BasicBlock *)
+            dvmGrowableListGetElement(blockList, dominatedIdx);
+        BitVectorIterator dfIterator;
+        dvmBitVectorIteratorInit(dominatedBB->domFrontier, &dfIterator);
+        while (true) {
+            int dfUpIdx = dvmBitVectorIteratorNext(&dfIterator);
+            if (dfUpIdx == -1) break;
+            BasicBlock *dfUpBlock = (BasicBlock *)
+                dvmGrowableListGetElement(blockList, dfUpIdx);
+            if (!dvmIsBitSet(dfUpBlock->dominators, bb->id)) {
+                dvmSetBit(bb->domFrontier, dfUpBlock->id);
+            }
+        }
+    }
+    return true;
+}
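+
+/*
+ * For reference, the computation above is the classic dominance-frontier
+ * construction (Cytron et al.): DF(X) contains every block Y such that X
+ * dominates a predecessor of Y but does not strictly dominate Y itself.
+ * The first half computes DF_local from X's own successors; the second half
+ * (DF_up) propagates entries from the blocks X immediately dominates.
+ */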
+
+/* Worker function for initializing domination-related data structures */
+static bool initializeDominationInfo(CompilationUnit *cUnit, BasicBlock *bb)
+{
+    int numTotalBlocks = cUnit->blockList.numUsed;
+
+    bb->dominators = dvmCompilerAllocBitVector(numTotalBlocks,
+                                               false /* expandable */);
+    bb->iDominated = dvmCompilerAllocBitVector(numTotalBlocks,
+                                               false /* expandable */);
+    bb->domFrontier = dvmCompilerAllocBitVector(numTotalBlocks,
+                                               false /* expandable */);
+    /* Set all bits in the dominator vector */
+    dvmSetInitialBits(bb->dominators, numTotalBlocks);
+
+    return true;
+}
+
+/* Worker function to compute each block's dominators */
+static bool computeBlockDominators(CompilationUnit *cUnit, BasicBlock *bb)
+{
+    GrowableList *blockList = &cUnit->blockList;
+    int numTotalBlocks = blockList->numUsed;
+    BitVector *tempBlockV = cUnit->tempBlockV;
+    BitVectorIterator bvIterator;
+
+    /*
+     * The dominator of the entry block has been preset to itself and we need
+     * to skip the calculation here.
+     */
+    if (bb == cUnit->entryBlock) return false;
+
+    dvmSetInitialBits(tempBlockV, numTotalBlocks);
+
+    /* Iterate through the predecessors */
+    dvmBitVectorIteratorInit(bb->predecessors, &bvIterator);
+    while (true) {
+        int predIdx = dvmBitVectorIteratorNext(&bvIterator);
+        if (predIdx == -1) break;
+        BasicBlock *predBB = (BasicBlock *) dvmGrowableListGetElement(
+                                 blockList, predIdx);
+        /* tempBlockV &= predBB->dominators (set intersection) */
+        dvmIntersectBitVectors(tempBlockV, tempBlockV, predBB->dominators);
+    }
+    dvmSetBit(tempBlockV, bb->id);
+    if (dvmCompareBitVectors(tempBlockV, bb->dominators)) {
+        dvmCopyBitVector(bb->dominators, tempBlockV);
+        return true;
+    }
+    return false;
+}
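+
+/*
+ * The worker above implements the iterative dataflow equation
+ *     Dom(b) = {b} U (intersection of Dom(p) over all predecessors p of b)
+ * and returns true whenever bb->dominators changed, so the dispatcher keeps
+ * rerunning it (isIterative) until a fixed point is reached.
+ */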
+
+/* Worker function to compute the idom */
+static bool computeImmediateDominator(CompilationUnit *cUnit, BasicBlock *bb)
+{
+    GrowableList *blockList = &cUnit->blockList;
+    BitVector *tempBlockV = cUnit->tempBlockV;
+    BitVectorIterator bvIterator;
+    BasicBlock *iDom;
+
+    if (bb == cUnit->entryBlock) return false;
+
+    dvmCopyBitVector(tempBlockV, bb->dominators);
+    dvmClearBit(tempBlockV, bb->id);
+    dvmBitVectorIteratorInit(tempBlockV, &bvIterator);
+
+    /* Should not see any dead block */
+    assert(dvmCountSetBits(tempBlockV) != 0);
+    if (dvmCountSetBits(tempBlockV) == 1) {
+        iDom = (BasicBlock *) dvmGrowableListGetElement(
+                       blockList, dvmBitVectorIteratorNext(&bvIterator));
+        bb->iDom = iDom;
+    } else {
+        int iDomIdx = dvmBitVectorIteratorNext(&bvIterator);
+        assert(iDomIdx != -1);
+        while (true) {
+            int nextDom = dvmBitVectorIteratorNext(&bvIterator);
+            if (nextDom == -1) break;
+            BasicBlock *nextDomBB = (BasicBlock *)
+                dvmGrowableListGetElement(blockList, nextDom);
+            /* iDom dominates nextDom - set new iDom */
+            if (dvmIsBitSet(nextDomBB->dominators, iDomIdx)) {
+                iDomIdx = nextDom;
+            }
+
+        }
+        iDom = (BasicBlock *) dvmGrowableListGetElement(blockList, iDomIdx);
+        /* Set the immediate dominator block for bb */
+        bb->iDom = iDom;
+    }
+    /* Add bb to the iDominated set of the immediate dominator block */
+    dvmCompilerSetBit(iDom->iDominated, bb->id);
+    return true;
+}
+
+/* Compute dominators, immediate dominator, and dominance frontier */
+static void computeDominators(CompilationUnit *cUnit)
+{
+    int numReachableBlocks = cUnit->numReachableBlocks;
+    int numTotalBlocks = cUnit->blockList.numUsed;
+
+    /* Initialize domination-related data structures */
+    dvmCompilerDataFlowAnalysisDispatcher(cUnit, initializeDominationInfo,
+                                          kReachableNodes,
+                                          false /* isIterative */);
+
+    /* Set the dominator for the root node */
+    dvmClearAllBits(cUnit->entryBlock->dominators);
+    dvmSetBit(cUnit->entryBlock->dominators, cUnit->entryBlock->id);
+
+    cUnit->tempBlockV = dvmCompilerAllocBitVector(numTotalBlocks,
+                                              false /* expandable */);
+    dvmCompilerDataFlowAnalysisDispatcher(cUnit, computeBlockDominators,
+                                          kPreOrderDFSTraversal,
+                                          true /* isIterative */);
+
+    cUnit->entryBlock->iDom = NULL;
+    dvmCompilerDataFlowAnalysisDispatcher(cUnit, computeImmediateDominator,
+                                          kReachableNodes,
+                                          false /* isIterative */);
+
+    /*
+     * Now go ahead and compute the post order traversal based on the
+     * iDominated sets.
+     */
+    dvmInitGrowableList(&cUnit->domPostOrderTraversal, numReachableBlocks);
+    computeDomPostOrderTraversal(cUnit, cUnit->entryBlock);
+    assert(cUnit->domPostOrderTraversal.numUsed ==
+           (unsigned) cUnit->numReachableBlocks);
+
+    /* Now compute the dominance frontier for each block */
+    dvmCompilerDataFlowAnalysisDispatcher(cUnit, computeDominanceFrontier,
+                                          kPostOrderDOMTraversal,
+                                          false /* isIterative */);
+}
+
+/*
+ * Perform dest |= src1 & ~src2, i.e. dest = dest U (src1 - src2).
+ * This is probably not general enough to be placed in BitVector.[ch].
+ */
+static void computeSuccLiveIn(BitVector *dest,
+                              const BitVector *src1,
+                              const BitVector *src2)
+{
+    if (dest->storageSize != src1->storageSize ||
+        dest->storageSize != src2->storageSize ||
+        dest->expandable != src1->expandable ||
+        dest->expandable != src2->expandable) {
+        LOGE("Incompatible set properties");
+        dvmAbort();
+    }
+
+    unsigned int idx;
+    for (idx = 0; idx < dest->storageSize; idx++) {
+        dest->storage[idx] |= src1->storage[idx] & ~src2->storage[idx];
+    }
+}
+
+/*
+ * Iterate through all successor blocks and propagate up the live-in sets.
+ * The calculated result is used for phi-node pruning - where we only need to
+ * insert a phi node if the variable is live-in to the block.
+ */
+static bool computeBlockLiveIns(CompilationUnit *cUnit, BasicBlock *bb)
+{
+    BitVector *tempDalvikRegisterV = cUnit->tempDalvikRegisterV;
+
+    if (bb->dataFlowInfo == NULL) return false;
+    dvmCopyBitVector(tempDalvikRegisterV, bb->dataFlowInfo->liveInV);
+    if (bb->taken && bb->taken->dataFlowInfo)
+        computeSuccLiveIn(tempDalvikRegisterV, bb->taken->dataFlowInfo->liveInV,
+                          bb->dataFlowInfo->defV);
+    if (bb->fallThrough && bb->fallThrough->dataFlowInfo)
+        computeSuccLiveIn(tempDalvikRegisterV,
+                          bb->fallThrough->dataFlowInfo->liveInV,
+                          bb->dataFlowInfo->defV);
+    if (bb->successorBlockList.blockListType != kNotUsed) {
+        GrowableListIterator iterator;
+        dvmGrowableListIteratorInit(&bb->successorBlockList.blocks,
+                                    &iterator);
+        while (true) {
+            SuccessorBlockInfo *successorBlockInfo =
+                (SuccessorBlockInfo *) dvmGrowableListIteratorNext(&iterator);
+            if (successorBlockInfo == NULL) break;
+            BasicBlock *succBB = successorBlockInfo->block;
+            if (succBB->dataFlowInfo) {
+                computeSuccLiveIn(tempDalvikRegisterV,
+                                  succBB->dataFlowInfo->liveInV,
+                                  bb->dataFlowInfo->defV);
+            }
+        }
+    }
+    if (dvmCompareBitVectors(tempDalvikRegisterV, bb->dataFlowInfo->liveInV)) {
+        dvmCopyBitVector(bb->dataFlowInfo->liveInV, tempDalvikRegisterV);
+        return true;
+    }
+    return false;
+}
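+
+/*
+ * computeBlockLiveIns above implements the backward liveness equation
+ *     liveIn(b) = localLiveIn(b) U ((union of liveIn(s) over successors s) - def(b))
+ * and returns true when liveIn(b) grows, letting the iterative pass converge.
+ */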
+
+/* Insert phi nodes for each variable at its dominance frontiers */
+static void insertPhiNodes(CompilationUnit *cUnit)
+{
+    int dalvikReg;
+    const GrowableList *blockList = &cUnit->blockList;
+    BitVector *phiBlocks =
+        dvmCompilerAllocBitVector(cUnit->numBlocks, false);
+    BitVector *tmpBlocks =
+        dvmCompilerAllocBitVector(cUnit->numBlocks, false);
+    BitVector *inputBlocks =
+        dvmCompilerAllocBitVector(cUnit->numBlocks, false);
+
+    cUnit->tempDalvikRegisterV =
+        dvmCompilerAllocBitVector(cUnit->numDalvikRegisters, false);
+
+    dvmCompilerDataFlowAnalysisDispatcher(cUnit, computeBlockLiveIns,
+                                          kPostOrderDFSTraversal,
+                                          true /* isIterative */);
+
+    /* Iterate through each Dalvik register */
+    for (dalvikReg = 0; dalvikReg < cUnit->numDalvikRegisters; dalvikReg++) {
+        bool change;
+        BitVectorIterator iterator;
+
+        dvmCopyBitVector(inputBlocks, cUnit->defBlockMatrix[dalvikReg]);
+        dvmClearAllBits(phiBlocks);
+        /* Calculate the phi blocks for each Dalvik register */
+        do {
+            change = false;
+            dvmClearAllBits(tmpBlocks);
+            dvmBitVectorIteratorInit(inputBlocks, &iterator);
+            while (true) {
+                int idx = dvmBitVectorIteratorNext(&iterator);
+                if (idx == -1) break;
+                BasicBlock *defBB =
+                    (BasicBlock *) dvmGrowableListGetElement(blockList, idx);
+                /* Merge the dominance frontier to tmpBlocks */
+                dvmUnifyBitVectors(tmpBlocks, tmpBlocks, defBB->domFrontier);
+            }
+            if (dvmCompareBitVectors(phiBlocks, tmpBlocks)) {
+                change = true;
+                dvmCopyBitVector(phiBlocks, tmpBlocks);
+
+                /*
+                 * Iterate through the original blocks plus the new ones in
+                 * the dominance frontier.
+                 */
+                dvmCopyBitVector(inputBlocks, phiBlocks);
+                dvmUnifyBitVectors(inputBlocks, inputBlocks,
+                                   cUnit->defBlockMatrix[dalvikReg]);
+            }
+        } while (change);
+
+        /*
+         * Insert a phi node for dalvikReg in the phiBlocks if the Dalvik
+         * register is in the live-in set.
+         */
+        dvmBitVectorIteratorInit(phiBlocks, &iterator);
+        while (true) {
+            int idx = dvmBitVectorIteratorNext(&iterator);
+            if (idx == -1) break;
+            BasicBlock *phiBB =
+                (BasicBlock *) dvmGrowableListGetElement(blockList, idx);
+            /* Variable will be clobbered before being used - no need for phi */
+            if (!dvmIsBitSet(phiBB->dataFlowInfo->liveInV, dalvikReg)) continue;
+            MIR *phi = (MIR *) dvmCompilerNew(sizeof(MIR), true);
+            phi->dalvikInsn.opcode = kMirOpPhi;
+            phi->dalvikInsn.vA = dalvikReg;
+            phi->offset = phiBB->startOffset;
+            dvmCompilerPrependMIR(phiBB, phi);
+        }
+    }
+}
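+
+/*
+ * In other words, the do/while loop computes the iterated dominance frontier
+ * of each register's def sites, and a phi is only emitted where the register
+ * is live-in, which is the pruned-SSA flavor of phi placement.
+ */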
+
+/*
+ * Worker function to insert phi-operands with latest SSA names from
+ * predecessor blocks
+ */
+static bool insertPhiNodeOperands(CompilationUnit *cUnit, BasicBlock *bb)
+{
+    BitVector *ssaRegV = cUnit->tempSSARegisterV;
+    BitVectorIterator bvIterator;
+    GrowableList *blockList = &cUnit->blockList;
+    MIR *mir;
+
+    /* Phi nodes are at the beginning of each block */
+    for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
+        if (mir->dalvikInsn.opcode != kMirOpPhi) return true;
+        int ssaReg = mir->ssaRep->defs[0];
+        int encodedDalvikValue =
+            (int) dvmGrowableListGetElement(cUnit->ssaToDalvikMap, ssaReg);
+        int dalvikReg = DECODE_REG(encodedDalvikValue);
+
+        dvmClearAllBits(ssaRegV);
+
+        /* Iterate through the predecessors */
+        dvmBitVectorIteratorInit(bb->predecessors, &bvIterator);
+        while (true) {
+            int predIdx = dvmBitVectorIteratorNext(&bvIterator);
+            if (predIdx == -1) break;
+            BasicBlock *predBB = (BasicBlock *) dvmGrowableListGetElement(
+                                     blockList, predIdx);
+            int encodedSSAValue =
+                predBB->dataFlowInfo->dalvikToSSAMap[dalvikReg];
+            int ssaReg = DECODE_REG(encodedSSAValue);
+            dvmSetBit(ssaRegV, ssaReg);
+        }
+
+        /* Count the number of SSA registers for a Dalvik register */
+        int numUses = dvmCountSetBits(ssaRegV);
+        mir->ssaRep->numUses = numUses;
+        mir->ssaRep->uses =
+            (int *) dvmCompilerNew(sizeof(int) * numUses, false);
+        mir->ssaRep->fpUse =
+            (bool *) dvmCompilerNew(sizeof(bool) * numUses, false);
+
+        BitVectorIterator phiIterator;
+
+        dvmBitVectorIteratorInit(ssaRegV, &phiIterator);
+        int *usePtr = mir->ssaRep->uses;
+
+        /* Set the uses array for the phi node */
+        while (true) {
+            int ssaRegIdx = dvmBitVectorIteratorNext(&phiIterator);
+            if (ssaRegIdx == -1) break;
+            *usePtr++ = ssaRegIdx;
+        }
+    }
+
+    return true;
+}
+
+/* Perform SSA transformation for the whole method */
+void dvmCompilerMethodSSATransformation(CompilationUnit *cUnit)
+{
+    /* Compute the DFS order */
+    computeDFSOrder(cUnit);
+
+    /* Compute the dominator info */
+    computeDominators(cUnit);
+
+    /* Allocate data structures in preparation for SSA conversion */
+    dvmInitializeSSAConversion(cUnit);
+
+    /* Find out the "Dalvik reg def x block" relation */
+    computeDefBlockMatrix(cUnit);
+
+    /* Insert phi nodes to dominance frontiers for all variables */
+    insertPhiNodes(cUnit);
+
+    /* Rename register names by local defs and phi nodes */
+    dvmCompilerDataFlowAnalysisDispatcher(cUnit, dvmCompilerDoSSAConversion,
+                                          kPreOrderDFSTraversal,
+                                          false /* isIterative */);
+
+    /*
+     * Shared temp bit vector used by each block to collect the latest SSA
+     * names of a Dalvik register from all of its predecessor blocks.
+     */
+    cUnit->tempSSARegisterV = dvmCompilerAllocBitVector(cUnit->numSSARegs,
+                                                        false);
+
+    /* Insert phi-operands with latest SSA names from predecessor blocks */
+    dvmCompilerDataFlowAnalysisDispatcher(cUnit, insertPhiNodeOperands,
+                                          kReachableNodes,
+                                          false /* isIterative */);
+}
diff --git a/vm/compiler/Ralloc.c b/vm/compiler/Ralloc.c
index 744bc32..d772a31 100644
--- a/vm/compiler/Ralloc.c
+++ b/vm/compiler/Ralloc.c
@@ -18,42 +18,6 @@
 #include "CompilerInternals.h"
 #include "Dataflow.h"
 
-typedef struct LiveRange {
-    int ssaName;
-    bool active;
-    int first;
-    int last;
-} LiveRange;
-
-static int computeLiveRange(LiveRange *list, BasicBlock *bb, int seqNum)
-{
-    MIR *mir;
-    int i;
-
-    if (bb->blockType != kDalvikByteCode &&
-        bb->blockType != kTraceEntryBlock)
-        return seqNum;
-
-    for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
-        SSARepresentation *ssaRep = mir->ssaRep;
-        mir->seqNum = seqNum;
-        if (ssaRep) {
-            for (i=0; i< ssaRep->numUses; i++) {
-                int reg = ssaRep->uses[i];
-                list[reg].first = MIN(list[reg].first, seqNum);
-                list[reg].active = true;
-            }
-            for (i=0; i< ssaRep->numDefs; i++) {
-                int reg = ssaRep->defs[i];
-                list[reg].last = MAX(list[reg].last, seqNum + 1);
-                list[reg].active = true;
-            }
-            seqNum += 2;
-        }
-    }
-    return seqNum;
-}
-
 /*
  * Quick & dirty - make FP usage sticky.  This is strictly a hint - local
  * code generation will handle misses.  It might be worthwhile to collaborate
@@ -83,46 +47,17 @@
     }
 }
 
-/*
- * Determine whether to use simple or aggressive register allocation.  In
- * general, loops and full methods will get aggressive.
- */
-static bool simpleTrace(CompilationUnit *cUnit)
-{
-    //TODO: flesh out
-    return true;
-}
-
-/*
- * Target-independent register allocation.  Requires target-dependent
- * helper functions and assumes free list, temp list and spill region.
- * Uses a variant of linear scan and produces a mapping between SSA names
- * and location.  Location may be original Dalvik register, hardware
- * register or spill location.
- *
- * Method:
- *    0.  Allocate the structure to hold the SSA name life ranges
- *    1.  Number each MIR instruction, counting by 2.
- *        +0 -> The "read" of the operands
- *        +1 -> The definition of the target resource
- *    2.  Compute live ranges for all SSA names *not* including the
- *        subscript 0 original Dalvik names.  Phi functions ignored
- *        at this point.
- *    3.  Sort the live range list by lowest range start.
- *    4.  Process and remove all Phi functions.
- *        o If there is no live range collisions among all operands and
- *          the target of a Phi function, collapse operands and target
- *          and rewrite using target SSA name.
- *        o If there is a collision, introduce copies.
- *    5.  Allocate in order of increasing live range start.
- */
 static const RegLocation freshLoc = {kLocDalvikFrame, 0, 0, INVALID_REG,
                                      INVALID_REG, INVALID_SREG};
-void dvmCompilerRegAlloc(CompilationUnit *cUnit)
+
+/*
+ * Local register allocation for simple traces.  Most of the work for
+ * local allocation is done on the fly.  Here we do some initialization
+ * and type inference.
+ */
+void dvmCompilerLocalRegAlloc(CompilationUnit *cUnit)
 {
     int i;
-    int seqNum = 0;
-    LiveRange *ranges;
     RegLocation *loc;
 
     /* Allocate the location map */
@@ -133,27 +68,19 @@
     }
     cUnit->regLocation = loc;
 
+    GrowableListIterator iterator;
+
+    dvmGrowableListIteratorInit(&cUnit->blockList, &iterator);
     /* Do type inference pass */
-    for (i=0; i < cUnit->numBlocks; i++) {
-        inferTypes(cUnit, cUnit->blockList[i]);
+    while (true) {
+        BasicBlock *bb = (BasicBlock *) dvmGrowableListIteratorNext(&iterator);
+        if (bb == NULL) break;
+        inferTypes(cUnit, bb);
     }
 
-    if (simpleTrace(cUnit)) {
-        /*
-         * Just rename everything back to subscript 0 names and don't do
-         * any explicit promotion.  Local allocator will opportunistically
-         * promote on the fly.
-         */
-        for (i=0; i < cUnit->numSSARegs; i++) {
-            cUnit->regLocation[i].sRegLow =
+    /* Remap SSA names back to original frame locations. */
+    for (i=0; i < cUnit->numSSARegs; i++) {
+        cUnit->regLocation[i].sRegLow =
                 DECODE_REG(dvmConvertSSARegToDalvik(cUnit, loc[i].sRegLow));
-        }
-    } else {
-        // Compute live ranges
-        ranges = dvmCompilerNew(cUnit->numSSARegs * sizeof(*ranges), true);
-        for (i=0; i < cUnit->numSSARegs; i++)
-            ranges[i].active = false;
-        seqNum = computeLiveRange(ranges, cUnit->blockList[i], seqNum);
-        //TODO: phi squash & linear scan promotion
     }
 }
diff --git a/vm/compiler/Utility.c b/vm/compiler/Utility.c
index daeb893..7be57ef 100644
--- a/vm/compiler/Utility.c
+++ b/vm/compiler/Utility.c
@@ -82,7 +82,8 @@
             LOGI("Total arena pages for JIT: %d", numArenaBlocks);
         goto retry;
     }
-    return NULL;
+    /* Should not reach here */
+    dvmAbort();
 }
 
 /* Reclaim all the arena blocks allocated so far */
@@ -101,8 +102,8 @@
 {
     gList->numAllocated = initLength;
     gList->numUsed = 0;
-    gList->elemList = (void **) dvmCompilerNew(sizeof(void *) * initLength,
-                                               true);
+    gList->elemList = (intptr_t *) dvmCompilerNew(sizeof(intptr_t) * initLength,
+                                                  true);
 }
 
 /* Expand the capacity of a growable list */
@@ -114,14 +115,15 @@
     } else {
         newLength += 128;
     }
-    void *newArray = dvmCompilerNew(sizeof(void *) * newLength, true);
-    memcpy(newArray, gList->elemList, sizeof(void *) * gList->numAllocated);
+    intptr_t *newArray =
+        (intptr_t *) dvmCompilerNew(sizeof(intptr_t) * newLength, true);
+    memcpy(newArray, gList->elemList, sizeof(intptr_t) * gList->numAllocated);
     gList->numAllocated = newLength;
     gList->elemList = newArray;
 }
 
 /* Insert a new element into the growable list */
-void dvmInsertGrowableList(GrowableList *gList, void *elem)
+void dvmInsertGrowableList(GrowableList *gList, intptr_t elem)
 {
     assert(gList->numAllocated != 0);
     if (gList->numUsed == gList->numAllocated) {
@@ -130,10 +132,30 @@
     gList->elemList[gList->numUsed++] = elem;
 }
 
+void dvmGrowableListIteratorInit(GrowableList *gList,
+                                 GrowableListIterator *iterator)
+{
+    iterator->list = gList;
+    iterator->idx = 0;
+    iterator->size = gList->numUsed;
+}
+
+intptr_t dvmGrowableListIteratorNext(GrowableListIterator *iterator)
+{
+    assert(iterator->size == iterator->list->numUsed);
+    if (iterator->idx == iterator->size) return 0;
+    return iterator->list->elemList[iterator->idx++];
+}
+
+intptr_t dvmGrowableListGetElement(const GrowableList *gList, size_t idx)
+{
+    assert(idx < gList->numUsed);
+    return gList->elemList[idx];
+}
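+
+/*
+ * Typical iterator usage, mirroring the call sites elsewhere in this change
+ * (sketch only):
+ *
+ *     GrowableListIterator iterator;
+ *     dvmGrowableListIteratorInit(&cUnit->blockList, &iterator);
+ *     while (true) {
+ *         BasicBlock *bb = (BasicBlock *) dvmGrowableListIteratorNext(&iterator);
+ *         if (bb == NULL) break;    // next() returns 0 once exhausted
+ *         ...
+ *     }
+ */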
+
 /* Debug Utility - dump a compilation unit */
 void dvmCompilerDumpCompilationUnit(CompilationUnit *cUnit)
 {
-    int i;
     BasicBlock *bb;
     char *blockTypeNames[] = {
         "Normal Chaining Cell",
@@ -156,9 +178,13 @@
          cUnit->method->name);
     LOGD("%d insns", dvmGetMethodInsnsSize(cUnit->method));
     LOGD("%d blocks in total", cUnit->numBlocks);
+    GrowableListIterator iterator;
 
-    for (i = 0; i < cUnit->numBlocks; i++) {
-        bb = cUnit->blockList[i];
+    dvmGrowableListIteratorInit(&cUnit->blockList, &iterator);
+
+    while (true) {
+        bb = (BasicBlock *) dvmGrowableListIteratorNext(&iterator);
+        if (bb == NULL) break;
         LOGD("Block %d (%s) (insn %04x - %04x%s)\n",
              bb->id,
              blockTypeNames[bb->blockType],
@@ -243,13 +269,12 @@
  * NOTE: this is the sister implementation of dvmAllocBitVector. In this version
  * memory is allocated from the compiler arena.
  */
-BitVector* dvmCompilerAllocBitVector(int startBits, bool expandable)
+BitVector* dvmCompilerAllocBitVector(unsigned int startBits, bool expandable)
 {
     BitVector* bv;
-    int count;
+    unsigned int count;
 
     assert(sizeof(bv->storage[0]) == 4);        /* assuming 32-bit units */
-    assert(startBits >= 0);
 
     bv = (BitVector*) dvmCompilerNew(sizeof(BitVector), false);
 
@@ -270,17 +295,16 @@
  * NOTE: this is the sister implementation of dvmSetBit. In this version
  * memory is allocated from the compiler arena.
  */
-bool dvmCompilerSetBit(BitVector *pBits, int num)
+bool dvmCompilerSetBit(BitVector *pBits, unsigned int num)
 {
-    assert(num >= 0);
-    if (num >= pBits->storageSize * (int)sizeof(u4) * 8) {
+    if (num >= pBits->storageSize * sizeof(u4) * 8) {
         if (!pBits->expandable)
-            return false;
+            dvmAbort();
 
         /* Round up to word boundaries for "num+1" bits */
-        int newSize = (num + 1 + 31) >> 5;
+        unsigned int newSize = (num + 1 + 31) >> 5;
         assert(newSize > pBits->storageSize);
-        u4 *newStorage = dvmCompilerNew(newSize * sizeof(u4), false);
+        u4 *newStorage = (u4*)dvmCompilerNew(newSize * sizeof(u4), false);
         memcpy(newStorage, pBits->storage, pBits->storageSize * sizeof(u4));
         memset(&newStorage[pBits->storageSize], 0,
                (newSize - pBits->storageSize) * sizeof(u4));
@@ -292,6 +316,35 @@
     return true;
 }
 
+/*
+ * Mark the specified bit as "unset".
+ *
+ * Aborts via dvmAbort() if the bit is outside the range of the vector.
+ *
+ * NOTE: this is the sister implementation of dvmClearBit. In this version
+ * memory is allocated from the compiler arena.
+ */
+bool dvmCompilerClearBit(BitVector *pBits, unsigned int num)
+{
+    if (num >= pBits->storageSize * sizeof(u4) * 8) {
+        LOGE("Trying to clear a bit that is not set in the vector yet!");
+        dvmAbort();
+    }
+
+    pBits->storage[num >> 5] &= ~(1 << (num & 0x1f));
+    return true;
+}
+
+/*
+ * If set is true, mark all bits as 1. Otherwise mark all bits as 0.
+ */
+void dvmCompilerMarkAllBits(BitVector *pBits, bool set)
+{
+    int value = set ? -1 : 0;
+    memset(pBits->storage, value, pBits->storageSize * (int)sizeof(u4));
+}
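+/* Note: memset with -1 above fills every byte with 0xff, i.e. sets all bits. */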
+
 void dvmDebugBitVector(char *msg, const BitVector *bv, int length)
 {
     int i;
@@ -315,3 +368,41 @@
      */
     longjmp(*cUnit->bailPtr, 1);
 }
+
+void dvmDumpBlockBitVector(const GrowableList *blocks, char *msg,
+                           const BitVector *bv, int length)
+{
+    int i;
+
+    LOGE("%s", msg);
+    for (i = 0; i < length; i++) {
+        if (dvmIsBitSet(bv, i)) {
+            BasicBlock *bb =
+                (BasicBlock *) dvmGrowableListGetElement(blocks, i);
+            char blockName[BLOCK_NAME_LEN];
+            dvmGetBlockName(bb, blockName);
+            LOGE("Bit %d / %s is set", i, blockName);
+        }
+    }
+}
+
+void dvmGetBlockName(BasicBlock *bb, char *name)
+{
+    switch (bb->blockType) {
+        case kMethodEntryBlock:
+            snprintf(name, BLOCK_NAME_LEN, "entry");
+            break;
+        case kMethodExitBlock:
+            snprintf(name, BLOCK_NAME_LEN, "exit");
+            break;
+        case kDalvikByteCode:
+            snprintf(name, BLOCK_NAME_LEN, "block%04x", bb->startOffset);
+            break;
+        case kExceptionHandling:
+            snprintf(name, BLOCK_NAME_LEN, "exception%04x", bb->startOffset);
+            break;
+        default:
+            snprintf(name, BLOCK_NAME_LEN, "??");
+            break;
+    }
+}
diff --git a/vm/compiler/codegen/CodegenFactory.c b/vm/compiler/codegen/CodegenFactory.c
index aad6512..ef7a0a9 100644
--- a/vm/compiler/codegen/CodegenFactory.c
+++ b/vm/compiler/codegen/CodegenFactory.c
@@ -57,7 +57,7 @@
     if (rlSrc.location == kLocPhysReg) {
         genRegCopy(cUnit, reg1, rlSrc.lowReg);
     } else  if (rlSrc.location == kLocRetval) {
-        loadWordDisp(cUnit, rGLUE, offsetof(InterpState, retval), reg1);
+        loadWordDisp(cUnit, rSELF, offsetof(Thread, retval), reg1);
     } else {
         assert(rlSrc.location == kLocDalvikFrame);
         loadWordDisp(cUnit, rFP, dvmCompilerS2VReg(cUnit, rlSrc.sRegLow) << 2,
@@ -90,7 +90,7 @@
     if (rlSrc.location == kLocPhysReg) {
         genRegCopyWide(cUnit, regLo, regHi, rlSrc.lowReg, rlSrc.highReg);
     } else if (rlSrc.location == kLocRetval) {
-        loadBaseDispWide(cUnit, NULL, rGLUE, offsetof(InterpState, retval),
+        loadBaseDispWide(cUnit, NULL, rSELF, offsetof(Thread, retval),
                          regLo, regHi, INVALID_SREG);
     } else {
         assert(rlSrc.location == kLocDalvikFrame);
@@ -124,7 +124,7 @@
         rlSrc.location = kLocPhysReg;
         dvmCompilerMarkLive(cUnit, rlSrc.lowReg, rlSrc.sRegLow);
     } else if (rlSrc.location == kLocRetval) {
-        loadWordDisp(cUnit, rGLUE, offsetof(InterpState, retval), rlSrc.lowReg);
+        loadWordDisp(cUnit, rSELF, offsetof(Thread, retval), rlSrc.lowReg);
         rlSrc.location = kLocPhysReg;
         dvmCompilerClobber(cUnit, rlSrc.lowReg);
     }
@@ -164,7 +164,7 @@
 
 
     if (rlDest.location == kLocRetval) {
-        storeBaseDisp(cUnit, rGLUE, offsetof(InterpState, retval),
+        storeBaseDisp(cUnit, rSELF, offsetof(Thread, retval),
                       rlDest.lowReg, kWord);
         dvmCompilerClobber(cUnit, rlDest.lowReg);
     } else {
@@ -192,7 +192,7 @@
         dvmCompilerMarkLive(cUnit, rlSrc.highReg,
                             dvmCompilerSRegHi(rlSrc.sRegLow));
     } else if (rlSrc.location == kLocRetval) {
-        loadBaseDispWide(cUnit, NULL, rGLUE, offsetof(InterpState, retval),
+        loadBaseDispWide(cUnit, NULL, rSELF, offsetof(Thread, retval),
                          rlSrc.lowReg, rlSrc.highReg, INVALID_SREG);
         rlSrc.location = kLocPhysReg;
         dvmCompilerClobber(cUnit, rlSrc.lowReg);
@@ -242,7 +242,7 @@
 
 
     if (rlDest.location == kLocRetval) {
-        storeBaseDispWide(cUnit, rGLUE, offsetof(InterpState, retval),
+        storeBaseDispWide(cUnit, rSELF, offsetof(Thread, retval),
                           rlDest.lowReg, rlDest.highReg);
         dvmCompilerClobber(cUnit, rlDest.lowReg);
         dvmCompilerClobber(cUnit, rlDest.highReg);
@@ -263,3 +263,33 @@
         }
     }
 }
+
+/*
+ * Load a class pointer value into a fixed or temp register.  Target
+ * register is clobbered, and marked inUse.
+ */
+static ArmLIR *loadClassPointer(CompilationUnit *cUnit, int rDest, int value)
+{
+    ArmLIR *res;
+    cUnit->hasClassLiterals = true;
+    if (dvmCompilerIsTemp(cUnit, rDest)) {
+        dvmCompilerClobber(cUnit, rDest);
+        dvmCompilerMarkInUse(cUnit, rDest);
+    }
+    ArmLIR *dataTarget = scanLiteralPool(cUnit->classPointerList, value, 0);
+    if (dataTarget == NULL) {
+        dataTarget = addWordData(cUnit, &cUnit->classPointerList, value);
+        /* Counts the number of class pointers in this translation */
+        cUnit->numClassPointers++;
+    }
+    ArmLIR *loadPcRel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
+    loadPcRel->opcode = kThumb2LdrPcRel12;
+    loadPcRel->generic.target = (LIR *) dataTarget;
+    loadPcRel->operands[0] = rDest;
+    setupResourceMasks(loadPcRel);
+    setMemRefType(loadPcRel, true, kLiteral);
+    loadPcRel->aliasInfo = dataTarget->operands[0];
+    res = loadPcRel;
+    dvmCompilerAppendLIR(cUnit, (LIR *) loadPcRel);
+    return res;
+}
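
As a hedged sketch of how loadClassPointer is consumed (the genPredictedClassCheck caller and rExpected parameter are assumptions for illustration; loadClassPointer and CallsiteInfo come from this patch): the value passed in is a CallsiteInfo pointer, which installLiteralPools writes into the class pointer pool and dvmJitInstallClassObjectPointers later rewrites into the real ClassObject pointer.

/* Illustrative caller (not in this patch): stash the call-site metadata
 * pointer in the literal pool and load it PC-relative into a register.
 * The pool slot only becomes a real ClassObject* after
 * dvmJitInstallClassObjectPointers patches it. */
static void genPredictedClassCheck(CompilationUnit *cUnit,
                                   CallsiteInfo *callsiteInfo, int rExpected)
{
    loadClassPointer(cUnit, rExpected, (int) callsiteInfo);
}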
diff --git a/vm/compiler/codegen/CompilerCodegen.h b/vm/compiler/codegen/CompilerCodegen.h
index d871c3b..efa913f 100644
--- a/vm/compiler/codegen/CompilerCodegen.h
+++ b/vm/compiler/codegen/CompilerCodegen.h
@@ -28,9 +28,16 @@
 /* Lower middle-level IR to low-level IR */
 void dvmCompilerMIR2LIR(CompilationUnit *cUnit);
 
+/* Lower middle-level IR to low-level IR for the whole method */
+void dvmCompilerMethodMIR2LIR(CompilationUnit *cUnit);
+
 /* Assemble LIR into machine code */
 void dvmCompilerAssembleLIR(CompilationUnit *cUnit, JitTranslationInfo *info);
 
+/* Install class objects in the literal pool */
+void dvmJitInstallClassObjectPointers(CompilationUnit *cUnit,
+                                      char *codeAddress);
+
 /* Patch inline cache content for polymorphic callsites */
 bool dvmJitPatchInlineCache(void *cellPtr, void *contentPtr);
 
@@ -38,13 +45,10 @@
 void dvmCompilerCodegenDump(CompilationUnit *cUnit);
 
 /* Implemented in the codegen/<target>/Assembler.c */
-void* dvmJitChain(void *tgtAddr, u4* branchAddr);
-u4* dvmJitUnchain(void *codeAddr);
-void dvmJitUnchainAll(void);
 void dvmCompilerPatchInlineCache(void);
 
 /* Implemented in codegen/<target>/Ralloc.c */
-void dvmCompilerRegAlloc(CompilationUnit *cUnit);
+void dvmCompilerLocalRegAlloc(CompilationUnit *cUnit);
 
 /* Implemented in codegen/<target>/Thumb<version>Util.c */
 void dvmCompilerInitializeRegAlloc(CompilationUnit *cUnit);
diff --git a/vm/compiler/codegen/Optimizer.h b/vm/compiler/codegen/Optimizer.h
index d42fe87..2b05476 100644
--- a/vm/compiler/codegen/Optimizer.h
+++ b/vm/compiler/codegen/Optimizer.h
@@ -29,6 +29,7 @@
     kTrackLiveTemps,
     kSuppressLoads,
     kMethodInlining,
+    kMethodJit,
 } optControlVector;
 
 /* Forward declarations */
diff --git a/vm/compiler/codegen/RallocUtil.c b/vm/compiler/codegen/RallocUtil.c
index 32977eb..27d1f05 100644
--- a/vm/compiler/codegen/RallocUtil.c
+++ b/vm/compiler/codegen/RallocUtil.c
@@ -364,10 +364,6 @@
     dvmCompilerAbort(cUnit);
 }
 
-/*
- * FIXME - this needs to also check the preserved pool once we start
- * start using preserved registers.
- */
 extern RegisterInfo *dvmCompilerIsLive(CompilationUnit *cUnit, int reg)
 {
     RegisterInfo *p = cUnit->regPool->coreTemps;
@@ -452,7 +448,7 @@
         LIR *p;
         assert(sReg1 == sReg2);
         for (p = start; ;p = p->next) {
-            ((ArmLIR *)p)->isNop = true;
+            ((ArmLIR *)p)->flags.isNop = true;
             if (p == finish)
                 break;
         }
diff --git a/vm/compiler/codegen/arm/ArchFactory.c b/vm/compiler/codegen/arm/ArchFactory.c
index 581ba39..805a6fc 100644
--- a/vm/compiler/codegen/arm/ArchFactory.c
+++ b/vm/compiler/codegen/arm/ArchFactory.c
@@ -32,7 +32,21 @@
                                TGT_LIR *pcrLabel)
 {
     TGT_LIR *branch = genCmpImmBranch(cUnit, cond, reg, checkValue);
-    return genCheckCommon(cUnit, dOffset, branch, pcrLabel);
+    if (cUnit->methodJitMode) {
+        BasicBlock *bb = cUnit->curBlock;
+        if (bb->taken) {
+            ArmLIR  *exceptionLabel = (ArmLIR *) cUnit->blockLabelList;
+            exceptionLabel += bb->taken->id;
+            branch->generic.target = (LIR *) exceptionLabel;
+            return exceptionLabel;
+        } else {
+            LOGE("Catch blocks not handled yet");
+            dvmAbort();
+            return NULL;
+        }
+    } else {
+        return genCheckCommon(cUnit, dOffset, branch, pcrLabel);
+    }
 }
 
 /*
diff --git a/vm/compiler/codegen/arm/ArchUtility.c b/vm/compiler/codegen/arm/ArchUtility.c
index 0b76eb5..d94b1a7 100644
--- a/vm/compiler/codegen/arm/ArchUtility.c
+++ b/vm/compiler/codegen/arm/ArchUtility.c
@@ -25,18 +25,24 @@
     "ror"};
 
 /* Decode and print a ARM register name */
-static char * decodeRegList(int vector, char *buf)
+static char * decodeRegList(ArmOpcode opcode, int vector, char *buf)
 {
     int i;
     bool printed = false;
     buf[0] = 0;
-    for (i = 0; i < 8; i++, vector >>= 1) {
+    for (i = 0; i < 16; i++, vector >>= 1) {
         if (vector & 0x1) {
+            int regId = i;
+            if (opcode == kThumbPush && i == 8) {
+                regId = r14lr;
+            } else if (opcode == kThumbPop && i == 8) {
+                regId = r15pc;
+            }
             if (printed) {
-                sprintf(buf + strlen(buf), ", r%d", i);
+                sprintf(buf + strlen(buf), ", r%d", regId);
             } else {
                 printed = true;
-                sprintf(buf, "r%d", i);
+                sprintf(buf, "r%d", regId);
             }
         }
     }
@@ -189,9 +195,10 @@
                        }
                        break;
                    case 't':
-                       sprintf(tbuf,"0x%08x",
+                       sprintf(tbuf,"0x%08x (L%p)",
                                (int) baseAddr + lir->generic.offset + 4 +
-                               (operand << 1));
+                               (operand << 1),
+                               lir->generic.target);
                        break;
                    case 'u': {
                        int offset_1 = lir->operands[0];
@@ -209,7 +216,7 @@
                        strcpy(tbuf, "see above");
                        break;
                    case 'R':
-                       decodeRegList(operand, tbuf);
+                       decodeRegList(lir->opcode, operand, tbuf);
                        break;
                    default:
                        strcpy(tbuf,"DecodeError");
@@ -256,10 +263,22 @@
         if (mask & ENCODE_FP_STATUS) {
             strcat(buf, "fpcc ");
         }
+
+        /* Memory bits */
         if (armLIR && (mask & ENCODE_DALVIK_REG)) {
             sprintf(buf + strlen(buf), "dr%d%s", armLIR->aliasInfo & 0xffff,
                     (armLIR->aliasInfo & 0x80000000) ? "(+1)" : "");
         }
+        if (mask & ENCODE_LITERAL) {
+            strcat(buf, "lit ");
+        }
+
+        if (mask & ENCODE_HEAP_REF) {
+            strcat(buf, "heap ");
+        }
+        if (mask & ENCODE_MUST_NOT_ALIAS) {
+            strcat(buf, "noalias ");
+        }
     }
     if (buf[0]) {
         LOGD("%s: %s", prefix, buf);
@@ -296,8 +315,6 @@
         case kArmPseudoSSARep:
             DUMP_SSA_REP(LOGD("-------- %s\n", (char *) dest));
             break;
-        case kArmPseudoTargetLabel:
-            break;
         case kArmPseudoChainingCellBackwardBranch:
             LOGD("-------- chaining cell (backward branch): 0x%04x\n", dest);
             break;
@@ -338,11 +355,12 @@
         case kArmPseudoEHBlockLabel:
             LOGD("Exception_Handling:\n");
             break;
+        case kArmPseudoTargetLabel:
         case kArmPseudoNormalBlockLabel:
-            LOGD("L%#06x:\n", dest);
+            LOGD("L%p:\n", lir);
             break;
         default:
-            if (lir->isNop && !dumpNop) {
+            if (lir->flags.isNop && !dumpNop) {
                 break;
             }
             buildInsnString(EncodingMap[lir->opcode].name, lir, opName,
@@ -351,15 +369,15 @@
                             256);
             LOGD("%p (%04x): %-8s%s%s\n",
                  baseAddr + offset, offset, opName, buf,
-                 lir->isNop ? "(nop)" : "");
+                 lir->flags.isNop ? "(nop)" : "");
             break;
     }
 
-    if (lir->useMask && (!lir->isNop || dumpNop)) {
+    if (lir->useMask && (!lir->flags.isNop || dumpNop)) {
         DUMP_RESOURCE_MASK(dvmDumpResourceMask((LIR *) lir,
                                                lir->useMask, "use"));
     }
-    if (lir->defMask && (!lir->isNop || dumpNop)) {
+    if (lir->defMask && (!lir->flags.isNop || dumpNop)) {
         DUMP_RESOURCE_MASK(dvmDumpResourceMask((LIR *) lir,
                                                lir->defMask, "def"));
     }
@@ -375,9 +393,16 @@
     LOGD("installed code is at %p\n", cUnit->baseAddr);
     LOGD("total size is %d bytes\n", cUnit->totalSize);
     for (lirInsn = cUnit->firstLIRInsn; lirInsn; lirInsn = lirInsn->next) {
-        dvmDumpLIRInsn(lirInsn, cUnit->baseAddr);
+        dvmDumpLIRInsn(lirInsn, (unsigned char *) cUnit->baseAddr);
     }
-    for (lirInsn = cUnit->wordList; lirInsn; lirInsn = lirInsn->next) {
+    for (lirInsn = cUnit->classPointerList; lirInsn; lirInsn = lirInsn->next) {
+        armLIR = (ArmLIR *) lirInsn;
+        LOGD("%p (%04x): .class (%s)\n",
+             (char*)cUnit->baseAddr + armLIR->generic.offset,
+             armLIR->generic.offset,
+             ((CallsiteInfo *) armLIR->operands[0])->classDescriptor);
+    }
+    for (lirInsn = cUnit->literalList; lirInsn; lirInsn = lirInsn->next) {
         armLIR = (ArmLIR *) lirInsn;
         LOGD("%p (%04x): .word (0x%x)\n",
              (char*)cUnit->baseAddr + armLIR->generic.offset,
@@ -385,3 +410,9 @@
              armLIR->operands[0]);
     }
 }
+
+/* Target-specific cache flushing */
+int dvmCompilerCacheFlush(long start, long end, long flags)
+{
+    return cacheflush(start, end, flags);
+}
diff --git a/vm/compiler/codegen/arm/ArmLIR.h b/vm/compiler/codegen/arm/ArmLIR.h
index 213344c..c47c291 100644
--- a/vm/compiler/codegen/arm/ArmLIR.h
+++ b/vm/compiler/codegen/arm/ArmLIR.h
@@ -24,7 +24,7 @@
  * r0, r1, r2, r3 are always scratch
  * r4 (rPC) is scratch for Jit, but must be restored when resuming interp
  * r5 (rFP) is reserved [holds Dalvik frame pointer]
- * r6 (rGLUE) is reserved [holds current &interpState]
+ * r6 (rSELF) is reserved [holds current &Thread]
  * r7 (rINST) is scratch for Jit
  * r8 (rIBASE) is scratch for Jit, but must be restored when resuming interp
  * r9 is reserved
@@ -119,10 +119,6 @@
     int numFPTemps;
     RegisterInfo *FPTemps;
     int nextFPTemp;
-    int numCoreRegs;
-    RegisterInfo *coreRegs;
-    int numFPRegs;
-    RegisterInfo *FPRegs;
 } RegisterPool;
 
 typedef enum ResourceEncodingPos {
@@ -133,12 +129,12 @@
     kFPReg0     = 16,
     kRegEnd     = 48,
     kCCode      = kRegEnd,
-    kFPStatus,
-    kDalvikReg,
-    kLiteral,
-    kFrameRef,
-    kHeapRef,
-    kLitPoolRef
+    kFPStatus,          // FP status word
+    // The following four bits are for memory disambiguation
+    kDalvikReg,         // 1 Dalvik Frame (can be fully disambiguated)
+    kLiteral,           // 2 Literal pool (can be fully disambiguated)
+    kHeapRef,           // 3 Somewhere on the heap (alias with any other heap)
+    kMustNotAlias,      // 4 Guaranteed to be non-alias (eg *(r6+x))
 } ResourceEncodingPos;
 
 #define ENCODE_REG_LIST(N)      ((u8) N)
@@ -148,19 +144,15 @@
 #define ENCODE_CCODE            (1ULL << kCCode)
 #define ENCODE_FP_STATUS        (1ULL << kFPStatus)
 
-    /* Must alias */
+/* Abstract memory locations */
 #define ENCODE_DALVIK_REG       (1ULL << kDalvikReg)
 #define ENCODE_LITERAL          (1ULL << kLiteral)
-
-    /* May alias */
-#define ENCODE_FRAME_REF        (1ULL << kFrameRef)
 #define ENCODE_HEAP_REF         (1ULL << kHeapRef)
-#define ENCODE_LITPOOL_REF      (1ULL << kLitPoolRef)
+#define ENCODE_MUST_NOT_ALIAS   (1ULL << kMustNotAlias)
 
 #define ENCODE_ALL              (~0ULL)
-#define ENCODE_MEM_DEF          (ENCODE_FRAME_REF | ENCODE_HEAP_REF)
-#define ENCODE_MEM_USE          (ENCODE_FRAME_REF | ENCODE_HEAP_REF \
-                                 | ENCODE_LITPOOL_REF)
+#define ENCODE_MEM              (ENCODE_DALVIK_REG | ENCODE_LITERAL | \
+                                 ENCODE_HEAP_REF | ENCODE_MUST_NOT_ALIAS)
 
 #define DECODE_ALIAS_INFO_REG(X)        (X & 0xffff)
 #define DECODE_ALIAS_INFO_WIDE(X)       ((X & 0x80000000) ? 1 : 0)
@@ -211,23 +203,33 @@
     kOpUncondBr,
 } OpKind;
 
+/*
+ * Annotate special-purpose core registers:
+ *   - VM: r4PC, r5FP, and r6SELF
+ *   - ARM architecture: r13sp, r14lr, and r15pc
+ *
+ * rPC, rFP, and rSELF are for architecture-independent code to use.
+ */
 typedef enum NativeRegisterPool {
-    r0 = 0,
-    r1 = 1,
-    r2 = 2,
-    r3 = 3,
-    r4PC = 4,
-    rFP = 5,
-    rGLUE = 6,
-    r7 = 7,
-    r8 = 8,
-    r9 = 9,
-    r10 = 10,
-    r11 = 11,
-    r12 = 12,
-    r13 = 13,
-    rlr = 14,
-    rpc = 15,
+    r0     = 0,
+    r1     = 1,
+    r2     = 2,
+    r3     = 3,
+    rPC    = 4,
+    r4PC   = rPC,
+    rFP    = 5,
+    r5FP   = rFP,
+    rSELF  = 6,
+    r6SELF = rSELF,
+    r7     = 7,
+    r8     = 8,
+    r9     = 9,
+    r10    = 10,
+    r11    = 11,
+    r12    = 12,
+    r13sp  = 13,
+    r14lr  = 14,
+    r15pc  = 15,
     fr0  =  0 + FP_REG_OFFSET,
     fr1  =  1 + FP_REG_OFFSET,
     fr2  =  2 + FP_REG_OFFSET,
@@ -519,7 +521,7 @@
     kThumb2StrbRRI12,    /* strb rt,[rn,#imm12] [111110001000]
                                        rt[15..12] rn[19..16] imm12[11..0] */
     kThumb2Pop,          /* pop     [1110100010111101] list[15-0]*/
-    kThumb2Push,         /* push    [1110100010101101] list[15-0]*/
+    kThumb2Push,         /* push    [1110100100101101] list[15-0]*/
     kThumb2CmpRI8,       /* cmp rn, #<const> [11110] i [011011] rn[19-16] [0]
                                        imm3 [1111] imm8[7..0] */
     kThumb2AdcRRR,       /* adc [111010110101] rn[19..16] [0000] rd[11..8]
@@ -623,6 +625,8 @@
     kThumb2Bfc,          /* bfc [11110011011011110] [0] imm3[14-12]
                                   rd[11-8] imm2[7-6] [0] msb[4-0] */
     kThumb2Dmb,          /* dmb [1111001110111111100011110101] option[3-0] */
+    kThumb2LdrPcReln12,  /* ldr rd,[pc,-#imm12] [1111100011011111] rt[15-12]
+                                  imm12[11-0] */
 
     kArmLast,
 } ArmOpcode;
@@ -759,15 +763,17 @@
 typedef struct ArmLIR {
     LIR generic;
     ArmOpcode opcode;
-    int operands[4];    // [0..3] = [dest, src1, src2, extra]
-    bool isNop;         // LIR is optimized away
-    bool branchInsertSV;// mark for insertion of branch before this instruction,
-                        // used to identify mem ops for self verification mode
-    int age;            // default is 0, set lazily by the optimizer
-    int size;           // 16-bit unit size (1 for thumb, 1 or 2 for thumb2)
-    int aliasInfo;      // For Dalvik register access & litpool disambiguation
-    u8 useMask;         // Resource mask for use
-    u8 defMask;         // Resource mask for def
+    int operands[4];            // [0..3] = [dest, src1, src2, extra]
+    struct {
+        bool isNop:1;           // LIR is optimized away
+        bool insertWrapper:1;   // insert branch to emulate memory accesses
+        unsigned int age:4;     // default is 0, set lazily by the optimizer
+        unsigned int size:3;    // bytes (2 for thumb, 2/4 for thumb2)
+        unsigned int unused:23;
+    } flags;
+    int aliasInfo;              // For Dalvik register & litpool disambiguation
+    u8 useMask;                 // Resource mask for use
+    u8 defMask;                 // Resource mask for def
 } ArmLIR;
 
 /* Init values when a predicted chain is initially assembled */
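
A minimal sketch of how the reworked memory-class bits are meant to be combined in a LIR's resource masks; the markAsLiteralLoad helper is hypothetical, while ENCODE_MEM, ENCODE_LITERAL, and the ArmLIR useMask field are taken from the header above.

/* Hypothetical helper (illustration only): reclassify a load as a
 * literal-pool reference so the scheduler can tell it apart from Dalvik
 * register and heap accesses. */
static void markAsLiteralLoad(ArmLIR *lir)
{
    lir->useMask &= ~ENCODE_MEM;       /* clear the generic memory classes */
    lir->useMask |= ENCODE_LITERAL;    /* literal pool: fully disambiguated */
}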
diff --git a/vm/compiler/codegen/arm/ArmRallocUtil.c b/vm/compiler/codegen/arm/ArmRallocUtil.c
index bc643c1..3a5afa2 100644
--- a/vm/compiler/codegen/arm/ArmRallocUtil.c
+++ b/vm/compiler/codegen/arm/ArmRallocUtil.c
@@ -29,7 +29,7 @@
  * Register usage for 16-bit Thumb systems:
  *     r0-r3: Temp/argument
  *     lr(r14):      Temp for translations, return address for handlers
- *     rGLUE(r6):    Pointer to InterpState
+ *     rSELF(r6):    Pointer to Thread
  *     rFP(r5):      Dalvik frame pointer
  *     r4, r7:       Temp for translations
  *     r8, r9, r10:   Temp preserved across C calls
@@ -38,7 +38,7 @@
  * Register usage for 32-bit Thumb systems:
  *     r0-r3: Temp/argument
  *     lr(r14):      Temp for translations, return address for handlers
- *     rGLUE(r6):    Pointer to InterpState
+ *     rSELF(r6):    Pointer to Thread
  *     rFP(r5):      Dalvik frame pointer
  *     r4, r7:       Temp for translations
  *     r8, r9, r10   Temp preserved across C calls
@@ -58,7 +58,7 @@
     dvmCompilerClobber(cUnit, r9); // Need to do this?, be conservative
     dvmCompilerClobber(cUnit, r11);
     dvmCompilerClobber(cUnit, r12);
-    dvmCompilerClobber(cUnit, rlr);
+    dvmCompilerClobber(cUnit, r14lr);
 }
 
 /* Clobber all of the temps that might be used by a handler. */
diff --git a/vm/compiler/codegen/arm/Assemble.c b/vm/compiler/codegen/arm/Assemble.c
index a1f47ac..9b24b75 100644
--- a/vm/compiler/codegen/arm/Assemble.c
+++ b/vm/compiler/codegen/arm/Assemble.c
@@ -20,7 +20,6 @@
 #include "../../CompilerInternals.h"
 #include "ArmLIR.h"
 #include "Codegen.h"
-#include <unistd.h>             /* for cacheflush */
 #include <sys/mman.h>           /* for protection change */
 
 #define MAX_ASSEMBLER_RETRIES 10
@@ -642,7 +641,7 @@
                  kFmtUnused, -1, -1,
                  IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_DEF_LIST0
                  | IS_LOAD, "pop", "<!0R>", 2),
-    ENCODING_MAP(kThumb2Push,          0xe8ad0000,
+    ENCODING_MAP(kThumb2Push,          0xe92d0000,
                  kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1,
                  IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_USE_LIST0
@@ -789,7 +788,7 @@
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 0, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1,
                  IS_TERTIARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD,
-                 "ldr", "r!0d, [rpc, #!1d]", 2),
+                 "ldr", "r!0d, [r15pc, #!1d]", 2),
     ENCODING_MAP(kThumb2BCond,        0xf0008000,
                  kFmtBrOffset, -1, -1, kFmtBitBlt, 25, 22, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1,
@@ -877,11 +876,16 @@
                  kFmtBitBlt, 3, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_UNARY_OP,
                  "dmb","#!0B",2),
+    ENCODING_MAP(kThumb2LdrPcReln12,       0xf85f0000,
+                 kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD,
+                 "ldr", "r!0d, [r15pc, -#!1d]", 2),
 };
 
 /*
  * The fake NOP of moving r0 to r0 actually will incur data stalls if r0 is
- * not ready. Since r5 (rFP) is not updated often, it is less likely to
+ * not ready. Since r5FP is not updated often, it is less likely to
  * generate unnecessary stall cycles.
  */
 #define PADDING_MOV_R5_R5               0x1C2D
@@ -893,30 +897,28 @@
 #define UPDATE_CODE_CACHE_PATCHES()
 #endif
 
-/* Write the numbers in the literal pool to the codegen stream */
-static void installDataContent(CompilationUnit *cUnit)
+/* Write the numbers in the constant and class pool to the output stream */
+static void installLiteralPools(CompilationUnit *cUnit)
 {
     int *dataPtr = (int *) ((char *) cUnit->baseAddr + cUnit->dataOffset);
-    ArmLIR *dataLIR = (ArmLIR *) cUnit->wordList;
+    /* Install number of class pointer literals */
+    *dataPtr++ = cUnit->numClassPointers;
+    ArmLIR *dataLIR = (ArmLIR *) cUnit->classPointerList;
+    while (dataLIR) {
+        /*
+         * Install the callsiteinfo pointers into the cells for now. They will
+         * be converted into real pointers in dvmJitInstallClassObjectPointers.
+         */
+        *dataPtr++ = dataLIR->operands[0];
+        dataLIR = NEXT_LIR(dataLIR);
+    }
+    dataLIR = (ArmLIR *) cUnit->literalList;
     while (dataLIR) {
         *dataPtr++ = dataLIR->operands[0];
         dataLIR = NEXT_LIR(dataLIR);
     }
 }
 
-/* Returns the size of a Jit trace description */
-static int jitTraceDescriptionSize(const JitTraceDescription *desc)
-{
-    int runCount;
-    /* Trace end is always of non-meta type (ie isCode == true) */
-    for (runCount = 0; ; runCount++) {
-        if (desc->trace[runCount].frag.isCode &&
-            desc->trace[runCount].frag.runEnd)
-           break;
-    }
-    return sizeof(JitTraceDescription) + ((runCount+1) * sizeof(JitTraceRun));
-}
-
 /*
  * Assemble the LIR into binary instruction format.  Note that we may
  * discover that pc-relative displacements may not fit the selected
@@ -939,14 +941,14 @@
             continue;
         }
 
-        if (lir->isNop) {
+        if (lir->flags.isNop) {
             continue;
         }
 
         if (lir->opcode == kThumbLdrPcRel ||
             lir->opcode == kThumb2LdrPcRel12 ||
             lir->opcode == kThumbAddPcRel ||
-            ((lir->opcode == kThumb2Vldrs) && (lir->operands[1] == rpc))) {
+            ((lir->opcode == kThumb2Vldrs) && (lir->operands[1] == r15pc))) {
             ArmLIR *lirTarget = (ArmLIR *) lir->generic.target;
             intptr_t pc = (lir->generic.offset + 4) & ~3;
             intptr_t target = lirTarget->generic.offset;
@@ -973,7 +975,8 @@
             int delta = target - pc;
             if (delta > 126 || delta < 0) {
                 /* Convert to cmp rx,#0 / b[eq/ne] tgt pair */
-                ArmLIR *newInst = dvmCompilerNew(sizeof(ArmLIR), true);
+                ArmLIR *newInst =
+                    (ArmLIR *)dvmCompilerNew(sizeof(ArmLIR), true);
                 /* Make new branch instruction and insert after */
                 newInst->opcode = kThumbBCond;
                 newInst->operands[0] = 0;
@@ -1027,6 +1030,17 @@
 
             lir->operands[0] = (delta >> 12) & 0x7ff;
             NEXT_LIR(lir)->operands[0] = (delta>> 1) & 0x7ff;
+        } else if (lir->opcode == kThumbBl1) {
+            assert(NEXT_LIR(lir)->opcode == kThumbBl2);
+            /* Both curPC and target are Thumb */
+            intptr_t curPC = startAddr + lir->generic.offset + 4;
+            intptr_t target = lir->operands[1];
+
+            int delta = target - curPC;
+            assert((delta >= -(1<<22)) && (delta <= ((1<<22)-2)));
+
+            lir->operands[0] = (delta >> 12) & 0x7ff;
+            NEXT_LIR(lir)->operands[0] = (delta>> 1) & 0x7ff;
         }
 
         ArmEncodingMap *encoder = &EncodingMap[lir->opcode];
@@ -1130,6 +1144,157 @@
     return kSuccess;
 }
 
+static int assignLiteralOffsetCommon(LIR *lir, int offset)
+{
+    for (;lir != NULL; lir = lir->next) {
+        lir->offset = offset;
+        offset += 4;
+    }
+    return offset;
+}
+
+/* Determine the offset of each literal field */
+static int assignLiteralOffset(CompilationUnit *cUnit, int offset)
+{
+    /* Reserved for the size field of class pointer pool */
+    offset += 4;
+    offset = assignLiteralOffsetCommon(cUnit->classPointerList, offset);
+    offset = assignLiteralOffsetCommon(cUnit->literalList, offset);
+    return offset;
+}
+
+/*
+ * Translation layout in the code cache.  Note that the codeAddress pointer
+ * in JitTable will point directly to the code body (field codeAddress).  The
+ * chain cell offset is stored at codeAddress - 2, and the address of the trace
+ * profile counter is at codeAddress - 6.
+ *
+ *      +----------------------------+
+ *      | Trace Profile Counter addr |  -> 4 bytes (PROF_COUNTER_ADDR_SIZE)
+ *      +----------------------------+
+ *   +--| Offset to chain cell counts|  -> 2 bytes (CHAIN_CELL_OFFSET_SIZE)
+ *   |  +----------------------------+
+ *   |  | Trace profile code         |  <- entry point when profiling
+ *   |  .  -   -   -   -   -   -   - .
+ *   |  | Code body                  |  <- entry point when not profiling
+ *   |  .                            .
+ *   |  |                            |
+ *   |  +----------------------------+
+ *   |  | Chaining Cells             |  -> 12/16 bytes, 4 byte aligned
+ *   |  .                            .
+ *   |  .                            .
+ *   |  |                            |
+ *   |  +----------------------------+
+ *   |  | Gap for large switch stmt  |  -> # cases >= MAX_CHAINED_SWITCH_CASES
+ *   |  +----------------------------+
+ *   +->| Chaining cell counts       |  -> 8 bytes, chain cell counts by type
+ *      +----------------------------+
+ *      | Trace description          |  -> variable sized
+ *      .                            .
+ *      |                            |
+ *      +----------------------------+
+ *      | # Class pointer pool size  |  -> 4 bytes
+ *      +----------------------------+
+ *      | Class pointer pool         |  -> 4-byte aligned, variable size
+ *      .                            .
+ *      .                            .
+ *      |                            |
+ *      +----------------------------+
+ *      | Literal pool               |  -> 4-byte aligned, variable size
+ *      .                            .
+ *      .                            .
+ *      |                            |
+ *      +----------------------------+
+ *
+ */
+
+#define PROF_COUNTER_ADDR_SIZE 4
+#define CHAIN_CELL_OFFSET_SIZE 2
+
+/*
+ * Utility functions to navigate the various parts of a trace. If the layout or
+ * offsets change in the future, only these functions need to be modified; the
+ * changes do not have to be propagated to every use site.
+ */
+static inline char *getTraceBase(const JitEntry *p)
+{
+    return (char*)p->codeAddress -
+        (PROF_COUNTER_ADDR_SIZE + CHAIN_CELL_OFFSET_SIZE +
+         (p->u.info.instructionSet == DALVIK_JIT_ARM ? 0 : 1));
+}
+
+/* Handy function to retrieve the profile count */
+static inline JitTraceCounter_t getProfileCount(const JitEntry *entry)
+{
+    if (entry->dPC == 0 || entry->codeAddress == 0 ||
+        entry->codeAddress == dvmCompilerGetInterpretTemplate())
+        return 0;
+
+    JitTraceCounter_t **p = (JitTraceCounter_t **) getTraceBase(entry);
+
+    return **p;
+}
+
+/* Handy function to reset the profile count */
+static inline void resetProfileCount(const JitEntry *entry)
+{
+    if (entry->dPC == 0 || entry->codeAddress == 0 ||
+        entry->codeAddress == dvmCompilerGetInterpretTemplate())
+        return;
+
+    JitTraceCounter_t **p = (JitTraceCounter_t **) getTraceBase(entry);
+
+    **p = 0;
+}
+
+/* Get the pointer of the chain cell count */
+static inline ChainCellCounts* getChainCellCountsPointer(const char *base)
+{
+    /* 4 is the size of the profile count */
+    u2 *chainCellOffsetP = (u2 *) (base + PROF_COUNTER_ADDR_SIZE);
+    u2 chainCellOffset = *chainCellOffsetP;
+    return (ChainCellCounts *) ((char *) chainCellOffsetP + chainCellOffset);
+}
+
+/* Get the size of all chaining cells */
+static inline u4 getChainCellSize(const ChainCellCounts* pChainCellCounts)
+{
+    int cellSize = 0;
+    int i;
+
+    /* Get total count of chain cells */
+    for (i = 0; i < kChainingCellGap; i++) {
+        if (i != kChainingCellInvokePredicted) {
+            cellSize += pChainCellCounts->u.count[i] *
+                        (CHAIN_CELL_NORMAL_SIZE >> 2);
+        } else {
+            cellSize += pChainCellCounts->u.count[i] *
+                (CHAIN_CELL_PREDICTED_SIZE >> 2);
+        }
+    }
+    return cellSize;
+}
+
+/* Get the starting pointer of the trace description section */
+static JitTraceDescription* getTraceDescriptionPointer(const char *base)
+{
+    ChainCellCounts* pCellCounts = getChainCellCountsPointer(base);
+    return (JitTraceDescription*) ((char*)pCellCounts + sizeof(*pCellCounts));
+}
+
+/* Get the size of a trace description */
+static int getTraceDescriptionSize(const JitTraceDescription *desc)
+{
+    int runCount;
+    /* Trace end is always of non-meta type (ie isCode == true) */
+    for (runCount = 0; ; runCount++) {
+        if (desc->trace[runCount].isCode &&
+            desc->trace[runCount].info.frag.runEnd)
+           break;
+    }
+    return sizeof(JitTraceDescription) + ((runCount+1) * sizeof(JitTraceRun));
+}
+
 #if defined(SIGNATURE_BREAKPOINT)
 /* Inspect the assembled instruction stream to find potential matches */
 static void matchSignatureBreakpoint(const CompilationUnit *cUnit,
@@ -1148,7 +1313,7 @@
             if (j == gDvmJit.signatureBreakpointSize) {
                 LOGD("Signature match starting from offset %#x (%d words)",
                      i*4, gDvmJit.signatureBreakpointSize);
-                int descSize = jitTraceDescriptionSize(cUnit->traceDesc);
+                int descSize = getTraceDescriptionSize(cUnit->traceDesc);
                 JitTraceDescription *newCopy =
                     (JitTraceDescription *) malloc(descSize);
                 memcpy(newCopy, cUnit->traceDesc, descSize);
@@ -1161,55 +1326,19 @@
 #endif
 
 /*
- * Translation layout in the code cache.  Note that the codeAddress pointer
- * in JitTable will point directly to the code body (field codeAddress).  The
- * chain cell offset codeAddress - 2, and (if present) executionCount is at
- * codeAddress - 6.
- *
- *      +----------------------------+
- *      | Execution count            |  -> [Optional] 4 bytes
- *      +----------------------------+
- *   +--| Offset to chain cell counts|  -> 2 bytes
- *   |  +----------------------------+
- *   |  | Code body                  |  -> Start address for translation
- *   |  |                            |     variable in 2-byte chunks
- *   |  .                            .     (JitTable's codeAddress points here)
- *   |  .                            .
- *   |  |                            |
- *   |  +----------------------------+
- *   |  | Chaining Cells             |  -> 12/16 bytes each, must be 4 byte aligned
- *   |  .                            .
- *   |  .                            .
- *   |  |                            |
- *   |  +----------------------------+
- *   |  | Gap for large switch stmt  |  -> # cases >= MAX_CHAINED_SWITCH_CASES
- *   |  +----------------------------+
- *   +->| Chaining cell counts       |  -> 8 bytes, chain cell counts by type
- *      +----------------------------+
- *      | Trace description          |  -> variable sized
- *      .                            .
- *      |                            |
- *      +----------------------------+
- *      | Literal pool               |  -> 4-byte aligned, variable size
- *      .                            .
- *      .                            .
- *      |                            |
- *      +----------------------------+
- *
  * Go over each instruction in the list and calculate the offset from the top
  * before sending them off to the assembler. If an out-of-range branch distance
  * is seen, rearrange the instructions a bit to correct it.
  */
 void dvmCompilerAssembleLIR(CompilationUnit *cUnit, JitTranslationInfo *info)
 {
-    LIR *lir;
     ArmLIR *armLIR;
     int offset = 0;
     int i;
     ChainCellCounts chainCellCounts;
     int descSize =
-        cUnit->wholeMethod ? 0 : jitTraceDescriptionSize(cUnit->traceDesc);
-    int chainingCellGap;
+        cUnit->methodJitMode ? 0 : getTraceDescriptionSize(cUnit->traceDesc);
+    int chainingCellGap = 0;
 
     info->instructionSet = cUnit->instructionSet;
 
@@ -1218,9 +1347,9 @@
          armLIR;
          armLIR = NEXT_LIR(armLIR)) {
         armLIR->generic.offset = offset;
-        if (armLIR->opcode >= 0 && !armLIR->isNop) {
-            armLIR->size = EncodingMap[armLIR->opcode].size * 2;
-            offset += armLIR->size;
+        if (armLIR->opcode >= 0 && !armLIR->flags.isNop) {
+            armLIR->flags.size = EncodingMap[armLIR->opcode].size * 2;
+            offset += armLIR->flags.size;
         } else if (armLIR->opcode == kArmPseudoPseudoAlign4) {
             if (offset & 0x2) {
                 offset += 2;
@@ -1235,41 +1364,43 @@
     /* Const values have to be word aligned */
     offset = (offset + 3) & ~3;
 
-    /*
-     * Get the gap (# of u4) between the offset of chaining cell count and
-     * the bottom of real chaining cells. If the translation has chaining
-     * cells, the gap is guaranteed to be multiples of 4.
-     */
-    chainingCellGap = (offset - cUnit->chainingCellBottom->offset) >> 2;
-
-    /* Add space for chain cell counts & trace description */
     u4 chainCellOffset = offset;
-    ArmLIR *chainCellOffsetLIR = (ArmLIR *) cUnit->chainCellOffsetLIR;
-    assert(chainCellOffsetLIR);
-    assert(chainCellOffset < 0x10000);
-    assert(chainCellOffsetLIR->opcode == kArm16BitData &&
-           chainCellOffsetLIR->operands[0] == CHAIN_CELL_OFFSET_TAG);
+    ArmLIR *chainCellOffsetLIR = NULL;
 
-    /*
-     * Replace the CHAIN_CELL_OFFSET_TAG with the real value. If trace
-     * profiling is enabled, subtract 4 (occupied by the counter word) from
-     * the absolute offset as the value stored in chainCellOffsetLIR is the
-     * delta from &chainCellOffsetLIR to &ChainCellCounts.
-     */
-    chainCellOffsetLIR->operands[0] =
-        gDvmJit.profile ? (chainCellOffset - 4) : chainCellOffset;
+    if (!cUnit->methodJitMode) {
+        /*
+         * Get the gap (# of u4) between the offset of chaining cell count and
+         * the bottom of real chaining cells. If the translation has chaining
+         * cells, the gap is guaranteed to be multiples of 4.
+         */
+        chainingCellGap = (offset - cUnit->chainingCellBottom->offset) >> 2;
 
-    offset += sizeof(chainCellCounts) + descSize;
+        /* Add space for chain cell counts & trace description */
+        chainCellOffsetLIR = (ArmLIR *) cUnit->chainCellOffsetLIR;
+        assert(chainCellOffsetLIR);
+        assert(chainCellOffset < 0x10000);
+        assert(chainCellOffsetLIR->opcode == kArm16BitData &&
+               chainCellOffsetLIR->operands[0] == CHAIN_CELL_OFFSET_TAG);
 
-    assert((offset & 0x3) == 0);  /* Should still be word aligned */
+        /*
+         * Adjust the CHAIN_CELL_OFFSET_TAG LIR's offset to remove the
+         * space occupied by the pointer to the trace profiling counter.
+         */
+        chainCellOffsetLIR->operands[0] = chainCellOffset - 4;
+
+        offset += sizeof(chainCellCounts) + descSize;
+
+        assert((offset & 0x3) == 0);  /* Should still be word aligned */
+    }
 
     /* Set up offsets for literals */
     cUnit->dataOffset = offset;
 
-    for (lir = cUnit->wordList; lir; lir = lir->next) {
-        lir->offset = offset;
-        offset += 4;
-    }
+    /*
+     * Assign each class pointer/constant an offset from the beginning of the
+     * compilation unit.
+     */
+    offset = assignLiteralOffset(cUnit, offset);
 
     cUnit->totalSize = offset;
 
@@ -1280,7 +1411,7 @@
     }
 
     /* Allocate enough space for the code block */
-    cUnit->codeBuffer = dvmCompilerNew(chainCellOffset, true);
+    cUnit->codeBuffer = (unsigned char *)dvmCompilerNew(chainCellOffset, true);
     if (cUnit->codeBuffer == NULL) {
         LOGE("Code buffer allocation failure\n");
         cUnit->baseAddr = NULL;
@@ -1299,8 +1430,10 @@
             break;
         case kRetryAll:
             if (cUnit->assemblerRetries < MAX_ASSEMBLER_RETRIES) {
-                /* Restore pristine chain cell marker on retry */
-                chainCellOffsetLIR->operands[0] = CHAIN_CELL_OFFSET_TAG;
+                if (!cUnit->methodJitMode) {
+                    /* Restore pristine chain cell marker on retry */
+                    chainCellOffsetLIR->operands[0] = CHAIN_CELL_OFFSET_TAG;
+                }
                 return;
             }
             /* Too many retries - reset and try cutting the trace in half */
@@ -1324,6 +1457,22 @@
     /* Don't go all the way if the goal is just to get the verbose output */
     if (info->discardResult) return;
 
+    /*
+     * The code cache might disappear - acquire the lock and check the version.
+     * Continue holding the lock until the translation cache update is complete.
+     * These actions are required here in the compiler thread because
+     * it is unaffected by suspend requests and doesn't know if a
+     * translation cache flush is in progress.
+     */
+    dvmLockMutex(&gDvmJit.compilerLock);
+    if (info->cacheVersion != gDvmJit.cacheVersion) {
+        /* Cache changed - discard current translation */
+        info->discardResult = true;
+        info->codeAddress = NULL;
+        dvmUnlockMutex(&gDvmJit.compilerLock);
+        return;
+    }
+
     cUnit->baseAddr = (char *) gDvmJit.codeCache + gDvmJit.codeCacheByteUsed;
     gDvmJit.codeCacheByteUsed += offset;
 
@@ -1333,36 +1482,44 @@
     memcpy((char*)cUnit->baseAddr, cUnit->codeBuffer, chainCellOffset);
     gDvmJit.numCompilations++;
 
-    /* Install the chaining cell counts */
-    for (i=0; i< kChainingCellGap; i++) {
-        chainCellCounts.u.count[i] = cUnit->numChainingCells[i];
+    if (!cUnit->methodJitMode) {
+        /* Install the chaining cell counts */
+        for (i=0; i< kChainingCellGap; i++) {
+            chainCellCounts.u.count[i] = cUnit->numChainingCells[i];
+        }
+
+        /* Set the gap number in the chaining cell count structure */
+        chainCellCounts.u.count[kChainingCellGap] = chainingCellGap;
+
+        memcpy((char*)cUnit->baseAddr + chainCellOffset, &chainCellCounts,
+               sizeof(chainCellCounts));
+
+        /* Install the trace description */
+        memcpy((char*) cUnit->baseAddr + chainCellOffset +
+                       sizeof(chainCellCounts),
+               cUnit->traceDesc, descSize);
     }
 
-    /* Set the gap number in the chaining cell count structure */
-    chainCellCounts.u.count[kChainingCellGap] = chainingCellGap;
-
-    memcpy((char*)cUnit->baseAddr + chainCellOffset, &chainCellCounts,
-           sizeof(chainCellCounts));
-
-    /* Install the trace description */
-    memcpy((char*)cUnit->baseAddr + chainCellOffset + sizeof(chainCellCounts),
-           cUnit->traceDesc, descSize);
-
     /* Write the literals directly into the code cache */
-    installDataContent(cUnit);
+    installLiteralPools(cUnit);
 
     /* Flush dcache and invalidate the icache to maintain coherence */
-    cacheflush((long)cUnit->baseAddr,
-               (long)((char *) cUnit->baseAddr + offset), 0);
+    dvmCompilerCacheFlush((long)cUnit->baseAddr,
+                          (long)((char *) cUnit->baseAddr + offset), 0);
     UPDATE_CODE_CACHE_PATCHES();
 
     PROTECT_CODE_CACHE(cUnit->baseAddr, offset);
 
+    /* Translation cache update complete - release lock */
+    dvmUnlockMutex(&gDvmJit.compilerLock);
+
     /* Record code entry point and instruction set */
     info->codeAddress = (char*)cUnit->baseAddr + cUnit->headerSize;
     /* If applicable, mark low bit to denote thumb */
     if (info->instructionSet != DALVIK_JIT_ARM)
         info->codeAddress = (char*)info->codeAddress + 1;
+    /* transfer the size of the profiling code */
+    info->profileCodeSize = cUnit->profileCodeSize;
 }
 
 /*
@@ -1448,7 +1605,7 @@
         UNPROTECT_CODE_CACHE(branchAddr, sizeof(*branchAddr));
 
         *branchAddr = newInst;
-        cacheflush((long)branchAddr, (long)branchAddr + 4, 0);
+        dvmCompilerCacheFlush((long)branchAddr, (long)branchAddr + 4, 0);
         UPDATE_CODE_CACHE_PATCHES();
 
         PROTECT_CODE_CACHE(branchAddr, sizeof(*branchAddr));
@@ -1487,8 +1644,8 @@
          * will bring the uninitialized chaining cell to life.
          */
         android_atomic_release_store((int32_t)newContent->clazz,
-            (void*) &cellAddr->clazz);
-        cacheflush((intptr_t) cellAddr, (intptr_t) (cellAddr+1), 0);
+            (volatile int32_t *)(void *)&cellAddr->clazz);
+        dvmCompilerCacheFlush((intptr_t) cellAddr, (intptr_t) (cellAddr+1), 0);
         UPDATE_CODE_CACHE_PATCHES();
 
         PROTECT_CODE_CACHE(cellAddr, sizeof(*cellAddr));
@@ -1531,8 +1688,14 @@
      */
     } else if (gDvmJit.compilerICPatchIndex < COMPILER_IC_PATCH_QUEUE_SIZE) {
         int index = gDvmJit.compilerICPatchIndex++;
+        const ClassObject *clazz = newContent->clazz;
+
         gDvmJit.compilerICPatchQueue[index].cellAddr = cellAddr;
         gDvmJit.compilerICPatchQueue[index].cellContent = *newContent;
+        gDvmJit.compilerICPatchQueue[index].classDescriptor = clazz->descriptor;
+        gDvmJit.compilerICPatchQueue[index].classLoader = clazz->classLoader;
+        /* For verification purpose only */
+        gDvmJit.compilerICPatchQueue[index].serialNumber = clazz->serialNumber;
 #if defined(WITH_JIT_TUNING)
         gDvmJit.icPatchQueued++;
 #endif
@@ -1562,7 +1725,7 @@
  *      next safe point.
  */
 const Method *dvmJitToPatchPredictedChain(const Method *method,
-                                          InterpState *interpState,
+                                          Thread *self,
                                           PredictedChainingCell *cell,
                                           const ClassObject *clazz)
 {
@@ -1579,13 +1742,13 @@
          * trigger immediate patching and will continue to fail to match with
          * a real clazz pointer.
          */
-        cell->clazz = (void *) PREDICTED_CHAIN_FAKE_CLAZZ;
+        cell->clazz = (ClassObject *) PREDICTED_CHAIN_FAKE_CLAZZ;
 
         UPDATE_CODE_CACHE_PATCHES();
         PROTECT_CODE_CACHE(cell, sizeof(*cell));
         goto done;
     }
-    int tgtAddr = (int) dvmJitGetCodeAddr(method->insns);
+    int tgtAddr = (int) dvmJitGetTraceAddr(method->insns);
 
     /*
      * Compilation not made yet for the callee. Reset the counter to a small
@@ -1602,7 +1765,7 @@
     PredictedChainingCell newCell;
 
     if (cell->clazz == NULL) {
-        newRechainCount = interpState->icRechainCount;
+        newRechainCount = self->icRechainCount;
     }
 
     int baseAddr = (int) cell + 4;   // PC is cur_addr + 4
@@ -1623,7 +1786,7 @@
     inlineCachePatchEnqueue(cell, &newCell);
 #endif
 done:
-    interpState->icRechainCount = newRechainCount;
+    self->icRechainCount = newRechainCount;
     return method;
 }
 
@@ -1656,10 +1819,16 @@
     maxAddr = (PredictedChainingCell *) gDvmJit.codeCache;
 
     for (i = 0; i < gDvmJit.compilerICPatchIndex; i++) {
-        PredictedChainingCell *cellAddr =
-            gDvmJit.compilerICPatchQueue[i].cellAddr;
-        PredictedChainingCell *cellContent =
-            &gDvmJit.compilerICPatchQueue[i].cellContent;
+        ICPatchWorkOrder *workOrder = &gDvmJit.compilerICPatchQueue[i];
+        PredictedChainingCell *cellAddr = workOrder->cellAddr;
+        PredictedChainingCell *cellContent = &workOrder->cellContent;
+        ClassObject *clazz = dvmFindClassNoInit(workOrder->classDescriptor,
+                                                workOrder->classLoader);
+
+        assert(clazz->serialNumber == workOrder->serialNumber);
+
+        /* Use the newly resolved clazz pointer */
+        cellContent->clazz = clazz;
 
         COMPILER_TRACE_CHAINING(
             LOGD("Jit Runtime: predicted chain %p from %s to %s (%s) "
@@ -1676,7 +1845,7 @@
     }
 
     /* Then synchronize the I/D cache */
-    cacheflush((long) minAddr, (long) (maxAddr+1), 0);
+    dvmCompilerCacheFlush((long) minAddr, (long) (maxAddr+1), 0);
     UPDATE_CODE_CACHE_PATCHES();
 
     PROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);
@@ -1692,35 +1861,22 @@
  * the incoming codeAddr is a thumb code address, and therefore has
  * the low bit set.
  */
-u4* dvmJitUnchain(void* codeAddr)
+static u4* unchainSingle(JitEntry *trace)
 {
-    u2* pChainCellOffset = (u2*)((char*)codeAddr - 3);
-    u2 chainCellOffset = *pChainCellOffset;
-    ChainCellCounts *pChainCellCounts =
-          (ChainCellCounts*)((char*)codeAddr + chainCellOffset - 3);
-    int cellSize;
+    const char *base = getTraceBase(trace);
+    ChainCellCounts *pChainCellCounts = getChainCellCountsPointer(base);
+    int cellSize = getChainCellSize(pChainCellCounts);
     u4* pChainCells;
-    u4* pStart;
     u4 newInst;
     int i,j;
     PredictedChainingCell *predChainCell;
 
-    /* Get total count of chain cells */
-    for (i = 0, cellSize = 0; i < kChainingCellGap; i++) {
-        if (i != kChainingCellInvokePredicted) {
-            cellSize += pChainCellCounts->u.count[i] * (CHAIN_CELL_NORMAL_SIZE >> 2);
-        } else {
-            cellSize += pChainCellCounts->u.count[i] *
-                (CHAIN_CELL_PREDICTED_SIZE >> 2);
-        }
-    }
-
     if (cellSize == 0)
         return (u4 *) pChainCellCounts;
 
     /* Locate the beginning of the chain cell region */
-    pStart = pChainCells = ((u4 *) pChainCellCounts) - cellSize -
-             pChainCellCounts->u.count[kChainingCellGap];
+    pChainCells = ((u4 *) pChainCellCounts) - cellSize -
+                  pChainCellCounts->u.count[kChainingCellGap];
 
     /* The cells are sorted in order - walk through them and reset */
     for (i = 0; i < kChainingCellGap; i++) {
@@ -1784,20 +1940,21 @@
 
         for (i = 0; i < gDvmJit.jitTableSize; i++) {
             if (gDvmJit.pJitEntryTable[i].dPC &&
-                   gDvmJit.pJitEntryTable[i].codeAddress &&
-                   (gDvmJit.pJitEntryTable[i].codeAddress !=
-                    dvmCompilerGetInterpretTemplate())) {
+                !gDvmJit.pJitEntryTable[i].u.info.isMethodEntry &&
+                gDvmJit.pJitEntryTable[i].codeAddress &&
+                (gDvmJit.pJitEntryTable[i].codeAddress !=
+                 dvmCompilerGetInterpretTemplate())) {
                 u4* lastAddress;
-                lastAddress =
-                      dvmJitUnchain(gDvmJit.pJitEntryTable[i].codeAddress);
+                lastAddress = unchainSingle(&gDvmJit.pJitEntryTable[i]);
                 if (lowAddress == NULL ||
-                      (u4*)gDvmJit.pJitEntryTable[i].codeAddress < lowAddress)
+                      (u4*)gDvmJit.pJitEntryTable[i].codeAddress <
+                      lowAddress)
                     lowAddress = lastAddress;
                 if (lastAddress > highAddress)
                     highAddress = lastAddress;
             }
         }
-        cacheflush((long)lowAddress, (long)highAddress, 0);
+        dvmCompilerCacheFlush((long)lowAddress, (long)highAddress, 0);
         UPDATE_CODE_CACHE_PATCHES();
 
         PROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);
@@ -1826,52 +1983,33 @@
     return 0;
 }
 
-static char *getTraceBase(const JitEntry *p)
-{
-    return (char*)p->codeAddress -
-        (6 + (p->u.info.instructionSet == DALVIK_JIT_ARM ? 0 : 1));
-}
-
 /* Dumps profile info for a single trace */
 static int dumpTraceProfile(JitEntry *p, bool silent, bool reset,
                             unsigned long sum)
 {
-    ChainCellCounts* pCellCounts;
-    char* traceBase;
-    u4* pExecutionCount;
-    u4 executionCount;
-    u2* pCellOffset;
-    JitTraceDescription *desc;
-    const Method* method;
     int idx;
 
-    traceBase = getTraceBase(p);
-
     if (p->codeAddress == NULL) {
         if (!silent)
-            LOGD("TRACEPROFILE 0x%08x 0 NULL 0 0", (int)traceBase);
+            LOGD("TRACEPROFILE NULL");
         return 0;
     }
     if (p->codeAddress == dvmCompilerGetInterpretTemplate()) {
         if (!silent)
-            LOGD("TRACEPROFILE 0x%08x 0 INTERPRET_ONLY  0 0", (int)traceBase);
+            LOGD("TRACEPROFILE INTERPRET_ONLY");
         return 0;
     }
-
-    pExecutionCount = (u4*) (traceBase);
-    executionCount = *pExecutionCount;
+    JitTraceCounter_t count = getProfileCount(p);
     if (reset) {
-        *pExecutionCount =0;
+        resetProfileCount(p);
     }
     if (silent) {
-        return executionCount;
+        return count;
     }
-    pCellOffset = (u2*) (traceBase + 4);
-    pCellCounts = (ChainCellCounts*) ((char *)pCellOffset + *pCellOffset);
-    desc = (JitTraceDescription*) ((char*)pCellCounts + sizeof(*pCellCounts));
-    method = desc->method;
+    JitTraceDescription *desc = getTraceDescriptionPointer(getTraceBase(p));
+    const Method *method = desc->method;
     char *methodDesc = dexProtoCopyMethodDescriptor(&method->prototype);
-    jitProfileAddrToLine addrToLine = {0, desc->trace[0].frag.startOffset};
+    jitProfileAddrToLine addrToLine = {0, desc->trace[0].info.frag.startOffset};
 
     /*
      * We may end up decoding the debug information for the same method
@@ -1888,18 +2026,18 @@
                        addrToLineCb, NULL, &addrToLine);
 
     LOGD("TRACEPROFILE 0x%08x % 10d %5.2f%% [%#x(+%d), %d] %s%s;%s",
-         (int)traceBase,
-         executionCount,
-         ((float ) executionCount) / sum * 100.0,
-         desc->trace[0].frag.startOffset,
-         desc->trace[0].frag.numInsts,
+         (int) getTraceBase(p),
+         count,
+         ((float ) count) / sum * 100.0,
+         desc->trace[0].info.frag.startOffset,
+         desc->trace[0].info.frag.numInsts,
          addrToLine.lineNum,
          method->clazz->descriptor, method->name, methodDesc);
     free(methodDesc);
 
     /* Find the last fragment (ie runEnd is set) */
     for (idx = 0;
-         desc->trace[idx].frag.isCode && !desc->trace[idx].frag.runEnd;
+         desc->trace[idx].isCode && !desc->trace[idx].info.frag.runEnd;
          idx++) {
     }
 
@@ -1907,64 +2045,45 @@
      * runEnd must come with a JitCodeDesc frag. If isCode is false it must
      * be a meta info field (only used by callsite info for now).
      */
-    if (!desc->trace[idx].frag.isCode) {
-        const Method *method = desc->trace[idx+1].meta;
+    if (!desc->trace[idx].isCode) {
+        const Method *method = (const Method *)
+            desc->trace[idx+JIT_TRACE_CUR_METHOD-1].info.meta;
         char *methodDesc = dexProtoCopyMethodDescriptor(&method->prototype);
         /* Print the callee info in the trace */
         LOGD("    -> %s%s;%s", method->clazz->descriptor, method->name,
              methodDesc);
     }
 
-    return executionCount;
+    return count;
 }
 
 /* Create a copy of the trace descriptor of an existing compilation */
 JitTraceDescription *dvmCopyTraceDescriptor(const u2 *pc,
                                             const JitEntry *knownEntry)
 {
-    const JitEntry *jitEntry = knownEntry ? knownEntry : dvmFindJitEntry(pc);
-    if (jitEntry == NULL) return NULL;
+    const JitEntry *jitEntry = knownEntry ? knownEntry
+                                          : dvmJitFindEntry(pc, false);
+    if ((jitEntry == NULL) || (jitEntry->codeAddress == 0))
+        return NULL;
 
-    /* Find out the startint point */
-    char *traceBase = getTraceBase(jitEntry);
-
-    /* Then find out the starting point of the chaining cell */
-    u2 *pCellOffset = (u2*) (traceBase + 4);
-    ChainCellCounts *pCellCounts =
-        (ChainCellCounts*) ((char *)pCellOffset + *pCellOffset);
-
-    /* From there we can find out the starting point of the trace descriptor */
     JitTraceDescription *desc =
-        (JitTraceDescription*) ((char*)pCellCounts + sizeof(*pCellCounts));
+        getTraceDescriptionPointer(getTraceBase(jitEntry));
 
     /* Now make a copy and return */
-    int descSize = jitTraceDescriptionSize(desc);
+    int descSize = getTraceDescriptionSize(desc);
     JitTraceDescription *newCopy = (JitTraceDescription *) malloc(descSize);
     memcpy(newCopy, desc, descSize);
     return newCopy;
 }
 
-/* Handy function to retrieve the profile count */
-static inline int getProfileCount(const JitEntry *entry)
-{
-    if (entry->dPC == 0 || entry->codeAddress == 0 ||
-        entry->codeAddress == dvmCompilerGetInterpretTemplate())
-        return 0;
-
-    u4 *pExecutionCount = (u4 *) getTraceBase(entry);
-
-    return *pExecutionCount;
-}
-
-
 /* qsort callback function */
 static int sortTraceProfileCount(const void *entry1, const void *entry2)
 {
-    const JitEntry *jitEntry1 = entry1;
-    const JitEntry *jitEntry2 = entry2;
+    const JitEntry *jitEntry1 = (const JitEntry *)entry1;
+    const JitEntry *jitEntry2 = (const JitEntry *)entry2;
 
-    int count1 = getProfileCount(jitEntry1);
-    int count2 = getProfileCount(jitEntry2);
+    JitTraceCounter_t count1 = getProfileCount(jitEntry1);
+    JitTraceCounter_t count2 = getProfileCount(jitEntry2);
     return (count1 == count2) ? 0 : ((count1 > count2) ? -1 : 1);
 }
 
@@ -1980,7 +2099,7 @@
     dvmLockMutex(&gDvmJit.tableLock);
 
     /* Sort the entries by descending order */
-    sortedEntries = malloc(sizeof(JitEntry) * gDvmJit.jitTableSize);
+    sortedEntries = (JitEntry *)malloc(sizeof(JitEntry) * gDvmJit.jitTableSize);
     if (sortedEntries == NULL)
         goto done;
     memcpy(sortedEntries, gDvmJit.pJitEntryTable,
@@ -2023,8 +2142,10 @@
         }
         JitTraceDescription* desc =
             dvmCopyTraceDescriptor(NULL, &sortedEntries[i]);
-        dvmCompilerWorkEnqueue(sortedEntries[i].dPC,
-                               kWorkOrderTraceDebug, desc);
+        if (desc) {
+            dvmCompilerWorkEnqueue(sortedEntries[i].dPC,
+                                   kWorkOrderTraceDebug, desc);
+        }
     }
 
     free(sortedEntries);
@@ -2033,6 +2154,145 @@
     return;
 }
 
+static void findClassPointersSingleTrace(char *base, void (*callback)(void *))
+{
+    unsigned int chainTypeIdx, chainIdx;
+    ChainCellCounts *pChainCellCounts = getChainCellCountsPointer(base);
+    int cellSize = getChainCellSize(pChainCellCounts);
+    /* Scan the chaining cells */
+    if (cellSize) {
+        /* Locate the beginning of the chain cell region */
+        u4 *pChainCells = ((u4 *) pChainCellCounts) - cellSize -
+            pChainCellCounts->u.count[kChainingCellGap];
+        /* The cells are sorted in order - walk through them */
+        for (chainTypeIdx = 0; chainTypeIdx < kChainingCellGap;
+             chainTypeIdx++) {
+            if (chainTypeIdx != kChainingCellInvokePredicted) {
+                /* In 32-bit words */
+                pChainCells += (CHAIN_CELL_NORMAL_SIZE >> 2) *
+                    pChainCellCounts->u.count[chainTypeIdx];
+                continue;
+            }
+            for (chainIdx = 0;
+                 chainIdx < pChainCellCounts->u.count[chainTypeIdx];
+                 chainIdx++) {
+                PredictedChainingCell *cell =
+                    (PredictedChainingCell *) pChainCells;
+                /*
+                 * Report the cell if it contains a sane class
+                 * pointer.
+                 */
+                if (cell->clazz != NULL &&
+                    cell->clazz !=
+                      (ClassObject *) PREDICTED_CHAIN_FAKE_CLAZZ) {
+                    callback(&cell->clazz);
+                }
+                pChainCells += CHAIN_CELL_PREDICTED_SIZE >> 2;
+            }
+        }
+    }
+
+    /* Scan the class pointer pool */
+    JitTraceDescription *desc = getTraceDescriptionPointer(base);
+    int descSize = getTraceDescriptionSize(desc);
+    int *classPointerP = (int *) ((char *) desc + descSize);
+    int numClassPointers = *classPointerP++;
+    for (; numClassPointers; numClassPointers--, classPointerP++) {
+        callback(classPointerP);
+    }
+}
+
+/*
+ * Scan each translation for class pointers and pass their addresses to the
+ * callback function. Currently such pointers can be found in the class pointer
+ * pool and in the clazz field of predicted chaining cells.
+ */
+void dvmJitScanAllClassPointers(void (*callback)(void *))
+{
+    UNPROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);
+
+    /* Handle the inflight compilation first */
+    if (gDvmJit.inflightBaseAddr)
+        findClassPointersSingleTrace((char *) gDvmJit.inflightBaseAddr,
+                                     callback);
+
+    if (gDvmJit.pJitEntryTable != NULL) {
+        unsigned int traceIdx;
+        dvmLockMutex(&gDvmJit.tableLock);
+        for (traceIdx = 0; traceIdx < gDvmJit.jitTableSize; traceIdx++) {
+            const JitEntry *entry = &gDvmJit.pJitEntryTable[traceIdx];
+            if (entry->dPC &&
+                !entry->u.info.isMethodEntry &&
+                entry->codeAddress &&
+                (entry->codeAddress != dvmCompilerGetInterpretTemplate())) {
+                char *base = getTraceBase(entry);
+                findClassPointersSingleTrace(base, callback);
+            }
+        }
+        dvmUnlockMutex(&gDvmJit.tableLock);
+    }
+    UPDATE_CODE_CACHE_PATCHES();
+
+    PROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);
+}
+
+/*
+ * Put the final touches on the class object pointer pool by installing the
+ * actual pointers. The thread has to be in the running state.
+ */
+void dvmJitInstallClassObjectPointers(CompilationUnit *cUnit, char *codeAddress)
+{
+    char *base = codeAddress - cUnit->headerSize -
+                 (cUnit->instructionSet == DALVIK_JIT_ARM ? 0 : 1);
+
+    /* Scan the class pointer pool */
+    JitTraceDescription *desc = getTraceDescriptionPointer(base);
+    int descSize = getTraceDescriptionSize(desc);
+    intptr_t *classPointerP = (int *) ((char *) desc + descSize);
+    int numClassPointers = *(int *)classPointerP++;
+    intptr_t *startClassPointerP = classPointerP;
+
+    UNPROTECT_CODE_CACHE(startClassPointerP,
+                         numClassPointers * sizeof(intptr_t));
+    /*
+     * Change the thread state to THREAD_RUNNING so that GC won't happen
+     * while the assembler looks up the class pointers.
+     */
+    dvmChangeStatus(gDvmJit.compilerThread, THREAD_RUNNING);
+#if defined(WITH_JIT_TUNING)
+    u8 startTime = dvmGetRelativeTimeUsec();
+#endif
+    for (;numClassPointers; numClassPointers--) {
+        CallsiteInfo *callsiteInfo = (CallsiteInfo *) *classPointerP;
+        ClassObject *clazz = dvmFindClassNoInit(
+            callsiteInfo->classDescriptor, callsiteInfo->classLoader);
+        assert(!strcmp(clazz->descriptor, callsiteInfo->classDescriptor));
+        *classPointerP++ = (intptr_t) clazz;
+    }
+
+    /*
+     * Register the base address so that if GC kicks in after the thread state
+     * has been changed to VMWAIT and before the compiled code is registered
+     * in the JIT table, its content can be patched if class objects are
+     * moved.
+     */
+    gDvmJit.inflightBaseAddr = base;
+
+#if defined(WITH_JIT_TUNING)
+    u8 blockTime = dvmGetRelativeTimeUsec() - startTime;
+    gDvmJit.compilerThreadBlockGCTime += blockTime;
+    if (blockTime > gDvmJit.maxCompilerThreadBlockGCTime)
+        gDvmJit.maxCompilerThreadBlockGCTime = blockTime;
+    gDvmJit.numCompilerThreadBlockGC++;
+#endif
+    /* Change the thread state back to VMWAIT */
+    dvmChangeStatus(gDvmJit.compilerThread, THREAD_VMWAIT);
+
+    UPDATE_CODE_CACHE_PATCHES();
+
+    PROTECT_CODE_CACHE(startClassPointerP, numClassPointers * sizeof(intptr_t));
+}
+
 #if defined(WITH_SELF_VERIFICATION)
 /*
  * The following are used to keep compiled loads and stores from modifying
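For context on the new dvmJitScanAllClassPointers() hook above: it hands a caller-supplied callback the address of every embedded ClassObject* slot in the code cache (the clazz field of each predicted chaining cell plus the per-trace class pointer pool). A minimal sketch of how a moving collector might consume it follows; remapJitClassPointer and mappedAddress are hypothetical names, and only the void (*)(void *) callback shape comes from the code above.

    /* Hypothetical GC visitor: forward one embedded class pointer slot. */
    static void remapJitClassPointer(void *addr)
    {
        ClassObject **slot = (ClassObject **) addr;
        /* mappedAddress() is an assumed helper returning the object's
         * post-move location; a non-moving GC could simply mark *slot. */
        *slot = (ClassObject *) mappedAddress(*slot);
    }

    /* Invoked with the heap quiescent, e.g. during a compacting collection. */
    dvmJitScanAllClassPointers(remapJitClassPointer);
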
diff --git a/vm/compiler/codegen/arm/CalloutHelper.h b/vm/compiler/codegen/arm/CalloutHelper.h
index c432f82..931cf0f 100644
--- a/vm/compiler/codegen/arm/CalloutHelper.h
+++ b/vm/compiler/codegen/arm/CalloutHelper.h
@@ -82,7 +82,7 @@
 
 /* Originally declared in compiler/codegen/arm/Assemble.c */
 const Method *dvmJitToPatchPredictedChain(const Method *method,
-                                          InterpState *interpState,
+                                          Thread *self,
                                           PredictedChainingCell *cell,
                                           const ClassObject *clazz);
 
@@ -109,23 +109,6 @@
 /*
  * Functions declared in gDvmInlineOpsTable[] are used for
  * OP_EXECUTE_INLINE & OP_EXECUTE_INLINE_RANGE.
- *
- *      org_apache_harmony_dalvik_NativeTestTarget_emptyInlineMethod
- *      javaLangString_charAt
- *      javaLangString_compareTo
- *      javaLangString_equals
- *      javaLangString_indexOf_I
- *      javaLangString_indexOf_II
- *      javaLangString_length
- *      javaLangMath_abs_int
- *      javaLangMath_abs_long
- *      javaLangMath_abs_float
- *      javaLangMath_abs_double
- *      javaLangMath_min_int
- *      javaLangMath_max_int
- *      javaLangMath_sqrt
- *      javaLangMath_cos
- *      javaLangMath_sin
  */
 double sqrt(double x);  // INLINE_MATH_SQRT
 
diff --git a/vm/compiler/codegen/arm/Codegen.h b/vm/compiler/codegen/arm/Codegen.h
index 8522e62..330619b 100644
--- a/vm/compiler/codegen/arm/Codegen.h
+++ b/vm/compiler/codegen/arm/Codegen.h
@@ -45,8 +45,7 @@
 
 static bool genConversionPortable(CompilationUnit *cUnit, MIR *mir);
 
-#if defined(WITH_DEADLOCK_PREDICTION) || defined(WITH_MONITOR_TRACKING) || \
-    defined(__ARM_ARCH_5__)
+#if defined(__ARM_ARCH_5__)
 static void genMonitorPortable(CompilationUnit *cUnit, MIR *mir);
 #endif
 
diff --git a/vm/compiler/codegen/arm/CodegenCommon.c b/vm/compiler/codegen/arm/CodegenCommon.c
index 4a20579..ae41fe9 100644
--- a/vm/compiler/codegen/arm/CodegenCommon.c
+++ b/vm/compiler/codegen/arm/CodegenCommon.c
@@ -35,14 +35,12 @@
 static void setMemRefType(ArmLIR *lir, bool isLoad, int memType)
 {
     u8 *maskPtr;
-    u8 mask;
-    assert( EncodingMap[lir->opcode].flags & (IS_LOAD | IS_STORE));
+    u8 mask = ENCODE_MEM;
+    assert(EncodingMap[lir->opcode].flags & (IS_LOAD | IS_STORE));
     if (isLoad) {
         maskPtr = &lir->useMask;
-        mask = ENCODE_MEM_USE;
     } else {
         maskPtr = &lir->defMask;
-        mask = ENCODE_MEM_DEF;
     }
     /* Clear out the memref flags */
     *maskPtr &= ~mask;
@@ -50,14 +48,19 @@
     switch(memType) {
         case kLiteral:
             assert(isLoad);
-            *maskPtr |= (ENCODE_LITERAL | ENCODE_LITPOOL_REF);
+            *maskPtr |= ENCODE_LITERAL;
             break;
         case kDalvikReg:
-            *maskPtr |= (ENCODE_DALVIK_REG | ENCODE_FRAME_REF);
+            *maskPtr |= ENCODE_DALVIK_REG;
             break;
         case kHeapRef:
             *maskPtr |= ENCODE_HEAP_REF;
             break;
+        case kMustNotAlias:
+            /* Currently only loads can be marked as kMustNotAlias */
+            assert(!(EncodingMap[lir->opcode].flags & IS_STORE));
+            *maskPtr |= ENCODE_MUST_NOT_ALIAS;
+            break;
         default:
             LOGE("Jit: invalid memref kind - %d", memType);
             assert(0);  // Bail if debug build, set worst-case in the field
@@ -66,7 +69,7 @@
 }
 
 /*
- * Mark load/store instructions that access Dalvik registers through rFP +
+ * Mark load/store instructions that access Dalvik registers through r5FP +
  * offset.
  */
 static void annotateDalvikRegAccess(ArmLIR *lir, int regId, bool isLoad)
@@ -84,9 +87,9 @@
 }
 
 /*
- * Decode the register id and mark the corresponding bit(s).
+ * Decode the register id.
  */
-static inline void setupRegMask(u8 *mask, int reg)
+static inline u8 getRegMaskCommon(int reg)
 {
     u8 seed;
     int shift;
@@ -100,7 +103,21 @@
     shift = FPREG(reg) ? kFPReg0 : 0;
     /* Expand the double register id into single offset */
     shift += regId;
-    *mask |= seed << shift;
+    return (seed << shift);
+}
+
+/* External version of getRegMaskCommon */
+u8 dvmGetRegResourceMask(int reg)
+{
+    return getRegMaskCommon(reg);
+}
+
+/*
+ * Mark the corresponding bit(s).
+ */
+static inline void setupRegMask(u8 *mask, int reg)
+{
+    *mask |= getRegMaskCommon(reg);
 }
 
 /*
@@ -124,9 +141,13 @@
         setMemRefType(lir, flags & IS_LOAD, kHeapRef);
     }
 
+    /*
+     * Conservatively assume the branch here will call out to a function that in
+     * turn will trash everything.
+     */
     if (flags & IS_BRANCH) {
-        lir->defMask |= ENCODE_REG_PC;
-        lir->useMask |= ENCODE_REG_PC;
+        lir->defMask = lir->useMask = ENCODE_ALL;
+        return;
     }
 
     if (flags & REG_DEF0) {
@@ -162,11 +183,6 @@
         lir->defMask = ENCODE_ALL;
     }
 
-    /* Set up the mask for resources that are used */
-    if (flags & IS_BRANCH) {
-        lir->useMask |= ENCODE_REG_PC;
-    }
-
     if (flags & (REG_USE0 | REG_USE1 | REG_USE2 | REG_USE3)) {
         int i;
 
@@ -196,6 +212,49 @@
     if (flags & USES_CCODES) {
         lir->useMask |= ENCODE_CCODE;
     }
+
+    /* Fixup for kThumbPush/lr and kThumbPop/pc */
+    if (opcode == kThumbPush || opcode == kThumbPop) {
+        u8 r8Mask = getRegMaskCommon(r8);
+        if ((opcode == kThumbPush) && (lir->useMask & r8Mask)) {
+            lir->useMask &= ~r8Mask;
+            lir->useMask |= ENCODE_REG_LR;
+        } else if ((opcode == kThumbPop) && (lir->defMask & r8Mask)) {
+            lir->defMask &= ~r8Mask;
+            lir->defMask |= ENCODE_REG_PC;
+        }
+    }
+}
+
+/*
+ * Set up accurate resource masks for branch instructions.
+ */
+static void relaxBranchMasks(ArmLIR *lir)
+{
+    int flags = EncodingMap[lir->opcode].flags;
+
+    /* Make sure only branch instructions are passed here */
+    assert(flags & IS_BRANCH);
+
+    lir->useMask = lir->defMask = ENCODE_REG_PC;
+
+    if (flags & REG_DEF_LR) {
+        lir->defMask |= ENCODE_REG_LR;
+    }
+
+    if (flags & (REG_USE0 | REG_USE1 | REG_USE2 | REG_USE3)) {
+        int i;
+
+        for (i = 0; i < 4; i++) {
+            if (flags & (1 << (kRegUse0 + i))) {
+                setupRegMask(&lir->useMask, lir->operands[i]);
+            }
+        }
+    }
+
+    if (flags & USES_CCODES) {
+        lir->useMask |= ENCODE_CCODE;
+    }
 }
 
 /*
@@ -204,7 +263,7 @@
  */
 static ArmLIR *newLIR0(CompilationUnit *cUnit, ArmOpcode opcode)
 {
-    ArmLIR *insn = dvmCompilerNew(sizeof(ArmLIR), true);
+    ArmLIR *insn = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
     assert(isPseudoOpcode(opcode) || (EncodingMap[opcode].flags & NO_OPERAND));
     insn->opcode = opcode;
     setupResourceMasks(insn);
@@ -215,7 +274,7 @@
 static ArmLIR *newLIR1(CompilationUnit *cUnit, ArmOpcode opcode,
                            int dest)
 {
-    ArmLIR *insn = dvmCompilerNew(sizeof(ArmLIR), true);
+    ArmLIR *insn = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
     assert(isPseudoOpcode(opcode) || (EncodingMap[opcode].flags & IS_UNARY_OP));
     insn->opcode = opcode;
     insn->operands[0] = dest;
@@ -227,7 +286,7 @@
 static ArmLIR *newLIR2(CompilationUnit *cUnit, ArmOpcode opcode,
                            int dest, int src1)
 {
-    ArmLIR *insn = dvmCompilerNew(sizeof(ArmLIR), true);
+    ArmLIR *insn = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
     assert(isPseudoOpcode(opcode) ||
            (EncodingMap[opcode].flags & IS_BINARY_OP));
     insn->opcode = opcode;
@@ -241,7 +300,7 @@
 static ArmLIR *newLIR3(CompilationUnit *cUnit, ArmOpcode opcode,
                            int dest, int src1, int src2)
 {
-    ArmLIR *insn = dvmCompilerNew(sizeof(ArmLIR), true);
+    ArmLIR *insn = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
     if (!(EncodingMap[opcode].flags & IS_TERTIARY_OP)) {
         LOGE("Bad LIR3: %s[%d]",EncodingMap[opcode].name,opcode);
     }
@@ -260,7 +319,7 @@
 static ArmLIR *newLIR4(CompilationUnit *cUnit, ArmOpcode opcode,
                            int dest, int src1, int src2, int info)
 {
-    ArmLIR *insn = dvmCompilerNew(sizeof(ArmLIR), true);
+    ArmLIR *insn = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
     assert(isPseudoOpcode(opcode) ||
            (EncodingMap[opcode].flags & IS_QUAD_OP));
     insn->opcode = opcode;
@@ -298,10 +357,8 @@
  * Search the existing constants in the literal pool for an exact or close match
  * within the specified delta (greater than or equal to 0).
  */
-static ArmLIR *scanLiteralPool(CompilationUnit *cUnit, int value,
-                                   unsigned int delta)
+static ArmLIR *scanLiteralPool(LIR *dataTarget, int value, unsigned int delta)
 {
-    LIR *dataTarget = cUnit->wordList;
     while (dataTarget) {
         if (((unsigned) (value - ((ArmLIR *) dataTarget)->operands[0])) <=
             delta)
@@ -317,14 +374,15 @@
  */
 
 /* Add a 32-bit constant either in the constant pool or mixed with code */
-static ArmLIR *addWordData(CompilationUnit *cUnit, int value, bool inPlace)
+static ArmLIR *addWordData(CompilationUnit *cUnit, LIR **constantListP,
+                           int value)
 {
     /* Add the constant to the literal pool */
-    if (!inPlace) {
-        ArmLIR *newValue = dvmCompilerNew(sizeof(ArmLIR), true);
+    if (constantListP) {
+        ArmLIR *newValue = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
         newValue->operands[0] = value;
-        newValue->generic.next = cUnit->wordList;
-        cUnit->wordList = (LIR *) newValue;
+        newValue->generic.next = *constantListP;
+        *constantListP = (LIR *) newValue;
         return newValue;
     } else {
         /* Add the constant in the middle of code stream */
@@ -371,14 +429,19 @@
     /* Set up the place holder to reconstruct this Dalvik PC */
     if (pcrLabel == NULL) {
         int dPC = (int) (cUnit->method->insns + dOffset);
-        pcrLabel = dvmCompilerNew(sizeof(ArmLIR), true);
+        pcrLabel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
         pcrLabel->opcode = kArmPseudoPCReconstructionCell;
         pcrLabel->operands[0] = dPC;
         pcrLabel->operands[1] = dOffset;
         /* Insert the place holder to the growable list */
-        dvmInsertGrowableList(&cUnit->pcReconstructionList, pcrLabel);
+        dvmInsertGrowableList(&cUnit->pcReconstructionList,
+                              (intptr_t) pcrLabel);
     }
     /* Branch to the PC reconstruction code */
     branch->generic.target = (LIR *) pcrLabel;
+
+    /* Clear the conservative flags for branches that punt to the interpreter */
+    relaxBranchMasks(branch);
+
     return pcrLabel;
 }
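The def/use masks that setupResourceMasks() and relaxBranchMasks() build above exist so the local optimizer can decide whether two LIR instructions may be reordered. The sketch below is illustrative only (it is not code from this patch) and shows the standard dependence test such masks feed; with branches now tagged ENCODE_ALL by default, the conservative "move nothing across a call" answer falls out of the same check, while relaxBranchMasks() restores the narrower PC-based mask for branches that merely punt to the interpreter.

    /* Illustrative only: may laterLIR be hoisted above earlierLIR? */
    static bool canReorder(const ArmLIR *earlierLIR, const ArmLIR *laterLIR)
    {
        /* A shared resource in any def/use combination (a register, the
         * condition codes, or a memory class) blocks the move. */
        if (earlierLIR->defMask & laterLIR->useMask) return false;   /* RAW */
        if (earlierLIR->useMask & laterLIR->defMask) return false;   /* WAR */
        if (earlierLIR->defMask & laterLIR->defMask) return false;   /* WAW */
        return true;
    }
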
diff --git a/vm/compiler/codegen/arm/CodegenDriver.c b/vm/compiler/codegen/arm/CodegenDriver.c
index 061ffb8..55c0647 100644
--- a/vm/compiler/codegen/arm/CodegenDriver.c
+++ b/vm/compiler/codegen/arm/CodegenDriver.c
@@ -32,7 +32,7 @@
     int regCardBase = dvmCompilerAllocTemp(cUnit);
     int regCardNo = dvmCompilerAllocTemp(cUnit);
     ArmLIR *branchOver = genCmpImmBranch(cUnit, kArmCondEq, valReg, 0);
-    loadWordDisp(cUnit, rGLUE, offsetof(InterpState, cardTable),
+    loadWordDisp(cUnit, r6SELF, offsetof(Thread, cardTable),
                  regCardBase);
     opRegRegImm(cUnit, kOpLsr, regCardNo, tgtAddrReg, GC_CARD_SHIFT);
     storeBaseIndexed(cUnit, regCardBase, regCardNo, regCardBase, 0,
@@ -160,13 +160,16 @@
             return true;
     }
     dvmCompilerFlushAllRegs(cUnit);   /* Send everything to home location */
-    LOAD_FUNC_ADDR(cUnit, rlr, (int)funct);
+    LOAD_FUNC_ADDR(cUnit, r14lr, (int)funct);
     loadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
     loadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
-    opReg(cUnit, kOpBlx, rlr);
+    opReg(cUnit, kOpBlx, r14lr);
     dvmCompilerClobberCallRegs(cUnit);
     rlResult = dvmCompilerGetReturnWide(cUnit);
     storeValueWide(cUnit, rlDest, rlResult);
+#if defined(WITH_SELF_VERIFICATION)
+    cUnit->usesLinkRegister = true;
+#endif
     return false;
 }
 
@@ -205,7 +208,7 @@
 static void selfVerificationBranchInsert(LIR *currentLIR, ArmOpcode opcode,
                           int dest, int src1)
 {
-     ArmLIR *insn = dvmCompilerNew(sizeof(ArmLIR), true);
+     ArmLIR *insn = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
      insn->opcode = opcode;
      insn->operands[0] = dest;
      insn->operands[1] = src1;
@@ -213,6 +216,31 @@
      dvmCompilerInsertLIRBefore(currentLIR, (LIR *) insn);
 }
 
+/*
+ * Example where r14 (LR) is preserved around a heap access under
+ * self-verification mode in Thumb2:
+ *
+ * D/dalvikvm( 1538): 0x59414c5e (0026): ldr     r14, [r15pc, #220] <-hoisted
+ * D/dalvikvm( 1538): 0x59414c62 (002a): mla     r4, r0, r8, r4
+ * D/dalvikvm( 1538): 0x59414c66 (002e): adds    r3, r4, r3
+ * D/dalvikvm( 1538): 0x59414c6a (0032): push    <r5, r14>    ---+
+ * D/dalvikvm( 1538): 0x59414c6c (0034): blx_1   0x5940f494      |
+ * D/dalvikvm( 1538): 0x59414c6e (0036): blx_2   see above       <-MEM_OP_DECODE
+ * D/dalvikvm( 1538): 0x59414c70 (0038): ldr     r10, [r9, #0]   |
+ * D/dalvikvm( 1538): 0x59414c74 (003c): pop     <r5, r14>    ---+
+ * D/dalvikvm( 1538): 0x59414c78 (0040): mov     r11, r10
+ * D/dalvikvm( 1538): 0x59414c7a (0042): asr     r12, r11, #31
+ * D/dalvikvm( 1538): 0x59414c7e (0046): movs    r0, r2
+ * D/dalvikvm( 1538): 0x59414c80 (0048): movs    r1, r3
+ * D/dalvikvm( 1538): 0x59414c82 (004a): str     r2, [r5, #16]
+ * D/dalvikvm( 1538): 0x59414c84 (004c): mov     r2, r11
+ * D/dalvikvm( 1538): 0x59414c86 (004e): str     r3, [r5, #20]
+ * D/dalvikvm( 1538): 0x59414c88 (0050): mov     r3, r12
+ * D/dalvikvm( 1538): 0x59414c8a (0052): str     r11, [r5, #24]
+ * D/dalvikvm( 1538): 0x59414c8e (0056): str     r12, [r5, #28]
+ * D/dalvikvm( 1538): 0x59414c92 (005a): blx     r14             <-use of LR
+ *
+ */
 static void selfVerificationBranchInsertPass(CompilationUnit *cUnit)
 {
     ArmLIR *thisLIR;
@@ -221,7 +249,19 @@
     for (thisLIR = (ArmLIR *) cUnit->firstLIRInsn;
          thisLIR != (ArmLIR *) cUnit->lastLIRInsn;
          thisLIR = NEXT_LIR(thisLIR)) {
-        if (thisLIR->branchInsertSV) {
+        if (!thisLIR->flags.isNop && thisLIR->flags.insertWrapper) {
+            /*
+             * Push r5(FP) and r14(LR) onto stack. We need to make sure that
+             * SP is 8-byte aligned, and we use r5 as a temp to restore LR
+             * for Thumb-only targets since LR cannot be directly accessed in
+             * Thumb mode. Another reason to choose r5 here is that it is the Dalvik
+             * frame pointer and cannot be the target of the emulated heap
+             * load.
+             */
+            if (cUnit->usesLinkRegister) {
+                genSelfVerificationPreBranch(cUnit, thisLIR);
+            }
+
             /* Branch to mem op decode template */
             selfVerificationBranchInsert((LIR *) thisLIR, kThumbBlx1,
                        (int) gDvmJit.codeCache + templateEntryOffsets[opcode],
@@ -229,6 +269,11 @@
             selfVerificationBranchInsert((LIR *) thisLIR, kThumbBlx2,
                        (int) gDvmJit.codeCache + templateEntryOffsets[opcode],
                        (int) gDvmJit.codeCache + templateEntryOffsets[opcode]);
+
+            /* Restore LR */
+            if (cUnit->usesLinkRegister) {
+                genSelfVerificationPostBranch(cUnit, thisLIR);
+            }
         }
     }
 }
@@ -699,15 +744,18 @@
         // Adjust return regs in to handle case of rem returning r2/r3
         dvmCompilerFlushAllRegs(cUnit);   /* Send everything to home location */
         loadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
-        LOAD_FUNC_ADDR(cUnit, rlr, (int) callTgt);
+        LOAD_FUNC_ADDR(cUnit, r14lr, (int) callTgt);
         loadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
-        opReg(cUnit, kOpBlx, rlr);
+        opReg(cUnit, kOpBlx, r14lr);
         dvmCompilerClobberCallRegs(cUnit);
         if (retReg == r0)
             rlResult = dvmCompilerGetReturnWide(cUnit);
         else
             rlResult = dvmCompilerGetReturnWideAlt(cUnit);
         storeValueWide(cUnit, rlDest, rlResult);
+#if defined(WITH_SELF_VERIFICATION)
+        cUnit->usesLinkRegister = true;
+#endif
     }
     return false;
 }
@@ -904,7 +952,8 @@
 /* Perform the actual operation for OP_RETURN_* */
 static void genReturnCommon(CompilationUnit *cUnit, MIR *mir)
 {
-    genDispatchToHandler(cUnit, TEMPLATE_RETURN);
+    genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
+                         TEMPLATE_RETURN_PROF : TEMPLATE_RETURN);
 #if defined(WITH_JIT_TUNING)
     gDvmJit.returnOp++;
 #endif
@@ -912,12 +961,12 @@
     /* Insert branch, but defer setting of target */
     ArmLIR *branch = genUnconditionalBranch(cUnit, NULL);
     /* Set up the place holder to reconstruct this Dalvik PC */
-    ArmLIR *pcrLabel = dvmCompilerNew(sizeof(ArmLIR), true);
+    ArmLIR *pcrLabel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
     pcrLabel->opcode = kArmPseudoPCReconstructionCell;
     pcrLabel->operands[0] = dPC;
     pcrLabel->operands[1] = mir->offset;
     /* Insert the place holder to the growable list */
-    dvmInsertGrowableList(&cUnit->pcReconstructionList, pcrLabel);
+    dvmInsertGrowableList(&cUnit->pcReconstructionList, (intptr_t) pcrLabel);
     /* Branch to the PC reconstruction code */
     branch->generic.target = (LIR *) pcrLabel;
 }
@@ -944,7 +993,7 @@
     }
     if (regMask) {
         /* Up to 5 args are pushed on top of FP - sizeofStackSaveArea */
-        opRegRegImm(cUnit, kOpSub, r7, rFP,
+        opRegRegImm(cUnit, kOpSub, r7, r5FP,
                     sizeof(StackSaveArea) + (dInsn->vA << 2));
         /* generate null check */
         if (pcrLabel) {
@@ -973,19 +1022,22 @@
     dvmCompilerLockAllTemps(cUnit);
 
     /*
-     * r4PC     : &rFP[vC]
+     * r4PC     : &r5FP[vC]
      * r7: &newFP[0]
      */
-    opRegRegImm(cUnit, kOpAdd, r4PC, rFP, srcOffset);
+    opRegRegImm(cUnit, kOpAdd, r4PC, r5FP, srcOffset);
     /* load [r0 .. min(numArgs,4)] */
     regMask = (1 << ((numArgs < 4) ? numArgs : 4)) - 1;
     /*
      * Protect the loadMultiple instruction from being reordered with other
      * Dalvik stack accesses.
+     *
+     * This code is also shared by the invoke jumbo instructions, and the load
+     * can be skipped entirely when the jumbo invoke has no arguments.
      */
-    loadMultiple(cUnit, r4PC, regMask);
+    if (numArgs != 0) loadMultiple(cUnit, r4PC, regMask);
 
-    opRegRegImm(cUnit, kOpSub, r7, rFP,
+    opRegRegImm(cUnit, kOpSub, r7, r5FP,
                 sizeof(StackSaveArea) + (numArgs << 2));
     /* generate null check */
     if (pcrLabel) {
@@ -1001,9 +1053,9 @@
         ArmLIR *loopLabel = NULL;
         /*
          * r0 contains "this" and it will be used later, so push it to the stack
-         * first. Pushing r5 (rFP) is just for stack alignment purposes.
+         * first. Pushing r5FP is just for stack alignment purposes.
          */
-        opImm(cUnit, kOpPush, (1 << r0 | 1 << rFP));
+        opImm(cUnit, kOpPush, (1 << r0 | 1 << r5FP));
         /* No need to generate the loop structure if numArgs <= 11 */
         if (numArgs > 11) {
             loadConstant(cUnit, 5, ((numArgs - 4) >> 2) << 2);
@@ -1018,13 +1070,13 @@
         loadMultiple(cUnit, r4PC, regMask);
         /* No need to generate the loop structure if numArgs <= 11 */
         if (numArgs > 11) {
-            opRegImm(cUnit, kOpSub, rFP, 4);
+            opRegImm(cUnit, kOpSub, r5FP, 4);
             genConditionalBranch(cUnit, kArmCondNe, loopLabel);
         }
     }
 
     /* Save the last batch of loaded values */
-    storeMultiple(cUnit, r7, regMask);
+    if (numArgs != 0) storeMultiple(cUnit, r7, regMask);
 
     /* Generate the loop epilogue - don't use r0 */
     if ((numArgs > 4) && (numArgs % 4)) {
@@ -1036,7 +1088,7 @@
         loadMultiple(cUnit, r4PC, regMask);
     }
     if (numArgs >= 8)
-        opImm(cUnit, kOpPop, (1 << r0 | 1 << rFP));
+        opImm(cUnit, kOpPop, (1 << r0 | 1 << r5FP));
 
     /* Save the modulo 4 arguments */
     if ((numArgs > 4) && (numArgs % 4)) {
@@ -1062,7 +1114,7 @@
     ArmLIR *retChainingCell = &labelList[bb->fallThrough->id];
 
     /* r1 = &retChainingCell */
-    ArmLIR *addrRetChain = opRegRegImm(cUnit, kOpAdd, r1, rpc, 0);
+    ArmLIR *addrRetChain = opRegRegImm(cUnit, kOpAdd, r1, r15pc, 0);
 
     /* r4PC = dalvikCallsite */
     loadConstant(cUnit, r4PC,
@@ -1079,14 +1131,18 @@
      * r7 = calleeMethod->registersSize
      */
     if (dvmIsNativeMethod(calleeMethod)) {
-        genDispatchToHandler(cUnit, TEMPLATE_INVOKE_METHOD_NATIVE);
+        genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
+            TEMPLATE_INVOKE_METHOD_NATIVE_PROF :
+            TEMPLATE_INVOKE_METHOD_NATIVE);
 #if defined(WITH_JIT_TUNING)
         gDvmJit.invokeNative++;
 #endif
     } else {
         /* For Java callees, set up r2 to be calleeMethod->outsSize */
         loadConstant(cUnit, r2, calleeMethod->outsSize);
-        genDispatchToHandler(cUnit, TEMPLATE_INVOKE_METHOD_CHAIN);
+        genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
+            TEMPLATE_INVOKE_METHOD_CHAIN_PROF :
+            TEMPLATE_INVOKE_METHOD_CHAIN);
 #if defined(WITH_JIT_TUNING)
         gDvmJit.invokeMonomorphic++;
 #endif
@@ -1138,14 +1194,16 @@
                  (int) (cUnit->method->insns + mir->offset));
 
     /* r1 = &retChainingCell */
-    ArmLIR *addrRetChain = opRegRegImm(cUnit, kOpAdd, r1, rpc, 0);
+    ArmLIR *addrRetChain = opRegRegImm(cUnit, kOpAdd, r1, r15pc, 0);
     addrRetChain->generic.target = (LIR *) retChainingCell;
 
     /* r2 = &predictedChainingCell */
-    ArmLIR *predictedChainingCell = opRegRegImm(cUnit, kOpAdd, r2, rpc, 0);
+    ArmLIR *predictedChainingCell = opRegRegImm(cUnit, kOpAdd, r2, r15pc, 0);
     predictedChainingCell->generic.target = (LIR *) predChainingCell;
 
-    genDispatchToHandler(cUnit, TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN);
+    genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
+        TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF :
+        TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN);
 
     /* return through lr - jump to the chaining cell */
     genUnconditionalBranch(cUnit, predChainingCell);
@@ -1156,12 +1214,13 @@
      */
     if (pcrLabel == NULL) {
         int dPC = (int) (cUnit->method->insns + mir->offset);
-        pcrLabel = dvmCompilerNew(sizeof(ArmLIR), true);
+        pcrLabel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
         pcrLabel->opcode = kArmPseudoPCReconstructionCell;
         pcrLabel->operands[0] = dPC;
         pcrLabel->operands[1] = mir->offset;
         /* Insert the place holder to the growable list */
-        dvmInsertGrowableList(&cUnit->pcReconstructionList, pcrLabel);
+        dvmInsertGrowableList(&cUnit->pcReconstructionList,
+                              (intptr_t) pcrLabel);
     }
 
     /* return through lr+2 - punt to the interpreter */
@@ -1184,7 +1243,7 @@
 
     LOAD_FUNC_ADDR(cUnit, r7, (int) dvmJitToPatchPredictedChain);
 
-    genRegCopy(cUnit, r1, rGLUE);
+    genRegCopy(cUnit, r1, r6SELF);
 
     /*
      * r0 = calleeMethod
@@ -1198,7 +1257,7 @@
     opReg(cUnit, kOpBlx, r7);
 
     /* r1 = &retChainingCell */
-    addrRetChain = opRegRegImm(cUnit, kOpAdd, r1, rpc, 0);
+    addrRetChain = opRegRegImm(cUnit, kOpAdd, r1, r15pc, 0);
     addrRetChain->generic.target = (LIR *) retChainingCell;
 
     bypassRechaining->generic.target = (LIR *) addrRetChain;
@@ -1207,7 +1266,9 @@
      * r1 = &ChainingCell,
      * r4PC = callsiteDPC,
      */
-    genDispatchToHandler(cUnit, TEMPLATE_INVOKE_METHOD_NO_OPT);
+    genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
+        TEMPLATE_INVOKE_METHOD_NO_OPT_PROF :
+        TEMPLATE_INVOKE_METHOD_NO_OPT);
 #if defined(WITH_JIT_TUNING)
     gDvmJit.invokePolymorphic++;
 #endif
@@ -1215,13 +1276,66 @@
     genTrap(cUnit, mir->offset, pcrLabel);
 }
 
+/* "this" pointer is already in r0 */
+static void genInvokeVirtualWholeMethod(CompilationUnit *cUnit,
+                                        MIR *mir,
+                                        void *calleeAddr,
+                                        ArmLIR *retChainingCell)
+{
+    CallsiteInfo *callsiteInfo = mir->meta.callsiteInfo;
+    dvmCompilerLockAllTemps(cUnit);
+
+    loadClassPointer(cUnit, r1, (int) callsiteInfo);
+
+    loadWordDisp(cUnit, r0, offsetof(Object, clazz), r2);
+    /* Branch to the slow path if classes are not equal */
+    opRegReg(cUnit, kOpCmp, r1, r2);
+    /*
+     * Set the misPredBranchOver target so that it will be generated when the
+     * code for the non-optimized invoke is generated.
+     */
+    ArmLIR *classCheck = opCondBranch(cUnit, kArmCondNe);
+
+    /* r0 = the Dalvik PC of the callsite */
+    loadConstant(cUnit, r0, (int) (cUnit->method->insns + mir->offset));
+
+    newLIR2(cUnit, kThumbBl1, (int) calleeAddr, (int) calleeAddr);
+    newLIR2(cUnit, kThumbBl2, (int) calleeAddr, (int) calleeAddr);
+    genUnconditionalBranch(cUnit, retChainingCell);
+
+    /* Target of slow path */
+    ArmLIR *slowPathLabel = newLIR0(cUnit, kArmPseudoTargetLabel);
+
+    slowPathLabel->defMask = ENCODE_ALL;
+    classCheck->generic.target = (LIR *) slowPathLabel;
+
+    // FIXME
+    cUnit->printMe = true;
+}
+
+static void genInvokeSingletonWholeMethod(CompilationUnit *cUnit,
+                                          MIR *mir,
+                                          void *calleeAddr,
+                                          ArmLIR *retChainingCell)
+{
+    /* r0 = the Dalvik PC of the callsite */
+    loadConstant(cUnit, r0, (int) (cUnit->method->insns + mir->offset));
+
+    newLIR2(cUnit, kThumbBl1, (int) calleeAddr, (int) calleeAddr);
+    newLIR2(cUnit, kThumbBl2, (int) calleeAddr, (int) calleeAddr);
+    genUnconditionalBranch(cUnit, retChainingCell);
+
+    // FIXME
+    cUnit->printMe = true;
+}
+
 /* Generate a branch to go back to the interpreter */
 static void genPuntToInterp(CompilationUnit *cUnit, unsigned int offset)
 {
     /* r0 = dalvik pc */
     dvmCompilerFlushAllRegs(cUnit);
     loadConstant(cUnit, r0, (int) (cUnit->method->insns + offset));
-    loadWordDisp(cUnit, rGLUE, offsetof(InterpState,
+    loadWordDisp(cUnit, r6SELF, offsetof(Thread,
                  jitToInterpEntries.dvmJitToInterpPunt), r1);
     opReg(cUnit, kOpBlx, r1);
 }
@@ -1247,9 +1361,9 @@
        genPuntToInterp(cUnit, mir->offset);
        return;
     }
-    int entryAddr = offsetof(InterpState,
+    int entryAddr = offsetof(Thread,
                              jitToInterpEntries.dvmJitToInterpSingleStep);
-    loadWordDisp(cUnit, rGLUE, entryAddr, r2);
+    loadWordDisp(cUnit, r6SELF, entryAddr, r2);
     /* r0 = dalvik pc */
     loadConstant(cUnit, r0, (int) (cUnit->method->insns + mir->offset));
     /* r1 = dalvik pc of following instruction */
@@ -1257,8 +1371,7 @@
     opReg(cUnit, kOpBlx, r2);
 }
 
-#if defined(WITH_DEADLOCK_PREDICTION) || defined(WITH_MONITOR_TRACKING) || \
-    defined(_ARMV5TE) || defined(_ARMV5TE_VFP)
+#if defined(_ARMV5TE) || defined(_ARMV5TE_VFP)
 /*
  * To prevent a thread in a monitor wait from blocking the Jit from
  * resetting the code cache, heavyweight monitor lock will not
@@ -1277,17 +1390,13 @@
     dvmCompilerFlushAllRegs(cUnit);   /* Send everything to home location */
     RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
     loadValueDirectFixed(cUnit, rlSrc, r1);
-    loadWordDisp(cUnit, rGLUE, offsetof(InterpState, self), r0);
+    genRegCopy(cUnit, r0, r6SELF);
     genNullCheck(cUnit, rlSrc.sRegLow, r1, mir->offset, NULL);
     if (isEnter) {
         /* Get dPC of next insn */
         loadConstant(cUnit, r4PC, (int)(cUnit->method->insns + mir->offset +
                  dexGetWidthFromOpcode(OP_MONITOR_ENTER)));
-#if defined(WITH_DEADLOCK_PREDICTION)
-        genDispatchToHandler(cUnit, TEMPLATE_MONITOR_ENTER_DEBUG);
-#else
         genDispatchToHandler(cUnit, TEMPLATE_MONITOR_ENTER);
-#endif
     } else {
         LOAD_FUNC_ADDR(cUnit, r2, (int)dvmUnlockObject);
         /* Do the call */
@@ -1307,6 +1416,20 @@
 #endif
 
 /*
+ * Fetch *self->suspendCount. If the suspend count is non-zero,
+ * punt to the interpreter.
+ */
+static void genSuspendPoll(CompilationUnit *cUnit, MIR *mir)
+{
+    int rTemp = dvmCompilerAllocTemp(cUnit);
+    ArmLIR *ld;
+    ld = loadWordDisp(cUnit, r6SELF, offsetof(Thread, suspendCount),
+                      rTemp);
+    setMemRefType(ld, true /* isLoad */, kMustNotAlias);
+    genRegImmCheck(cUnit, kArmCondNe, rTemp, 0, mir->offset, NULL);
+}
+
+/*
  * The following are the first-level codegen routines that analyze the format
  * of each bytecode, then either dispatch special-purpose codegen routines
  * or produce corresponding Thumb instructions directly.
@@ -1315,8 +1438,25 @@
 static bool handleFmt10t_Fmt20t_Fmt30t(CompilationUnit *cUnit, MIR *mir,
                                        BasicBlock *bb, ArmLIR *labelList)
 {
-    /* For OP_GOTO, OP_GOTO_16, and OP_GOTO_32 */
-    genUnconditionalBranch(cUnit, &labelList[bb->taken->id]);
+    /* backward branch? */
+    bool backwardBranch = (bb->taken->startOffset <= mir->offset);
+
+    if (backwardBranch && gDvmJit.genSuspendPoll) {
+        genSuspendPoll(cUnit, mir);
+    }
+
+    int numPredecessors = dvmCountSetBits(bb->taken->predecessors);
+    /*
+     * Things could be hoisted out of the taken block into the predecessor, so
+     * make sure it is dominated by the predecessor.
+     */
+    if (numPredecessors == 1 && bb->taken->visited == false &&
+        bb->taken->blockType == kDalvikByteCode) {
+        cUnit->nextCodegenBlock = bb->taken;
+    } else {
+        /* For OP_GOTO, OP_GOTO_16, and OP_GOTO_32 */
+        genUnconditionalBranch(cUnit, &labelList[bb->taken->id]);
+    }
     return false;
 }
 
@@ -1412,14 +1552,14 @@
     return false;
 }
 
-static bool handleFmt20bc(CompilationUnit *cUnit, MIR *mir)
+static bool handleFmt20bc_Fmt40sc(CompilationUnit *cUnit, MIR *mir)
 {
-    /* For OP_THROW_VERIFICATION_ERROR */
+    /* For OP_THROW_VERIFICATION_ERROR & OP_THROW_VERIFICATION_ERROR_JUMBO */
     genInterpSingleStep(cUnit, mir);
     return false;
 }
 
-static bool handleFmt21c_Fmt31c(CompilationUnit *cUnit, MIR *mir)
+static bool handleFmt21c_Fmt31c_Fmt41c(CompilationUnit *cUnit, MIR *mir)
 {
     RegLocation rlResult;
     RegLocation rlDest;
@@ -1442,7 +1582,8 @@
             storeValue(cUnit, rlDest, rlResult);
             break;
         }
-        case OP_CONST_CLASS: {
+        case OP_CONST_CLASS:
+        case OP_CONST_CLASS_JUMBO: {
             void *classPtr = (void*)
               (cUnit->method->clazz->pDvmDex->pResClasses[mir->dalvikInsn.vB]);
 
@@ -1457,14 +1598,20 @@
             storeValue(cUnit, rlDest, rlResult);
             break;
         }
+        case OP_SGET:
         case OP_SGET_VOLATILE:
-        case OP_SGET_OBJECT_VOLATILE:
+        case OP_SGET_JUMBO:
         case OP_SGET_OBJECT:
+        case OP_SGET_OBJECT_VOLATILE:
+        case OP_SGET_OBJECT_JUMBO:
         case OP_SGET_BOOLEAN:
+        case OP_SGET_BOOLEAN_JUMBO:
         case OP_SGET_CHAR:
+        case OP_SGET_CHAR_JUMBO:
         case OP_SGET_BYTE:
+        case OP_SGET_BYTE_JUMBO:
         case OP_SGET_SHORT:
-        case OP_SGET: {
+        case OP_SGET_SHORT_JUMBO: {
             int valOffset = offsetof(StaticField, value);
             int tReg = dvmCompilerAllocTemp(cUnit);
             bool isVolatile;
@@ -1480,7 +1627,7 @@
 
             isVolatile = (mir->dalvikInsn.opcode == OP_SGET_VOLATILE) ||
                          (mir->dalvikInsn.opcode == OP_SGET_OBJECT_VOLATILE) ||
-                         dvmIsVolatileField(fieldPtr);
+                         dvmIsVolatileField((Field *) fieldPtr);
 
             rlDest = dvmCompilerGetDest(cUnit, mir, 0);
             rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kAnyReg, true);
@@ -1496,7 +1643,8 @@
             storeValue(cUnit, rlDest, rlResult);
             break;
         }
-        case OP_SGET_WIDE: {
+        case OP_SGET_WIDE:
+        case OP_SGET_WIDE_JUMBO: {
             int valOffset = offsetof(StaticField, value);
             const Method *method = (mir->OptimizationFlags & MIR_CALLEE) ?
                 mir->meta.calleeMethod : cUnit->method;
@@ -1520,14 +1668,20 @@
             storeValueWide(cUnit, rlDest, rlResult);
             break;
         }
+        case OP_SPUT:
+        case OP_SPUT_VOLATILE:
+        case OP_SPUT_JUMBO:
         case OP_SPUT_OBJECT:
         case OP_SPUT_OBJECT_VOLATILE:
-        case OP_SPUT_VOLATILE:
+        case OP_SPUT_OBJECT_JUMBO:
         case OP_SPUT_BOOLEAN:
+        case OP_SPUT_BOOLEAN_JUMBO:
         case OP_SPUT_CHAR:
+        case OP_SPUT_CHAR_JUMBO:
         case OP_SPUT_BYTE:
+        case OP_SPUT_BYTE_JUMBO:
         case OP_SPUT_SHORT:
-        case OP_SPUT: {
+        case OP_SPUT_SHORT_JUMBO: {
             int valOffset = offsetof(StaticField, value);
             int tReg = dvmCompilerAllocTemp(cUnit);
             int objHead;
@@ -1540,9 +1694,10 @@
 
             isVolatile = (mir->dalvikInsn.opcode == OP_SPUT_VOLATILE) ||
                          (mir->dalvikInsn.opcode == OP_SPUT_OBJECT_VOLATILE) ||
-                         dvmIsVolatileField(fieldPtr);
+                         dvmIsVolatileField((Field *) fieldPtr);
 
             isSputObject = (mir->dalvikInsn.opcode == OP_SPUT_OBJECT) ||
+                           (mir->dalvikInsn.opcode == OP_SPUT_OBJECT_JUMBO) ||
                            (mir->dalvikInsn.opcode == OP_SPUT_OBJECT_VOLATILE);
 
             if (fieldPtr == NULL) {
@@ -1572,7 +1727,8 @@
 
             break;
         }
-        case OP_SPUT_WIDE: {
+        case OP_SPUT_WIDE:
+        case OP_SPUT_WIDE_JUMBO: {
             int tReg = dvmCompilerAllocTemp(cUnit);
             int valOffset = offsetof(StaticField, value);
             const Method *method = (mir->OptimizationFlags & MIR_CALLEE) ?
@@ -1594,12 +1750,13 @@
             HEAP_ACCESS_SHADOW(false);
             break;
         }
-        case OP_NEW_INSTANCE: {
+        case OP_NEW_INSTANCE:
+        case OP_NEW_INSTANCE_JUMBO: {
             /*
              * Obey the calling convention and don't mess with the register
              * usage.
              */
-            ClassObject *classPtr = (void*)
+            ClassObject *classPtr = (ClassObject *)
               (cUnit->method->clazz->pDvmDex->pResClasses[mir->dalvikInsn.vB]);
 
             if (classPtr == NULL) {
@@ -1637,7 +1794,8 @@
             storeValue(cUnit, rlDest, rlResult);
             break;
         }
-        case OP_CHECK_CAST: {
+        case OP_CHECK_CAST:
+        case OP_CHECK_CAST_JUMBO: {
             /*
              * Obey the calling convention and don't mess with the register
              * usage.
@@ -1721,16 +1879,13 @@
     RegLocation rlResult;
     switch (dalvikOpcode) {
         case OP_MOVE_EXCEPTION: {
-            int offset = offsetof(InterpState, self);
             int exOffset = offsetof(Thread, exception);
-            int selfReg = dvmCompilerAllocTemp(cUnit);
             int resetReg = dvmCompilerAllocTemp(cUnit);
             RegLocation rlDest = dvmCompilerGetDest(cUnit, mir, 0);
             rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
-            loadWordDisp(cUnit, rGLUE, offset, selfReg);
+            loadWordDisp(cUnit, r6SELF, exOffset, rlResult.lowReg);
             loadConstant(cUnit, resetReg, 0);
-            loadWordDisp(cUnit, selfReg, exOffset, rlResult.lowReg);
-            storeWordDisp(cUnit, selfReg, exOffset, resetReg);
+            storeWordDisp(cUnit, r6SELF, exOffset, resetReg);
             storeValue(cUnit, rlDest, rlResult);
            break;
         }
@@ -1769,21 +1924,16 @@
             RegLocation rlDest = LOC_DALVIK_RETURN_VAL;
             rlDest.fp = rlSrc.fp;
             storeValue(cUnit, rlDest, rlSrc);
-            genReturnCommon(cUnit,mir);
+            genReturnCommon(cUnit, mir);
             break;
         }
         case OP_MONITOR_EXIT:
         case OP_MONITOR_ENTER:
-#if defined(WITH_DEADLOCK_PREDICTION) || defined(WITH_MONITOR_TRACKING)
-            genMonitorPortable(cUnit, mir);
-#else
             genMonitor(cUnit, mir);
-#endif
             break;
-        case OP_THROW: {
+        case OP_THROW:
             genInterpSingleStep(cUnit, mir);
             break;
-        }
         default:
             return true;
     }
@@ -1920,8 +2070,16 @@
 {
     Opcode dalvikOpcode = mir->dalvikInsn.opcode;
     ArmConditionCode cond;
+    /* backward branch? */
+    bool backwardBranch = (bb->taken->startOffset <= mir->offset);
+
+    if (backwardBranch && gDvmJit.genSuspendPoll) {
+        genSuspendPoll(cUnit, mir);
+    }
+
     RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
     rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+
     opRegImm(cUnit, kOpCmp, rlSrc.lowReg, 0);
 
 //TUNING: break this out to allow use of Thumb2 CB[N]Z
@@ -2192,7 +2350,7 @@
     return false;
 }
 
-static bool handleFmt22c(CompilationUnit *cUnit, MIR *mir)
+static bool handleFmt22c_Fmt52c(CompilationUnit *cUnit, MIR *mir)
 {
     Opcode dalvikOpcode = mir->dalvikInsn.opcode;
     int fieldOffset = -1;
@@ -2206,22 +2364,36 @@
          */
         case OP_IGET:
         case OP_IGET_VOLATILE:
+        case OP_IGET_JUMBO:
         case OP_IGET_WIDE:
+        case OP_IGET_WIDE_JUMBO:
         case OP_IGET_OBJECT:
         case OP_IGET_OBJECT_VOLATILE:
+        case OP_IGET_OBJECT_JUMBO:
         case OP_IGET_BOOLEAN:
+        case OP_IGET_BOOLEAN_JUMBO:
         case OP_IGET_BYTE:
+        case OP_IGET_BYTE_JUMBO:
         case OP_IGET_CHAR:
+        case OP_IGET_CHAR_JUMBO:
         case OP_IGET_SHORT:
+        case OP_IGET_SHORT_JUMBO:
         case OP_IPUT:
         case OP_IPUT_VOLATILE:
+        case OP_IPUT_JUMBO:
         case OP_IPUT_WIDE:
+        case OP_IPUT_WIDE_JUMBO:
         case OP_IPUT_OBJECT:
         case OP_IPUT_OBJECT_VOLATILE:
+        case OP_IPUT_OBJECT_JUMBO:
         case OP_IPUT_BOOLEAN:
+        case OP_IPUT_BOOLEAN_JUMBO:
         case OP_IPUT_BYTE:
+        case OP_IPUT_BYTE_JUMBO:
         case OP_IPUT_CHAR:
-        case OP_IPUT_SHORT: {
+        case OP_IPUT_CHAR_JUMBO:
+        case OP_IPUT_SHORT:
+        case OP_IPUT_SHORT_JUMBO: {
             const Method *method = (mir->OptimizationFlags & MIR_CALLEE) ?
                 mir->meta.calleeMethod : cUnit->method;
             Field *fieldPtr =
@@ -2240,7 +2412,8 @@
     }
 
     switch (dalvikOpcode) {
-        case OP_NEW_ARRAY: {
+        case OP_NEW_ARRAY:
+        case OP_NEW_ARRAY_JUMBO: {
             // Generates a call - use explicit registers
             RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
             RegLocation rlDest = dvmCompilerGetDest(cUnit, mir, 0);
@@ -2283,7 +2456,8 @@
             storeValue(cUnit, rlDest, rlResult);
             break;
         }
-        case OP_INSTANCE_OF: {
+        case OP_INSTANCE_OF:
+        case OP_INSTANCE_OF_JUMBO: {
             // May generate a call - use explicit registers
             RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
             RegLocation rlDest = dvmCompilerGetDest(cUnit, mir, 0);
@@ -2329,6 +2503,7 @@
             break;
         }
         case OP_IGET_WIDE:
+        case OP_IGET_WIDE_JUMBO:
             genIGetWide(cUnit, mir, fieldOffset);
             break;
         case OP_IGET_VOLATILE:
@@ -2336,21 +2511,33 @@
             isVolatile = true;
             // NOTE: intentional fallthrough
         case OP_IGET:
+        case OP_IGET_JUMBO:
         case OP_IGET_OBJECT:
+        case OP_IGET_OBJECT_JUMBO:
         case OP_IGET_BOOLEAN:
+        case OP_IGET_BOOLEAN_JUMBO:
         case OP_IGET_BYTE:
+        case OP_IGET_BYTE_JUMBO:
         case OP_IGET_CHAR:
+        case OP_IGET_CHAR_JUMBO:
         case OP_IGET_SHORT:
+        case OP_IGET_SHORT_JUMBO:
             genIGet(cUnit, mir, kWord, fieldOffset, isVolatile);
             break;
         case OP_IPUT_WIDE:
+        case OP_IPUT_WIDE_JUMBO:
             genIPutWide(cUnit, mir, fieldOffset);
             break;
         case OP_IPUT:
-        case OP_IPUT_SHORT:
-        case OP_IPUT_CHAR:
-        case OP_IPUT_BYTE:
+        case OP_IPUT_JUMBO:
         case OP_IPUT_BOOLEAN:
+        case OP_IPUT_BOOLEAN_JUMBO:
+        case OP_IPUT_BYTE:
+        case OP_IPUT_BYTE_JUMBO:
+        case OP_IPUT_CHAR:
+        case OP_IPUT_CHAR_JUMBO:
+        case OP_IPUT_SHORT:
+        case OP_IPUT_SHORT_JUMBO:
             genIPut(cUnit, mir, kWord, fieldOffset, false, isVolatile);
             break;
         case OP_IPUT_VOLATILE:
@@ -2358,6 +2545,7 @@
             isVolatile = true;
             // NOTE: intentional fallthrough
         case OP_IPUT_OBJECT:
+        case OP_IPUT_OBJECT_JUMBO:
             genIPut(cUnit, mir, kWord, fieldOffset, true, isVolatile);
             break;
         case OP_IGET_WIDE_VOLATILE:
@@ -2404,11 +2592,19 @@
 {
     Opcode dalvikOpcode = mir->dalvikInsn.opcode;
     ArmConditionCode cond;
+    /* backward branch? */
+    bool backwardBranch = (bb->taken->startOffset <= mir->offset);
+
+    if (backwardBranch && gDvmJit.genSuspendPoll) {
+        genSuspendPoll(cUnit, mir);
+    }
+
     RegLocation rlSrc1 = dvmCompilerGetSrc(cUnit, mir, 0);
     RegLocation rlSrc2 = dvmCompilerGetSrc(cUnit, mir, 1);
 
     rlSrc1 = loadValue(cUnit, rlSrc1, kCoreReg);
     rlSrc2 = loadValue(cUnit, rlSrc2, kCoreReg);
+
     opRegReg(cUnit, kOpCmp, rlSrc1.lowReg, rlSrc2.lowReg);
 
     switch (dalvikOpcode) {
@@ -2734,11 +2930,11 @@
             loadConstant(cUnit, r0,
                (int) (cUnit->method->insns + mir->offset + mir->dalvikInsn.vB));
             /* r2 <- pc of the instruction following the blx */
-            opRegReg(cUnit, kOpMov, r2, rpc);
+            opRegReg(cUnit, kOpMov, r2, r15pc);
             opReg(cUnit, kOpBlx, r4PC);
             dvmCompilerClobberCallRegs(cUnit);
             /* pc <- computed goto target */
-            opRegReg(cUnit, kOpMov, rpc, r0);
+            opRegReg(cUnit, kOpMov, r15pc, r0);
             break;
         }
         default:
@@ -2778,8 +2974,8 @@
     mir->meta.callsiteInfo->misPredBranchOver->target = (LIR *) target;
 }
 
-static bool handleFmt35c_3rc(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
-                             ArmLIR *labelList)
+static bool handleFmt35c_3rc_5rc(CompilationUnit *cUnit, MIR *mir,
+                             BasicBlock *bb, ArmLIR *labelList)
 {
     ArmLIR *retChainingCell = NULL;
     ArmLIR *pcrLabel = NULL;
@@ -2799,7 +2995,8 @@
          * ]
          */
         case OP_INVOKE_VIRTUAL:
-        case OP_INVOKE_VIRTUAL_RANGE: {
+        case OP_INVOKE_VIRTUAL_RANGE:
+        case OP_INVOKE_VIRTUAL_JUMBO: {
             ArmLIR *predChainingCell = &labelList[bb->taken->id];
             int methodIndex =
                 cUnit->method->clazz->pDvmDex->pResMethods[dInsn->vB]->
@@ -2830,7 +3027,8 @@
          *                ->pResMethods[BBBB]->methodIndex]
          */
         case OP_INVOKE_SUPER:
-        case OP_INVOKE_SUPER_RANGE: {
+        case OP_INVOKE_SUPER_RANGE:
+        case OP_INVOKE_SUPER_JUMBO: {
             /* Grab the method ptr directly from what the interpreter sees */
             const Method *calleeMethod = mir->meta.callsiteInfo->method;
             assert(calleeMethod == cUnit->method->clazz->super->vtable[
@@ -2842,16 +3040,25 @@
             else
                 genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel);
 
-            /* r0 = calleeMethod */
-            loadConstant(cUnit, r0, (int) calleeMethod);
+            if (mir->OptimizationFlags & MIR_INVOKE_METHOD_JIT) {
+                const Method *calleeMethod = mir->meta.callsiteInfo->method;
+                void *calleeAddr = dvmJitGetMethodAddr(calleeMethod->insns);
+                assert(calleeAddr);
+                genInvokeSingletonWholeMethod(cUnit, mir, calleeAddr,
+                                              retChainingCell);
+            } else {
+                /* r0 = calleeMethod */
+                loadConstant(cUnit, r0, (int) calleeMethod);
 
-            genInvokeSingletonCommon(cUnit, mir, bb, labelList, pcrLabel,
-                                     calleeMethod);
+                genInvokeSingletonCommon(cUnit, mir, bb, labelList, pcrLabel,
+                                         calleeMethod);
+            }
             break;
         }
         /* calleeMethod = method->clazz->pDvmDex->pResMethods[BBBB] */
         case OP_INVOKE_DIRECT:
-        case OP_INVOKE_DIRECT_RANGE: {
+        case OP_INVOKE_DIRECT_RANGE:
+        case OP_INVOKE_DIRECT_JUMBO: {
             /* Grab the method ptr directly from what the interpreter sees */
             const Method *calleeMethod = mir->meta.callsiteInfo->method;
             assert(calleeMethod ==
@@ -2871,7 +3078,8 @@
         }
         /* calleeMethod = method->clazz->pDvmDex->pResMethods[BBBB] */
         case OP_INVOKE_STATIC:
-        case OP_INVOKE_STATIC_RANGE: {
+        case OP_INVOKE_STATIC_RANGE:
+        case OP_INVOKE_STATIC_JUMBO: {
             /* Grab the method ptr directly from what the interpreter sees */
             const Method *calleeMethod = mir->meta.callsiteInfo->method;
             assert(calleeMethod ==
@@ -2884,11 +3092,19 @@
                 genProcessArgsRange(cUnit, mir, dInsn,
                                     NULL /* no null check */);
 
-            /* r0 = calleeMethod */
-            loadConstant(cUnit, r0, (int) calleeMethod);
+            if (mir->OptimizationFlags & MIR_INVOKE_METHOD_JIT) {
+                const Method *calleeMethod = mir->meta.callsiteInfo->method;
+                void *calleeAddr = dvmJitGetMethodAddr(calleeMethod->insns);
+                assert(calleeAddr);
+                genInvokeSingletonWholeMethod(cUnit, mir, calleeAddr,
+                                              retChainingCell);
+            } else {
+                /* r0 = calleeMethod */
+                loadConstant(cUnit, r0, (int) calleeMethod);
 
-            genInvokeSingletonCommon(cUnit, mir, bb, labelList, pcrLabel,
-                                     calleeMethod);
+                genInvokeSingletonCommon(cUnit, mir, bb, labelList, pcrLabel,
+                                         calleeMethod);
+            }
             break;
         }
         /*
@@ -2963,7 +3179,8 @@
          * 0x47357ebc : .word (0x425719dc)
          */
         case OP_INVOKE_INTERFACE:
-        case OP_INVOKE_INTERFACE_RANGE: {
+        case OP_INVOKE_INTERFACE_RANGE:
+        case OP_INVOKE_INTERFACE_JUMBO: {
             ArmLIR *predChainingCell = &labelList[bb->taken->id];
 
             /*
@@ -2988,15 +3205,17 @@
 
             /* r1 = &retChainingCell */
             ArmLIR *addrRetChain =
-                opRegRegImm(cUnit, kOpAdd, r1, rpc, 0);
+                opRegRegImm(cUnit, kOpAdd, r1, r15pc, 0);
             addrRetChain->generic.target = (LIR *) retChainingCell;
 
             /* r2 = &predictedChainingCell */
             ArmLIR *predictedChainingCell =
-                opRegRegImm(cUnit, kOpAdd, r2, rpc, 0);
+                opRegRegImm(cUnit, kOpAdd, r2, r15pc, 0);
             predictedChainingCell->generic.target = (LIR *) predChainingCell;
 
-            genDispatchToHandler(cUnit, TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN);
+            genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
+                TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF :
+                TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN);
 
             /* return through lr - jump to the chaining cell */
             genUnconditionalBranch(cUnit, predChainingCell);
@@ -3007,12 +3226,13 @@
              */
             if (pcrLabel == NULL) {
                 int dPC = (int) (cUnit->method->insns + mir->offset);
-                pcrLabel = dvmCompilerNew(sizeof(ArmLIR), true);
+                pcrLabel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
                 pcrLabel->opcode = kArmPseudoPCReconstructionCell;
                 pcrLabel->operands[0] = dPC;
                 pcrLabel->operands[1] = mir->offset;
                 /* Insert the place holder to the growable list */
-                dvmInsertGrowableList(&cUnit->pcReconstructionList, pcrLabel);
+                dvmInsertGrowableList(&cUnit->pcReconstructionList,
+                                      (intptr_t) pcrLabel);
             }
 
             /* return through lr+2 - punt to the interpreter */
@@ -3072,7 +3292,7 @@
 
             LOAD_FUNC_ADDR(cUnit, r7, (int) dvmJitToPatchPredictedChain);
 
-            genRegCopy(cUnit, r1, rGLUE);
+            genRegCopy(cUnit, r1, r6SELF);
             genRegCopy(cUnit, r2, r9);
             genRegCopy(cUnit, r3, r10);
 
@@ -3088,7 +3308,7 @@
             opReg(cUnit, kOpBlx, r7);
 
             /* r1 = &retChainingCell */
-            addrRetChain = opRegRegImm(cUnit, kOpAdd, r1, rpc, 0);
+            addrRetChain = opRegRegImm(cUnit, kOpAdd, r1, r15pc, 0);
             addrRetChain->generic.target = (LIR *) retChainingCell;
 
             bypassRechaining->generic.target = (LIR *) addrRetChain;
@@ -3098,7 +3318,9 @@
              * r1 = &ChainingCell,
              * r4PC = callsiteDPC,
              */
-            genDispatchToHandler(cUnit, TEMPLATE_INVOKE_METHOD_NO_OPT);
+            genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
+                TEMPLATE_INVOKE_METHOD_NO_OPT_PROF :
+                TEMPLATE_INVOKE_METHOD_NO_OPT);
 #if defined(WITH_JIT_TUNING)
             gDvmJit.invokePolymorphic++;
 #endif
@@ -3106,12 +3328,13 @@
             genTrap(cUnit, mir->offset, pcrLabel);
             break;
         }
-        /* NOP */
-        case OP_INVOKE_DIRECT_EMPTY: {
-            return false;
+        case OP_INVOKE_OBJECT_INIT_RANGE: {
+            genInterpSingleStep(cUnit, mir);
+            break;
         }
         case OP_FILLED_NEW_ARRAY:
-        case OP_FILLED_NEW_ARRAY_RANGE: {
+        case OP_FILLED_NEW_ARRAY_RANGE:
+        case OP_FILLED_NEW_ARRAY_JUMBO: {
             /* Just let the interpreter deal with these */
             genInterpSingleStep(cUnit, mir);
             break;
@@ -3154,6 +3377,15 @@
             else
                 genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel);
 
+
+            if (mir->OptimizationFlags & MIR_INVOKE_METHOD_JIT) {
+                const Method *calleeMethod = mir->meta.callsiteInfo->method;
+                void *calleeAddr = dvmJitGetMethodAddr(calleeMethod->insns);
+                assert(calleeAddr);
+                genInvokeVirtualWholeMethod(cUnit, mir, calleeAddr,
+                                            retChainingCell);
+            }
+
             genInvokeVirtualCommon(cUnit, mir, methodIndex,
                                    retChainingCell,
                                    predChainingCell,
@@ -3195,7 +3427,7 @@
 static bool genInlinedCompareTo(CompilationUnit *cUnit, MIR *mir)
 {
 #if defined(USE_GLOBAL_STRING_DEFS)
-    return false;
+    return handleExecuteInlineC(cUnit, mir);
 #else
     ArmLIR *rollback;
     RegLocation rlThis = dvmCompilerGetSrc(cUnit, mir, 0);
@@ -3214,14 +3446,14 @@
     genDispatchToHandler(cUnit, TEMPLATE_STRING_COMPARETO);
     storeValue(cUnit, inlinedTarget(cUnit, mir, false),
                dvmCompilerGetReturn(cUnit));
-    return true;
+    return false;
 #endif
 }
 
 static bool genInlinedFastIndexOf(CompilationUnit *cUnit, MIR *mir)
 {
 #if defined(USE_GLOBAL_STRING_DEFS)
-    return false;
+    return handleExecuteInlineC(cUnit, mir);
 #else
     RegLocation rlThis = dvmCompilerGetSrc(cUnit, mir, 0);
     RegLocation rlChar = dvmCompilerGetSrc(cUnit, mir, 1);
@@ -3235,7 +3467,7 @@
     genDispatchToHandler(cUnit, TEMPLATE_STRING_INDEXOF);
     storeValue(cUnit, inlinedTarget(cUnit, mir, false),
                dvmCompilerGetReturn(cUnit));
-    return true;
+    return false;
 #endif
 }
 
@@ -3358,103 +3590,102 @@
 }
 
 /*
+ * JITs a call to a C function.
+ * TODO: use this for faster native method invocation for simple native
+ * methods (http://b/3069458).
+ */
+static bool handleExecuteInlineC(CompilationUnit *cUnit, MIR *mir)
+{
+    DecodedInstruction *dInsn = &mir->dalvikInsn;
+    int operation = dInsn->vB;
+    unsigned int i;
+    const InlineOperation* inLineTable = dvmGetInlineOpsTable();
+    uintptr_t fn = (int) inLineTable[operation].func;
+    if (fn == 0) {
+        dvmCompilerAbort(cUnit);
+    }
+    dvmCompilerFlushAllRegs(cUnit);   /* Everything to home location */
+    dvmCompilerClobberCallRegs(cUnit);
+    dvmCompilerClobber(cUnit, r4PC);
+    dvmCompilerClobber(cUnit, r7);
+    int offset = offsetof(Thread, retval);
+    opRegRegImm(cUnit, kOpAdd, r4PC, r6SELF, offset);
+    opImm(cUnit, kOpPush, (1<<r4PC) | (1<<r7));
+    LOAD_FUNC_ADDR(cUnit, r4PC, fn);
+    genExportPC(cUnit, mir);
+    for (i=0; i < dInsn->vA; i++) {
+        loadValueDirect(cUnit, dvmCompilerGetSrc(cUnit, mir, i), i);
+    }
+    opReg(cUnit, kOpBlx, r4PC);
+    opRegImm(cUnit, kOpAdd, r13sp, 8);
+    /* NULL? */
+    ArmLIR *branchOver = genCmpImmBranch(cUnit, kArmCondNe, r0, 0);
+    loadConstant(cUnit, r0, (int) (cUnit->method->insns + mir->offset));
+    genDispatchToHandler(cUnit, TEMPLATE_THROW_EXCEPTION_COMMON);
+    ArmLIR *target = newLIR0(cUnit, kArmPseudoTargetLabel);
+    target->defMask = ENCODE_ALL;
+    branchOver->generic.target = (LIR *) target;
+    return false;
+}
+
+/*
  * NOTE: Handles both range and non-range versions (arguments
  * have already been normalized by this point).
  */
 static bool handleExecuteInline(CompilationUnit *cUnit, MIR *mir)
 {
     DecodedInstruction *dInsn = &mir->dalvikInsn;
-    switch( mir->dalvikInsn.opcode) {
-        case OP_EXECUTE_INLINE_RANGE:
-        case OP_EXECUTE_INLINE: {
-            unsigned int i;
-            const InlineOperation* inLineTable = dvmGetInlineOpsTable();
-            int offset = offsetof(InterpState, retval);
-            int operation = dInsn->vB;
-            switch (operation) {
-                case INLINE_EMPTYINLINEMETHOD:
-                    return false;  /* Nop */
-                case INLINE_STRING_LENGTH:
-                    return genInlinedStringLength(cUnit, mir);
-                case INLINE_STRING_IS_EMPTY:
-                    return genInlinedStringIsEmpty(cUnit, mir);
-                case INLINE_MATH_ABS_INT:
-                    return genInlinedAbsInt(cUnit, mir);
-                case INLINE_MATH_ABS_LONG:
-                    return genInlinedAbsLong(cUnit, mir);
-                case INLINE_MATH_MIN_INT:
-                    return genInlinedMinMaxInt(cUnit, mir, true);
-                case INLINE_MATH_MAX_INT:
-                    return genInlinedMinMaxInt(cUnit, mir, false);
-                case INLINE_STRING_CHARAT:
-                    return genInlinedStringCharAt(cUnit, mir);
-                case INLINE_MATH_SQRT:
-                    if (genInlineSqrt(cUnit, mir))
-                        return false;
-                    else
-                        break;   /* Handle with C routine */
-                case INLINE_MATH_ABS_FLOAT:
-                    if (genInlinedAbsFloat(cUnit, mir))
-                        return false;
-                    else
-                        break;
-                case INLINE_MATH_ABS_DOUBLE:
-                    if (genInlinedAbsDouble(cUnit, mir))
-                        return false;
-                    else
-                        break;
-                case INLINE_STRING_COMPARETO:
-                    if (genInlinedCompareTo(cUnit, mir))
-                        return false;
-                    else
-                        break;
-                case INLINE_STRING_FASTINDEXOF_II:
-                    if (genInlinedFastIndexOf(cUnit, mir))
-                        return false;
-                    else
-                        break;
-                case INLINE_FLOAT_TO_RAW_INT_BITS:
-                case INLINE_INT_BITS_TO_FLOAT:
-                    return genInlinedIntFloatConversion(cUnit, mir);
-                case INLINE_DOUBLE_TO_RAW_LONG_BITS:
-                case INLINE_LONG_BITS_TO_DOUBLE:
-                    return genInlinedLongDoubleConversion(cUnit, mir);
-                case INLINE_STRING_EQUALS:
-                case INLINE_MATH_COS:
-                case INLINE_MATH_SIN:
-                case INLINE_FLOAT_TO_INT_BITS:
-                case INLINE_DOUBLE_TO_LONG_BITS:
-                    break;   /* Handle with C routine */
-                default:
-                    dvmCompilerAbort(cUnit);
-            }
-            dvmCompilerFlushAllRegs(cUnit);   /* Everything to home location */
-            dvmCompilerClobberCallRegs(cUnit);
-            dvmCompilerClobber(cUnit, r4PC);
-            dvmCompilerClobber(cUnit, r7);
-            opRegRegImm(cUnit, kOpAdd, r4PC, rGLUE, offset);
-            opImm(cUnit, kOpPush, (1<<r4PC) | (1<<r7));
-            LOAD_FUNC_ADDR(cUnit, r4PC, (int)inLineTable[operation].func);
-            genExportPC(cUnit, mir);
-            for (i=0; i < dInsn->vA; i++) {
-                loadValueDirect(cUnit, dvmCompilerGetSrc(cUnit, mir, i), i);
-            }
-            opReg(cUnit, kOpBlx, r4PC);
-            opRegImm(cUnit, kOpAdd, r13, 8);
-            /* NULL? */
-            ArmLIR *branchOver = genCmpImmBranch(cUnit, kArmCondNe, r0, 0);
-            loadConstant(cUnit, r0,
-                         (int) (cUnit->method->insns + mir->offset));
-            genDispatchToHandler(cUnit, TEMPLATE_THROW_EXCEPTION_COMMON);
-            ArmLIR *target = newLIR0(cUnit, kArmPseudoTargetLabel);
-            target->defMask = ENCODE_ALL;
-            branchOver->generic.target = (LIR *) target;
-            break;
-        }
-        default:
-            return true;
+    assert(dInsn->opcode == OP_EXECUTE_INLINE_RANGE ||
+           dInsn->opcode == OP_EXECUTE_INLINE);
+    switch (dInsn->vB) {
+        case INLINE_EMPTYINLINEMETHOD:
+            return false;  /* Nop */
+
+        /* These ones we potentially JIT inline. */
+        case INLINE_STRING_LENGTH:
+            return genInlinedStringLength(cUnit, mir);
+        case INLINE_STRING_IS_EMPTY:
+            return genInlinedStringIsEmpty(cUnit, mir);
+        case INLINE_MATH_ABS_INT:
+            return genInlinedAbsInt(cUnit, mir);
+        case INLINE_MATH_ABS_LONG:
+            return genInlinedAbsLong(cUnit, mir);
+        case INLINE_MATH_MIN_INT:
+            return genInlinedMinMaxInt(cUnit, mir, true);
+        case INLINE_MATH_MAX_INT:
+            return genInlinedMinMaxInt(cUnit, mir, false);
+        case INLINE_STRING_CHARAT:
+            return genInlinedStringCharAt(cUnit, mir);
+        case INLINE_MATH_SQRT:
+            return genInlineSqrt(cUnit, mir);
+        case INLINE_MATH_ABS_FLOAT:
+            return genInlinedAbsFloat(cUnit, mir);
+        case INLINE_MATH_ABS_DOUBLE:
+            return genInlinedAbsDouble(cUnit, mir);
+        case INLINE_STRING_COMPARETO:
+            return genInlinedCompareTo(cUnit, mir);
+        case INLINE_STRING_FASTINDEXOF_II:
+            return genInlinedFastIndexOf(cUnit, mir);
+        case INLINE_FLOAT_TO_RAW_INT_BITS:
+        case INLINE_INT_BITS_TO_FLOAT:
+            return genInlinedIntFloatConversion(cUnit, mir);
+        case INLINE_DOUBLE_TO_RAW_LONG_BITS:
+        case INLINE_LONG_BITS_TO_DOUBLE:
+            return genInlinedLongDoubleConversion(cUnit, mir);
+
+        /*
+         * These ones we just JIT a call to a C function for.
+         * TODO: special-case these in the other "invoke" call paths.
+         */
+        case INLINE_STRING_EQUALS:
+        case INLINE_MATH_COS:
+        case INLINE_MATH_SIN:
+        case INLINE_FLOAT_TO_INT_BITS:
+        case INLINE_DOUBLE_TO_LONG_BITS:
+            return handleExecuteInlineC(cUnit, mir);
     }
-    return false;
+    dvmCompilerAbort(cUnit);
+    return false; // Not reachable; keeps compiler happy.
 }
 
 static bool handleFmt51l(CompilationUnit *cUnit, MIR *mir)
@@ -3506,11 +3737,11 @@
      * instructions fit the predefined cell size.
      */
     insertChainingSwitch(cUnit);
-    newLIR3(cUnit, kThumbLdrRRI5, r0, rGLUE,
-            offsetof(InterpState,
+    newLIR3(cUnit, kThumbLdrRRI5, r0, r6SELF,
+            offsetof(Thread,
                      jitToInterpEntries.dvmJitToInterpNormal) >> 2);
     newLIR1(cUnit, kThumbBlxR, r0);
-    addWordData(cUnit, (int) (cUnit->method->insns + offset), true);
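+    /* NULL here is addWordData's new constant-list argument - the resume
+     * address word is emitted inline in the code stream. */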
+    addWordData(cUnit, NULL, (int) (cUnit->method->insns + offset));
 }
 
 /*
@@ -3525,14 +3756,13 @@
      * instructions fit the predefined cell size.
      */
     insertChainingSwitch(cUnit);
-    newLIR3(cUnit, kThumbLdrRRI5, r0, rGLUE,
-            offsetof(InterpState,
+    newLIR3(cUnit, kThumbLdrRRI5, r0, r6SELF,
+            offsetof(Thread,
                      jitToInterpEntries.dvmJitToInterpTraceSelect) >> 2);
     newLIR1(cUnit, kThumbBlxR, r0);
-    addWordData(cUnit, (int) (cUnit->method->insns + offset), true);
+    addWordData(cUnit, NULL, (int) (cUnit->method->insns + offset));
 }
 
-#if defined(WITH_SELF_VERIFICATION) || defined(WITH_JIT_TUNING)
 /* Chaining cell for branches that branch back into the same basic block */
 static void handleBackwardBranchChainingCell(CompilationUnit *cUnit,
                                              unsigned int offset)
@@ -3543,18 +3773,17 @@
      */
     insertChainingSwitch(cUnit);
 #if defined(WITH_SELF_VERIFICATION)
-    newLIR3(cUnit, kThumbLdrRRI5, r0, rGLUE,
-        offsetof(InterpState,
+    newLIR3(cUnit, kThumbLdrRRI5, r0, r6SELF,
+        offsetof(Thread,
                  jitToInterpEntries.dvmJitToInterpBackwardBranch) >> 2);
 #else
-    newLIR3(cUnit, kThumbLdrRRI5, r0, rGLUE,
-        offsetof(InterpState, jitToInterpEntries.dvmJitToInterpNormal) >> 2);
+    newLIR3(cUnit, kThumbLdrRRI5, r0, r6SELF,
+        offsetof(Thread, jitToInterpEntries.dvmJitToInterpNormal) >> 2);
 #endif
     newLIR1(cUnit, kThumbBlxR, r0);
-    addWordData(cUnit, (int) (cUnit->method->insns + offset), true);
+    addWordData(cUnit, NULL, (int) (cUnit->method->insns + offset));
 }
 
-#endif
 /* Chaining cell for monomorphic method invocations. */
 static void handleInvokeSingletonChainingCell(CompilationUnit *cUnit,
                                               const Method *callee)
@@ -3564,11 +3793,11 @@
      * instructions fit the predefined cell size.
      */
     insertChainingSwitch(cUnit);
-    newLIR3(cUnit, kThumbLdrRRI5, r0, rGLUE,
-            offsetof(InterpState,
+    newLIR3(cUnit, kThumbLdrRRI5, r0, r6SELF,
+            offsetof(Thread,
                      jitToInterpEntries.dvmJitToInterpTraceSelect) >> 2);
     newLIR1(cUnit, kThumbBlxR, r0);
-    addWordData(cUnit, (int) (callee->insns), true);
+    addWordData(cUnit, NULL, (int) (callee->insns));
 }
 
 /* Chaining cell for monomorphic method invocations. */
@@ -3576,16 +3805,16 @@
 {
 
     /* Should not be executed in the initial state */
-    addWordData(cUnit, PREDICTED_CHAIN_BX_PAIR_INIT, true);
+    addWordData(cUnit, NULL, PREDICTED_CHAIN_BX_PAIR_INIT);
     /* To be filled: class */
-    addWordData(cUnit, PREDICTED_CHAIN_CLAZZ_INIT, true);
+    addWordData(cUnit, NULL, PREDICTED_CHAIN_CLAZZ_INIT);
     /* To be filled: method */
-    addWordData(cUnit, PREDICTED_CHAIN_METHOD_INIT, true);
+    addWordData(cUnit, NULL, PREDICTED_CHAIN_METHOD_INIT);
     /*
      * Rechain count. The initial value of 0 here will trigger chaining upon
      * the first invocation of this callsite.
      */
-    addWordData(cUnit, PREDICTED_CHAIN_COUNTER_INIT, true);
+    addWordData(cUnit, NULL, PREDICTED_CHAIN_COUNTER_INIT);
 }
 
 /* Load the Dalvik PC into r0 and jump to the specified target */
@@ -3813,7 +4042,7 @@
 
     rlThis = loadValue(cUnit, rlThis, kCoreReg);
     int regPredictedClass = dvmCompilerAllocTemp(cUnit);
-    loadConstant(cUnit, regPredictedClass, (int) callsiteInfo->clazz);
+    loadClassPointer(cUnit, regPredictedClass, (int) callsiteInfo);
     genNullCheck(cUnit, rlThis.sRegLow, rlThis.lowReg, mir->offset,
                  NULL);/* null object? */
     int regActualClass = dvmCompilerAllocTemp(cUnit);
@@ -3830,8 +4059,8 @@
 static void handleExtendedMIR(CompilationUnit *cUnit, MIR *mir)
 {
     int opOffset = mir->dalvikInsn.opcode - kMirOpFirst;
-    char *msg = dvmCompilerNew(strlen(extendedMIROpNames[opOffset]) + 1,
-                               false);
+    char *msg = (char *)dvmCompilerNew(strlen(extendedMIROpNames[opOffset]) + 1,
+                                        false);
     strcpy(msg, extendedMIROpNames[opOffset]);
     newLIR1(cUnit, kArmPseudoExtended, (int) msg);
 
@@ -3878,25 +4107,25 @@
                                 ArmLIR *bodyLabel)
 {
     /* Set up the place holder to reconstruct this Dalvik PC */
-    ArmLIR *pcrLabel = dvmCompilerNew(sizeof(ArmLIR), true);
+    ArmLIR *pcrLabel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
     pcrLabel->opcode = kArmPseudoPCReconstructionCell;
     pcrLabel->operands[0] =
         (int) (cUnit->method->insns + entry->startOffset);
     pcrLabel->operands[1] = entry->startOffset;
     /* Insert the place holder to the growable list */
-    dvmInsertGrowableList(&cUnit->pcReconstructionList, pcrLabel);
+    dvmInsertGrowableList(&cUnit->pcReconstructionList, (intptr_t) pcrLabel);
 
     /*
      * Next, create two branches - one branch over to the loop body and the
      * other branch to the PCR cell to punt.
      */
-    ArmLIR *branchToBody = dvmCompilerNew(sizeof(ArmLIR), true);
+    ArmLIR *branchToBody = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
     branchToBody->opcode = kThumbBUncond;
     branchToBody->generic.target = (LIR *) bodyLabel;
     setupResourceMasks(branchToBody);
     cUnit->loopAnalysis->branchToBody = (LIR *) branchToBody;
 
-    ArmLIR *branchToPCR = dvmCompilerNew(sizeof(ArmLIR), true);
+    ArmLIR *branchToPCR = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
     branchToPCR->opcode = kThumbBUncond;
     branchToPCR->generic.target = (LIR *) pcrLabel;
     setupResourceMasks(branchToPCR);
@@ -3926,7 +4155,7 @@
 {
     /* Used to hold the labels of each block */
     ArmLIR *labelList =
-        dvmCompilerNew(sizeof(ArmLIR) * cUnit->numBlocks, true);
+        (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR) * cUnit->numBlocks, true);
     GrowableList chainingListByType[kChainingCellGap];
     int i;
 
@@ -3937,51 +4166,27 @@
         dvmInitGrowableList(&chainingListByType[i], 2);
     }
 
-    BasicBlock **blockList = cUnit->blockList;
+    /* Clear the visited flag for each block */
+    dvmCompilerDataFlowAnalysisDispatcher(cUnit, dvmCompilerClearVisitedFlag,
+                                          kAllNodes, false /* isIterative */);
 
-    if (cUnit->executionCount) {
-        /*
-         * Reserve 6 bytes at the beginning of the trace
-         *        +----------------------------+
-         *        | execution count (4 bytes)  |
-         *        +----------------------------+
-         *        | chain cell offset (2 bytes)|
-         *        +----------------------------+
-         * ...and then code to increment the execution
-         * count:
-         *       mov   r0, pc       @ move adr of "mov r0,pc" + 4 to r0
-         *       sub   r0, #10      @ back up to addr of executionCount
-         *       ldr   r1, [r0]
-         *       add   r1, #1
-         *       str   r1, [r0]
-         */
-        newLIR1(cUnit, kArm16BitData, 0);
-        newLIR1(cUnit, kArm16BitData, 0);
-        cUnit->chainCellOffsetLIR =
-            (LIR *) newLIR1(cUnit, kArm16BitData, CHAIN_CELL_OFFSET_TAG);
-        cUnit->headerSize = 6;
-        /* Thumb instruction used directly here to ensure correct size */
-        newLIR2(cUnit, kThumbMovRR_H2L, r0, rpc);
-        newLIR2(cUnit, kThumbSubRI8, r0, 10);
-        newLIR3(cUnit, kThumbLdrRRI5, r1, r0, 0);
-        newLIR2(cUnit, kThumbAddRI8, r1, 1);
-        newLIR3(cUnit, kThumbStrRRI5, r1, r0, 0);
-    } else {
-         /* Just reserve 2 bytes for the chain cell offset */
-        cUnit->chainCellOffsetLIR =
-            (LIR *) newLIR1(cUnit, kArm16BitData, CHAIN_CELL_OFFSET_TAG);
-        cUnit->headerSize = 2;
-    }
+    GrowableListIterator iterator;
+    dvmGrowableListIteratorInit(&cUnit->blockList, &iterator);
+
+    /* Traces start with a profiling entry point.  Generate it here */
+    cUnit->profileCodeSize = genTraceProfileEntry(cUnit);
 
     /* Handle the content in each basic block */
-    for (i = 0; i < cUnit->numBlocks; i++) {
-        blockList[i]->visited = true;
+    for (i = 0; ; i++) {
         MIR *mir;
+        BasicBlock *bb = (BasicBlock *) dvmGrowableListIteratorNext(&iterator);
+        if (bb == NULL) break;
+        if (bb->visited == true) continue;
 
-        labelList[i].operands[0] = blockList[i]->startOffset;
+        labelList[i].operands[0] = bb->startOffset;
 
-        if (blockList[i]->blockType >= kChainingCellGap) {
-            if (blockList[i]->isFallThroughFromInvoke == true) {
+        if (bb->blockType >= kChainingCellGap) {
+            if (bb->isFallThroughFromInvoke == true) {
                 /* Align this block first since it is a return chaining cell */
                 newLIR0(cUnit, kArmPseudoPseudoAlign4);
             }
@@ -3992,56 +4197,53 @@
             dvmCompilerAppendLIR(cUnit, (LIR *) &labelList[i]);
         }
 
-        if (blockList[i]->blockType == kTraceEntryBlock) {
+        if (bb->blockType == kTraceEntryBlock) {
             labelList[i].opcode = kArmPseudoEntryBlock;
-            if (blockList[i]->firstMIRInsn == NULL) {
+            if (bb->firstMIRInsn == NULL) {
                 continue;
             } else {
-              setupLoopEntryBlock(cUnit, blockList[i],
-                                  &labelList[blockList[i]->fallThrough->id]);
+              setupLoopEntryBlock(cUnit, bb,
+                                  &labelList[bb->fallThrough->id]);
             }
-        } else if (blockList[i]->blockType == kTraceExitBlock) {
+        } else if (bb->blockType == kTraceExitBlock) {
             labelList[i].opcode = kArmPseudoExitBlock;
             goto gen_fallthrough;
-        } else if (blockList[i]->blockType == kDalvikByteCode) {
+        } else if (bb->blockType == kDalvikByteCode) {
             labelList[i].opcode = kArmPseudoNormalBlockLabel;
             /* Reset the register state */
             dvmCompilerResetRegPool(cUnit);
             dvmCompilerClobberAllRegs(cUnit);
             dvmCompilerResetNullCheck(cUnit);
         } else {
-            switch (blockList[i]->blockType) {
+            switch (bb->blockType) {
                 case kChainingCellNormal:
                     labelList[i].opcode = kArmPseudoChainingCellNormal;
                     /* handle the codegen later */
                     dvmInsertGrowableList(
-                        &chainingListByType[kChainingCellNormal], (void *) i);
+                        &chainingListByType[kChainingCellNormal], i);
                     break;
                 case kChainingCellInvokeSingleton:
                     labelList[i].opcode =
                         kArmPseudoChainingCellInvokeSingleton;
                     labelList[i].operands[0] =
-                        (int) blockList[i]->containingMethod;
+                        (int) bb->containingMethod;
                     /* handle the codegen later */
                     dvmInsertGrowableList(
-                        &chainingListByType[kChainingCellInvokeSingleton],
-                        (void *) i);
+                        &chainingListByType[kChainingCellInvokeSingleton], i);
                     break;
                 case kChainingCellInvokePredicted:
                     labelList[i].opcode =
                         kArmPseudoChainingCellInvokePredicted;
                     /* handle the codegen later */
                     dvmInsertGrowableList(
-                        &chainingListByType[kChainingCellInvokePredicted],
-                        (void *) i);
+                        &chainingListByType[kChainingCellInvokePredicted], i);
                     break;
                 case kChainingCellHot:
                     labelList[i].opcode =
                         kArmPseudoChainingCellHot;
                     /* handle the codegen later */
                     dvmInsertGrowableList(
-                        &chainingListByType[kChainingCellHot],
-                        (void *) i);
+                        &chainingListByType[kChainingCellHot], i);
                     break;
                 case kPCReconstruction:
                     /* Make sure exception handling block is next */
@@ -4053,22 +4255,20 @@
                 case kExceptionHandling:
                     labelList[i].opcode = kArmPseudoEHBlockLabel;
                     if (cUnit->pcReconstructionList.numUsed) {
-                        loadWordDisp(cUnit, rGLUE, offsetof(InterpState,
+                        loadWordDisp(cUnit, r6SELF, offsetof(Thread,
                                      jitToInterpEntries.dvmJitToInterpPunt),
                                      r1);
                         opReg(cUnit, kOpBlx, r1);
                     }
                     break;
-#if defined(WITH_SELF_VERIFICATION) || defined(WITH_JIT_TUNING)
                 case kChainingCellBackwardBranch:
                     labelList[i].opcode =
                         kArmPseudoChainingCellBackwardBranch;
                     /* handle the codegen later */
                     dvmInsertGrowableList(
                         &chainingListByType[kChainingCellBackwardBranch],
-                        (void *) i);
+                        i);
                     break;
-#endif
                 default:
                     break;
             }
@@ -4076,173 +4276,193 @@
         }
 
         ArmLIR *headLIR = NULL;
+        BasicBlock *nextBB = bb;
 
-        for (mir = blockList[i]->firstMIRInsn; mir; mir = mir->next) {
+        /*
+         * Try to build a longer optimization unit. Currently if the previous
+         * block ends with a goto, we continue adding instructions and don't
+         * reset the register allocation pool.
+         */
+        for (; nextBB != NULL; nextBB = cUnit->nextCodegenBlock) {
+            bb = nextBB;
+            bb->visited = true;
+            cUnit->nextCodegenBlock = NULL;
 
-            dvmCompilerResetRegPool(cUnit);
-            if (gDvmJit.disableOpt & (1 << kTrackLiveTemps)) {
-                dvmCompilerClobberAllRegs(cUnit);
-            }
+            for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
 
-            if (gDvmJit.disableOpt & (1 << kSuppressLoads)) {
-                dvmCompilerResetDefTracking(cUnit);
-            }
-
-            if (mir->dalvikInsn.opcode >= kMirOpFirst) {
-                handleExtendedMIR(cUnit, mir);
-                continue;
-            }
-
-
-            Opcode dalvikOpcode = mir->dalvikInsn.opcode;
-            InstructionFormat dalvikFormat = dexGetFormatFromOpcode(dalvikOpcode);
-            char *note;
-            if (mir->OptimizationFlags & MIR_INLINED) {
-                note = " (I)";
-            } else if (mir->OptimizationFlags & MIR_INLINED_PRED) {
-                note = " (PI)";
-            } else if (mir->OptimizationFlags & MIR_CALLEE) {
-                note = " (C)";
-            } else {
-                note = NULL;
-            }
-
-            ArmLIR *boundaryLIR;
-
-            /*
-             * Don't generate the boundary LIR unless we are debugging this
-             * trace or we need a scheduling barrier.
-             */
-            if (headLIR == NULL || cUnit->printMe == true) {
-                boundaryLIR =
-                    newLIR2(cUnit, kArmPseudoDalvikByteCodeBoundary,
-                            mir->offset,
-                            (int) dvmCompilerGetDalvikDisassembly(
-                                &mir->dalvikInsn, note));
-                /* Remember the first LIR for this block */
-                if (headLIR == NULL) {
-                    headLIR = boundaryLIR;
-                    /* Set the first boundaryLIR as a scheduling barrier */
-                    headLIR->defMask = ENCODE_ALL;
+                dvmCompilerResetRegPool(cUnit);
+                if (gDvmJit.disableOpt & (1 << kTrackLiveTemps)) {
+                    dvmCompilerClobberAllRegs(cUnit);
                 }
-            }
 
-            /* Don't generate the SSA annotation unless verbose mode is on */
-            if (cUnit->printMe && mir->ssaRep) {
-                char *ssaString = dvmCompilerGetSSAString(cUnit, mir->ssaRep);
-                newLIR1(cUnit, kArmPseudoSSARep, (int) ssaString);
-            }
+                if (gDvmJit.disableOpt & (1 << kSuppressLoads)) {
+                    dvmCompilerResetDefTracking(cUnit);
+                }
 
-            bool notHandled;
-            /*
-             * Debugging: screen the opcode first to see if it is in the
-             * do[-not]-compile list
-             */
-            bool singleStepMe = SINGLE_STEP_OP(dalvikOpcode);
+                if (mir->dalvikInsn.opcode >= kMirOpFirst) {
+                    handleExtendedMIR(cUnit, mir);
+                    continue;
+                }
+
+
+                Opcode dalvikOpcode = mir->dalvikInsn.opcode;
+                InstructionFormat dalvikFormat =
+                    dexGetFormatFromOpcode(dalvikOpcode);
+                char *note;
+                if (mir->OptimizationFlags & MIR_INLINED) {
+                    note = " (I)";
+                } else if (mir->OptimizationFlags & MIR_INLINED_PRED) {
+                    note = " (PI)";
+                } else if (mir->OptimizationFlags & MIR_CALLEE) {
+                    note = " (C)";
+                } else {
+                    note = NULL;
+                }
+
+                ArmLIR *boundaryLIR;
+
+                /*
+                 * Don't generate the boundary LIR unless we are debugging this
+                 * trace or we need a scheduling barrier.
+                 */
+                if (headLIR == NULL || cUnit->printMe == true) {
+                    boundaryLIR =
+                        newLIR2(cUnit, kArmPseudoDalvikByteCodeBoundary,
+                                mir->offset,
+                                (int) dvmCompilerGetDalvikDisassembly(
+                                    &mir->dalvikInsn, note));
+                    /* Remember the first LIR for this block */
+                    if (headLIR == NULL) {
+                        headLIR = boundaryLIR;
+                        /* Set the first boundaryLIR as a scheduling barrier */
+                        headLIR->defMask = ENCODE_ALL;
+                    }
+                }
+
+                /*
+                 * Don't generate the SSA annotation unless verbose mode is on
+                 */
+                if (cUnit->printMe && mir->ssaRep) {
+                    char *ssaString = dvmCompilerGetSSAString(cUnit,
+                                                              mir->ssaRep);
+                    newLIR1(cUnit, kArmPseudoSSARep, (int) ssaString);
+                }
+
+                bool notHandled;
+                /*
+                 * Debugging: screen the opcode first to see if it is in the
+                 * do[-not]-compile list
+                 */
+                bool singleStepMe = SINGLE_STEP_OP(dalvikOpcode);
 #if defined(WITH_SELF_VERIFICATION)
-          if (singleStepMe == false) {
-              singleStepMe = selfVerificationPuntOps(mir);
-          }
+              if (singleStepMe == false) {
+                  singleStepMe = selfVerificationPuntOps(mir);
+              }
 #endif
-            if (singleStepMe || cUnit->allSingleStep) {
-                notHandled = false;
-                genInterpSingleStep(cUnit, mir);
-            } else {
-                opcodeCoverage[dalvikOpcode]++;
-                switch (dalvikFormat) {
-                    case kFmt10t:
-                    case kFmt20t:
-                    case kFmt30t:
-                        notHandled = handleFmt10t_Fmt20t_Fmt30t(cUnit,
-                                  mir, blockList[i], labelList);
-                        break;
-                    case kFmt10x:
-                        notHandled = handleFmt10x(cUnit, mir);
-                        break;
-                    case kFmt11n:
-                    case kFmt31i:
-                        notHandled = handleFmt11n_Fmt31i(cUnit, mir);
-                        break;
-                    case kFmt11x:
-                        notHandled = handleFmt11x(cUnit, mir);
-                        break;
-                    case kFmt12x:
-                        notHandled = handleFmt12x(cUnit, mir);
-                        break;
-                    case kFmt20bc:
-                        notHandled = handleFmt20bc(cUnit, mir);
-                        break;
-                    case kFmt21c:
-                    case kFmt31c:
-                        notHandled = handleFmt21c_Fmt31c(cUnit, mir);
-                        break;
-                    case kFmt21h:
-                        notHandled = handleFmt21h(cUnit, mir);
-                        break;
-                    case kFmt21s:
-                        notHandled = handleFmt21s(cUnit, mir);
-                        break;
-                    case kFmt21t:
-                        notHandled = handleFmt21t(cUnit, mir, blockList[i],
-                                                  labelList);
-                        break;
-                    case kFmt22b:
-                    case kFmt22s:
-                        notHandled = handleFmt22b_Fmt22s(cUnit, mir);
-                        break;
-                    case kFmt22c:
-                        notHandled = handleFmt22c(cUnit, mir);
-                        break;
-                    case kFmt22cs:
-                        notHandled = handleFmt22cs(cUnit, mir);
-                        break;
-                    case kFmt22t:
-                        notHandled = handleFmt22t(cUnit, mir, blockList[i],
-                                                  labelList);
-                        break;
-                    case kFmt22x:
-                    case kFmt32x:
-                        notHandled = handleFmt22x_Fmt32x(cUnit, mir);
-                        break;
-                    case kFmt23x:
-                        notHandled = handleFmt23x(cUnit, mir);
-                        break;
-                    case kFmt31t:
-                        notHandled = handleFmt31t(cUnit, mir);
-                        break;
-                    case kFmt3rc:
-                    case kFmt35c:
-                        notHandled = handleFmt35c_3rc(cUnit, mir, blockList[i],
+                if (singleStepMe || cUnit->allSingleStep) {
+                    notHandled = false;
+                    genInterpSingleStep(cUnit, mir);
+                } else {
+                    opcodeCoverage[dalvikOpcode]++;
+                    switch (dalvikFormat) {
+                        case kFmt10t:
+                        case kFmt20t:
+                        case kFmt30t:
+                            notHandled = handleFmt10t_Fmt20t_Fmt30t(cUnit,
+                                      mir, bb, labelList);
+                            break;
+                        case kFmt10x:
+                            notHandled = handleFmt10x(cUnit, mir);
+                            break;
+                        case kFmt11n:
+                        case kFmt31i:
+                            notHandled = handleFmt11n_Fmt31i(cUnit, mir);
+                            break;
+                        case kFmt11x:
+                            notHandled = handleFmt11x(cUnit, mir);
+                            break;
+                        case kFmt12x:
+                            notHandled = handleFmt12x(cUnit, mir);
+                            break;
+                        case kFmt20bc:
+                        case kFmt40sc:
+                            notHandled = handleFmt20bc_Fmt40sc(cUnit, mir);
+                            break;
+                        case kFmt21c:
+                        case kFmt31c:
+                        case kFmt41c:
+                            notHandled = handleFmt21c_Fmt31c_Fmt41c(cUnit, mir);
+                            break;
+                        case kFmt21h:
+                            notHandled = handleFmt21h(cUnit, mir);
+                            break;
+                        case kFmt21s:
+                            notHandled = handleFmt21s(cUnit, mir);
+                            break;
+                        case kFmt21t:
+                            notHandled = handleFmt21t(cUnit, mir, bb,
                                                       labelList);
-                        break;
-                    case kFmt3rms:
-                    case kFmt35ms:
-                        notHandled = handleFmt35ms_3rms(cUnit, mir,blockList[i],
-                                                        labelList);
-                        break;
-                    case kFmt35mi:
-                    case kFmt3rmi:
-                        notHandled = handleExecuteInline(cUnit, mir);
-                        break;
-                    case kFmt51l:
-                        notHandled = handleFmt51l(cUnit, mir);
-                        break;
-                    default:
-                        notHandled = true;
-                        break;
+                            break;
+                        case kFmt22b:
+                        case kFmt22s:
+                            notHandled = handleFmt22b_Fmt22s(cUnit, mir);
+                            break;
+                        case kFmt22c:
+                        case kFmt52c:
+                            notHandled = handleFmt22c_Fmt52c(cUnit, mir);
+                            break;
+                        case kFmt22cs:
+                            notHandled = handleFmt22cs(cUnit, mir);
+                            break;
+                        case kFmt22t:
+                            notHandled = handleFmt22t(cUnit, mir, bb,
+                                                      labelList);
+                            break;
+                        case kFmt22x:
+                        case kFmt32x:
+                            notHandled = handleFmt22x_Fmt32x(cUnit, mir);
+                            break;
+                        case kFmt23x:
+                            notHandled = handleFmt23x(cUnit, mir);
+                            break;
+                        case kFmt31t:
+                            notHandled = handleFmt31t(cUnit, mir);
+                            break;
+                        case kFmt3rc:
+                        case kFmt35c:
+                        case kFmt5rc:
+                            notHandled = handleFmt35c_3rc_5rc(cUnit, mir, bb,
+                                                          labelList);
+                            break;
+                        case kFmt3rms:
+                        case kFmt35ms:
+                            notHandled = handleFmt35ms_3rms(cUnit, mir, bb,
+                                                            labelList);
+                            break;
+                        case kFmt35mi:
+                        case kFmt3rmi:
+                            notHandled = handleExecuteInline(cUnit, mir);
+                            break;
+                        case kFmt51l:
+                            notHandled = handleFmt51l(cUnit, mir);
+                            break;
+                        default:
+                            notHandled = true;
+                            break;
+                    }
                 }
-            }
-            if (notHandled) {
-                LOGE("%#06x: Opcode 0x%x (%s) / Fmt %d not handled\n",
-                     mir->offset,
-                     dalvikOpcode, dexGetOpcodeName(dalvikOpcode),
-                     dalvikFormat);
-                dvmCompilerAbort(cUnit);
-                break;
+                if (notHandled) {
+                    LOGE("%#06x: Opcode 0x%x (%s) / Fmt %d not handled\n",
+                         mir->offset,
+                         dalvikOpcode, dexGetOpcodeName(dalvikOpcode),
+                         dalvikFormat);
+                    dvmCompilerAbort(cUnit);
+                    break;
+                }
             }
         }
 
-        if (blockList[i]->blockType == kTraceEntryBlock) {
+        if (bb->blockType == kTraceEntryBlock) {
             dvmCompilerAppendLIR(cUnit,
                                  (LIR *) cUnit->loopAnalysis->branchToBody);
             dvmCompilerAppendLIR(cUnit,
@@ -4263,11 +4483,9 @@
          * Check if the block is terminated due to trace length constraint -
          * insert an unconditional branch to the chaining cell.
          */
-        if (blockList[i]->needFallThroughBranch) {
-            genUnconditionalBranch(cUnit,
-                                   &labelList[blockList[i]->fallThrough->id]);
+        if (bb->needFallThroughBranch) {
+            genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
         }
-
     }
 
     /* Handle the chaining cells in predefined order */
@@ -4286,6 +4504,9 @@
 
         for (j = 0; j < chainingListByType[i].numUsed; j++) {
             int blockId = blockIdList[j];
+            BasicBlock *chainingBlock =
+                (BasicBlock *) dvmGrowableListGetElement(&cUnit->blockList,
+                                                         blockId);
 
             /* Align this chaining cell first */
             newLIR0(cUnit, kArmPseudoPseudoAlign4);
@@ -4294,30 +4515,26 @@
             dvmCompilerAppendLIR(cUnit, (LIR *) &labelList[blockId]);
 
 
-            switch (blockList[blockId]->blockType) {
+            switch (chainingBlock->blockType) {
                 case kChainingCellNormal:
-                    handleNormalChainingCell(cUnit,
-                      blockList[blockId]->startOffset);
+                    handleNormalChainingCell(cUnit, chainingBlock->startOffset);
                     break;
                 case kChainingCellInvokeSingleton:
                     handleInvokeSingletonChainingCell(cUnit,
-                        blockList[blockId]->containingMethod);
+                        chainingBlock->containingMethod);
                     break;
                 case kChainingCellInvokePredicted:
                     handleInvokePredictedChainingCell(cUnit);
                     break;
                 case kChainingCellHot:
-                    handleHotChainingCell(cUnit,
-                        blockList[blockId]->startOffset);
+                    handleHotChainingCell(cUnit, chainingBlock->startOffset);
                     break;
-#if defined(WITH_SELF_VERIFICATION) || defined(WITH_JIT_TUNING)
                 case kChainingCellBackwardBranch:
                     handleBackwardBranchChainingCell(cUnit,
-                        blockList[blockId]->startOffset);
+                        chainingBlock->startOffset);
                     break;
-#endif
                 default:
-                    LOGE("Bad blocktype %d", blockList[blockId]->blockType);
+                    LOGE("Bad blocktype %d", chainingBlock->blockType);
                     dvmCompilerAbort(cUnit);
             }
         }
@@ -4332,7 +4549,7 @@
      */
     if (cUnit->switchOverflowPad) {
         loadConstant(cUnit, r0, (int) cUnit->switchOverflowPad);
-        loadWordDisp(cUnit, rGLUE, offsetof(InterpState,
+        loadWordDisp(cUnit, r6SELF, offsetof(Thread,
                      jitToInterpEntries.dvmJitToInterpNoChain), r2);
         opRegReg(cUnit, kOpAdd, r1, r1);
         opRegRegReg(cUnit, kOpAdd, r4PC, r0, r1);
@@ -4349,10 +4566,15 @@
 #endif
 }
 
-/* Accept the work and start compiling */
+/*
+ * Accept the work and start compiling.  Returns true if compilation
+ * is attempted.
+ */
 bool dvmCompilerDoWork(CompilerWorkOrder *work)
 {
-    bool res;
+    JitTraceDescription *desc;
+    bool isCompile;
+    bool success = true;
 
     if (gDvmJit.codeCacheFull) {
         return false;
@@ -4360,25 +4582,35 @@
 
     switch (work->kind) {
         case kWorkOrderTrace:
+            isCompile = true;
             /* Start compilation with maximally allowed trace length */
-            res = dvmCompileTrace(work->info, JIT_MAX_TRACE_LEN, &work->result,
-                                  work->bailPtr, 0 /* no hints */);
+            desc = (JitTraceDescription *)work->info;
+            success = dvmCompileTrace(desc, JIT_MAX_TRACE_LEN, &work->result,
+                                        work->bailPtr, 0 /* no hints */);
             break;
         case kWorkOrderTraceDebug: {
             bool oldPrintMe = gDvmJit.printMe;
             gDvmJit.printMe = true;
+            isCompile = true;
             /* Start compilation with maximally allowed trace length */
-            res = dvmCompileTrace(work->info, JIT_MAX_TRACE_LEN, &work->result,
-                                  work->bailPtr, 0 /* no hints */);
+            desc = (JitTraceDescription *)work->info;
+            success = dvmCompileTrace(desc, JIT_MAX_TRACE_LEN, &work->result,
+                                        work->bailPtr, 0 /* no hints */);
             gDvmJit.printMe = oldPrintMe;
             break;
         }
+        case kWorkOrderProfileMode:
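+            /* Just switch the trace profiling mode; nothing is compiled */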
+            dvmJitChangeProfileMode((TraceProfilingModes)work->info);
+            isCompile = false;
+            break;
         default:
-            res = false;
+            isCompile = false;
             LOGE("Jit: unknown work order type");
             assert(0);  // Bail if debug build, discard otherwise
     }
-    return res;
+    if (!success)
+        work->result.codeAddress = NULL;
+    return isCompile;
 }
 
 /* Architectural-specific debugging helpers go here */
@@ -4449,6 +4681,11 @@
                       templateEntryOffsets[TEMPLATE_INTERPRET]);
 }
 
+JitInstructionSetType dvmCompilerGetInterpretTemplateSet()
+{
+    return DALVIK_JIT_ARM;
+}
+
 /* Needed by the Assembler */
 void dvmCompilerSetupResourceMasks(ArmLIR *lir)
 {
diff --git a/vm/compiler/codegen/arm/FP/Thumb2VFP.c b/vm/compiler/codegen/arm/FP/Thumb2VFP.c
index f0a5198..61698c2 100644
--- a/vm/compiler/codegen/arm/FP/Thumb2VFP.c
+++ b/vm/compiler/codegen/arm/FP/Thumb2VFP.c
@@ -203,7 +203,7 @@
     label->defMask = ENCODE_ALL;
     branch->generic.target = (LIR *)label;
     storeValueWide(cUnit, rlDest, rlResult);
-    return true;
+    return false;
 }
 
 static bool genCmpFP(CompilationUnit *cUnit, MIR *mir, RegLocation rlDest,
@@ -251,11 +251,16 @@
     }
     assert(!FPREG(rlResult.lowReg));
     newLIR0(cUnit, kThumb2Fmstat);
+
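+    /*
+     * The barriers below keep the local optimizer from moving instructions
+     * into or out of the two IT blocks.
+     */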
     genIT(cUnit, (defaultResult == -1) ? kArmCondGt : kArmCondMi, "");
     newLIR2(cUnit, kThumb2MovImmShift, rlResult.lowReg,
             modifiedImmediate(-defaultResult)); // Must not alter ccodes
+    genBarrier(cUnit);
+
     genIT(cUnit, kArmCondEq, "");
     loadConstant(cUnit, rlResult.lowReg, 0);
+    genBarrier(cUnit);
+
     storeValue(cUnit, rlDest, rlResult);
     return false;
 }
diff --git a/vm/compiler/codegen/arm/FP/ThumbPortableFP.c b/vm/compiler/codegen/arm/FP/ThumbPortableFP.c
index ef288ac..7aac8e6 100644
--- a/vm/compiler/codegen/arm/FP/ThumbPortableFP.c
+++ b/vm/compiler/codegen/arm/FP/ThumbPortableFP.c
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-/* Forward decalraton the portable versions due to circular dependency */
+/* Forward-declare the portable versions due to circular dependency */
 static bool genArithOpFloatPortable(CompilationUnit *cUnit, MIR *mir,
                                     RegLocation rlDest, RegLocation rlSrc1,
                                     RegLocation rlSrc2);
@@ -25,6 +25,8 @@
 
 static bool genConversionPortable(CompilationUnit *cUnit, MIR *mir);
 
+static bool handleExecuteInlineC(CompilationUnit *cUnit, MIR *mir);
+
 static bool genConversion(CompilationUnit *cUnit, MIR *mir)
 {
     return genConversionPortable(cUnit, mir);
@@ -46,7 +48,7 @@
 
 static bool genInlineSqrt(CompilationUnit *cUnit, MIR *mir)
 {
-    return false;   /* punt to C handler */
+    return handleExecuteInlineC(cUnit, mir);
 }
 
 static bool genCmpFP(CompilationUnit *cUnit, MIR *mir, RegLocation rlDest,
diff --git a/vm/compiler/codegen/arm/FP/ThumbVFP.c b/vm/compiler/codegen/arm/FP/ThumbVFP.c
index 9bfcd55..f685f24 100644
--- a/vm/compiler/codegen/arm/FP/ThumbVFP.c
+++ b/vm/compiler/codegen/arm/FP/ThumbVFP.c
@@ -38,7 +38,7 @@
      }
      dvmCompilerClobber(cUnit, rDest);
      dvmCompilerLockTemp(cUnit, rDest);
-     opRegRegImm(cUnit, kOpAdd, rDest, rFP,
+     opRegRegImm(cUnit, kOpAdd, rDest, r5FP,
                  dvmCompilerS2VReg(cUnit, rlSrc.sRegLow) << 2);
 }
 
@@ -58,7 +58,7 @@
  * to the handlers rather than load the operands into core registers
  * and then move the values to FP regs in the handlers.  Other implementations
  * may prefer passing data in registers (and the latter approach would
- * yeild cleaner register handling - avoiding the requirement that operands
+ * yield cleaner register handling - avoiding the requirement that operands
  * be flushed to memory prior to the call).
  */
 static bool genArithOpFloat(CompilationUnit *cUnit, MIR *mir,
@@ -158,37 +158,37 @@
     bool longDest = false;
     RegLocation rlSrc;
     RegLocation rlDest;
-    TemplateOpcode template;
+    TemplateOpcode templateOpcode;
     switch (opcode) {
         case OP_INT_TO_FLOAT:
             longSrc = false;
             longDest = false;
-            template = TEMPLATE_INT_TO_FLOAT_VFP;
+            templateOpcode = TEMPLATE_INT_TO_FLOAT_VFP;
             break;
         case OP_FLOAT_TO_INT:
             longSrc = false;
             longDest = false;
-            template = TEMPLATE_FLOAT_TO_INT_VFP;
+            templateOpcode = TEMPLATE_FLOAT_TO_INT_VFP;
             break;
         case OP_DOUBLE_TO_FLOAT:
             longSrc = true;
             longDest = false;
-            template = TEMPLATE_DOUBLE_TO_FLOAT_VFP;
+            templateOpcode = TEMPLATE_DOUBLE_TO_FLOAT_VFP;
             break;
         case OP_FLOAT_TO_DOUBLE:
             longSrc = false;
             longDest = true;
-            template = TEMPLATE_FLOAT_TO_DOUBLE_VFP;
+            templateOpcode = TEMPLATE_FLOAT_TO_DOUBLE_VFP;
             break;
         case OP_INT_TO_DOUBLE:
             longSrc = false;
             longDest = true;
-            template = TEMPLATE_INT_TO_DOUBLE_VFP;
+            templateOpcode = TEMPLATE_INT_TO_DOUBLE_VFP;
             break;
         case OP_DOUBLE_TO_INT:
             longSrc = true;
             longDest = false;
-            template = TEMPLATE_DOUBLE_TO_INT_VFP;
+            templateOpcode = TEMPLATE_DOUBLE_TO_INT_VFP;
             break;
         case OP_LONG_TO_DOUBLE:
         case OP_FLOAT_TO_LONG:
@@ -212,7 +212,7 @@
     }
     loadValueAddressDirect(cUnit, rlDest, r0);
     loadValueAddressDirect(cUnit, rlSrc, r1);
-    genDispatchToHandler(cUnit, template);
+    genDispatchToHandler(cUnit, templateOpcode);
     if (rlDest.wide) {
         rlDest = dvmCompilerUpdateLocWide(cUnit, rlDest);
         dvmCompilerClobber(cUnit, rlDest.highReg);
@@ -226,31 +226,31 @@
 static bool genCmpFP(CompilationUnit *cUnit, MIR *mir, RegLocation rlDest,
                      RegLocation rlSrc1, RegLocation rlSrc2)
 {
-    TemplateOpcode template;
+    TemplateOpcode templateOpcode;
     RegLocation rlResult = dvmCompilerGetReturn(cUnit);
     bool wide = true;
 
     switch(mir->dalvikInsn.opcode) {
         case OP_CMPL_FLOAT:
-            template = TEMPLATE_CMPL_FLOAT_VFP;
+            templateOpcode = TEMPLATE_CMPL_FLOAT_VFP;
             wide = false;
             break;
         case OP_CMPG_FLOAT:
-            template = TEMPLATE_CMPG_FLOAT_VFP;
+            templateOpcode = TEMPLATE_CMPG_FLOAT_VFP;
             wide = false;
             break;
         case OP_CMPL_DOUBLE:
-            template = TEMPLATE_CMPL_DOUBLE_VFP;
+            templateOpcode = TEMPLATE_CMPL_DOUBLE_VFP;
             break;
         case OP_CMPG_DOUBLE:
-            template = TEMPLATE_CMPG_DOUBLE_VFP;
+            templateOpcode = TEMPLATE_CMPG_DOUBLE_VFP;
             break;
         default:
             return true;
     }
     loadValueAddressDirect(cUnit, rlSrc1, r0);
     loadValueAddressDirect(cUnit, rlSrc2, r1);
-    genDispatchToHandler(cUnit, template);
+    genDispatchToHandler(cUnit, templateOpcode);
     storeValue(cUnit, rlDest, rlResult);
     return false;
 }
diff --git a/vm/compiler/codegen/arm/GlobalOptimizations.c b/vm/compiler/codegen/arm/GlobalOptimizations.c
index c1e69c3..e52bd8a 100644
--- a/vm/compiler/codegen/arm/GlobalOptimizations.c
+++ b/vm/compiler/codegen/arm/GlobalOptimizations.c
@@ -41,14 +41,18 @@
                  * Is the branch target the next instruction?
                  */
                 if (nextLIR == (ArmLIR *) thisLIR->generic.target) {
-                    thisLIR->isNop = true;
+                    thisLIR->flags.isNop = true;
                     break;
                 }
 
                 /*
-                 * Found real useful stuff between the branch and the target
+                 * Found real useful stuff between the branch and the target.
+                 * Need to explicitly check the lastLIRInsn here since with
+                 * method-based JIT the branch might be the last real
+                 * instruction.
                  */
-                if (!isPseudoOpcode(nextLIR->opcode))
+                if (!isPseudoOpcode(nextLIR->opcode) ||
+                    (nextLIR == (ArmLIR *) cUnit->lastLIRInsn))
                     break;
             }
         }
diff --git a/vm/compiler/codegen/arm/LocalOptimizations.c b/vm/compiler/codegen/arm/LocalOptimizations.c
index 33e1e41..4c0354a 100644
--- a/vm/compiler/codegen/arm/LocalOptimizations.c
+++ b/vm/compiler/codegen/arm/LocalOptimizations.c
@@ -21,33 +21,23 @@
 
 #define DEBUG_OPT(X)
 
-ArmLIR* dvmCompilerGenCopy(CompilationUnit *cUnit, int rDest, int rSrc);
+/* Check RAW, WAR, and WAW dependencies on the register operands */
+#define CHECK_REG_DEP(use, def, check) ((def & check->useMask) || \
+                                        ((use | def) & check->defMask))
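+/*
+ * That is, there is a dependency if "check" reads a register the candidate
+ * instruction defines, or writes a register the candidate reads or defines.
+ */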
 
-/* Is this a Dalvik register access? */
-static inline bool isDalvikLoad(ArmLIR *lir)
-{
-    return (lir->useMask != ENCODE_ALL) && (lir->useMask & ENCODE_DALVIK_REG);
-}
-
-/* Is this a load from the literal pool? */
-static inline bool isLiteralLoad(ArmLIR *lir)
-{
-    return (lir->useMask != ENCODE_ALL) && (lir->useMask & ENCODE_LITERAL);
-}
-
-static inline bool isDalvikStore(ArmLIR *lir)
-{
-    return (lir->defMask != ENCODE_ALL) && (lir->defMask & ENCODE_DALVIK_REG);
-}
+/* Scheduler heuristics */
+#define MAX_HOIST_DISTANCE 20
+#define LDLD_DISTANCE 4
+#define LD_LATENCY 2
 
 static inline bool isDalvikRegisterClobbered(ArmLIR *lir1, ArmLIR *lir2)
 {
-  int reg1Lo = DECODE_ALIAS_INFO_REG(lir1->aliasInfo);
-  int reg1Hi = reg1Lo + DECODE_ALIAS_INFO_WIDE(lir1->aliasInfo);
-  int reg2Lo = DECODE_ALIAS_INFO_REG(lir2->aliasInfo);
-  int reg2Hi = reg2Lo + DECODE_ALIAS_INFO_WIDE(lir2->aliasInfo);
+    int reg1Lo = DECODE_ALIAS_INFO_REG(lir1->aliasInfo);
+    int reg1Hi = reg1Lo + DECODE_ALIAS_INFO_WIDE(lir1->aliasInfo);
+    int reg2Lo = DECODE_ALIAS_INFO_REG(lir2->aliasInfo);
+    int reg2Hi = reg2Lo + DECODE_ALIAS_INFO_WIDE(lir2->aliasInfo);
 
-  return (reg1Lo == reg2Lo) || (reg1Lo == reg2Hi) || (reg1Hi == reg2Lo);
+    return (reg1Lo == reg2Lo) || (reg1Lo == reg2Hi) || (reg1Hi == reg2Lo);
 }
 
 #if 0
@@ -61,10 +51,39 @@
 }
 #endif
 
+/* Convert a more expensive instruction (i.e. a load) into a move */
+static void convertMemOpIntoMove(CompilationUnit *cUnit, ArmLIR *origLIR,
+                                 int dest, int src)
+{
+    /* Insert a move to replace the load */
+    ArmLIR *moveLIR;
+    moveLIR = dvmCompilerRegCopyNoInsert(cUnit, dest, src);
+    /*
+     * Insert the converted instruction after the original since the
+     * optimization is scanning in top-down order and the new instruction
+     * will need to be re-checked (e.g. the new dest clobbers the src used in
+     * thisLIR).
+     */
+    dvmCompilerInsertLIRAfter((LIR *) origLIR, (LIR *) moveLIR);
+}
+
 /*
- * Perform a pass of top-down walk to
- * 1) Eliminate redundant loads and stores
- * 2) Sink stores to latest possible slot
+ * Perform a bottom-up pass, from the second-last instruction in the
+ * superblock, to eliminate redundant loads and stores.
+ *
+ * An earlier load can eliminate a later load iff
+ *   1) They are must-aliases
+ *   2) The native register is not clobbered in between
+ *   3) The memory location is not written to in between
+ *
+ * An earlier store can eliminate a later load iff
+ *   1) They are must-aliases
+ *   2) The native register is not clobbered in between
+ *   3) The memory location is not written to in between
+ *
+ * A later store can be eliminated by an earlier store iff
+ *   1) They are must-aliases
+ *   2) The memory location is not written to in between
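+ *
+ * A small sketch (assuming Dalvik vreg v3 lives at [r5FP, #12] and is
+ * currently cached in r2):
+ *
+ *     str  r2, [r5FP, #12]    @ store v3
+ *     ...                     @ no write to v3, r2 not clobbered
+ *     ldr  r3, [r5FP, #12]    @ redundant reload of v3
+ *
+ * The reload meets the store/load conditions above, so it can be turned into
+ * "mov r3, r2" (or simply nop'ed when the destination is already r2).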
  */
 static void applyLoadStoreElimination(CompilationUnit *cUnit,
                                       ArmLIR *headLIR,
@@ -72,428 +91,347 @@
 {
     ArmLIR *thisLIR;
 
-    cUnit->optRound++;
-    for (thisLIR = headLIR;
-         thisLIR != tailLIR;
-         thisLIR = NEXT_LIR(thisLIR)) {
-        /* Skip newly added instructions */
-        if (thisLIR->age >= cUnit->optRound) {
+    if (headLIR == tailLIR) return;
+
+    for (thisLIR = PREV_LIR(tailLIR);
+         thisLIR != headLIR;
+         thisLIR = PREV_LIR(thisLIR)) {
+        int sinkDistance = 0;
+
+        /* Skip non-interesting instructions */
+        if ((thisLIR->flags.isNop == true) ||
+            isPseudoOpcode(thisLIR->opcode) ||
+            !(EncodingMap[thisLIR->opcode].flags & (IS_LOAD | IS_STORE))) {
             continue;
         }
-        if (isDalvikStore(thisLIR)) {
-            int nativeRegId = thisLIR->operands[0];
-            ArmLIR *checkLIR;
-            int sinkDistance = 0;
+
+        int nativeRegId = thisLIR->operands[0];
+        bool isThisLIRLoad = EncodingMap[thisLIR->opcode].flags & IS_LOAD;
+        ArmLIR *checkLIR;
+        /* Use the mem mask to determine the rough memory location */
+        u8 thisMemMask = (thisLIR->useMask | thisLIR->defMask) & ENCODE_MEM;
+
+        /*
+         * Currently only eliminate redundant ld/st for constant and Dalvik
+         * register accesses.
+         */
+        if (!(thisMemMask & (ENCODE_LITERAL | ENCODE_DALVIK_REG))) continue;
+
+        /*
+         * Add r15 (pc) to the resource mask to prevent this instruction
+         * from sinking past branch instructions. Also take out the memory
+         * region bits since stopMask is used to check data/control
+         * dependencies.
+         */
+        u8 stopUseRegMask = (ENCODE_REG_PC | thisLIR->useMask) &
+                            ~ENCODE_MEM;
+        u8 stopDefRegMask = thisLIR->defMask & ~ENCODE_MEM;
+
+        for (checkLIR = NEXT_LIR(thisLIR);
+             checkLIR != tailLIR;
+             checkLIR = NEXT_LIR(checkLIR)) {
+
+            u8 checkMemMask = (checkLIR->useMask | checkLIR->defMask) &
+                              ENCODE_MEM;
+            u8 aliasCondition = thisMemMask & checkMemMask;
+            bool stopHere = false;
+
             /*
-             * Add r15 (pc) to the mask to prevent this instruction
-             * from sinking past branch instructions. Unset the Dalvik register
-             * bit when checking with native resource constraints.
+             * Potential aliases seen - check the alias relations
              */
-            u8 stopMask = (ENCODE_REG_PC | thisLIR->useMask) &
-                          ~ENCODE_DALVIK_REG;
-
-            for (checkLIR = NEXT_LIR(thisLIR);
-                 checkLIR != tailLIR;
-                 checkLIR = NEXT_LIR(checkLIR)) {
-
-                /* Check if a Dalvik register load is redundant */
-                if (isDalvikLoad(checkLIR) &&
-                    (checkLIR->aliasInfo == thisLIR->aliasInfo) &&
-                    (REGTYPE(checkLIR->operands[0]) == REGTYPE(nativeRegId))) {
-                    /* Insert a move to replace the load */
-                    if (checkLIR->operands[0] != nativeRegId) {
-                        ArmLIR *moveLIR;
-                        moveLIR = dvmCompilerRegCopyNoInsert(
-                                    cUnit, checkLIR->operands[0], nativeRegId);
-                        /*
-                         * Insert the converted checkLIR instruction after the
-                         * the original checkLIR since the optimization is
-                         * scannng in the top-down order and the new instruction
-                         * will need to be checked.
-                         */
-                        dvmCompilerInsertLIRAfter((LIR *) checkLIR,
-                                                  (LIR *) moveLIR);
-                    }
-                    checkLIR->isNop = true;
-                    continue;
-
-                /*
-                 * Found a true output dependency - nuke the previous store.
-                 * The register type doesn't matter here.
-                 */
-                } else if (isDalvikStore(checkLIR) &&
-                           (checkLIR->aliasInfo == thisLIR->aliasInfo)) {
-                    thisLIR->isNop = true;
-                    break;
-                /* Find out the latest slot that the store can be sunk into */
-                } else {
-                    /* Last instruction reached */
-                    bool stopHere = (NEXT_LIR(checkLIR) == tailLIR);
-
-                    /* Store data is clobbered */
-                    stopHere |= ((stopMask & checkLIR->defMask) != 0);
-
-                    /* Store data partially clobbers the Dalvik register */
-                    if (stopHere == false &&
-                        ((checkLIR->useMask | checkLIR->defMask) &
-                         ENCODE_DALVIK_REG)) {
-                        stopHere = isDalvikRegisterClobbered(thisLIR, checkLIR);
-                    }
-
-                    /* Found a new place to put the store - move it here */
-                    if (stopHere == true) {
-                        DEBUG_OPT(dumpDependentInsnPair(thisLIR, checkLIR,
-                                                        "SINK STORE"));
-                        /* The store can be sunk for at least one cycle */
-                        if (sinkDistance != 0) {
-                            ArmLIR *newStoreLIR =
-                                dvmCompilerNew(sizeof(ArmLIR), true);
-                            *newStoreLIR = *thisLIR;
-                            newStoreLIR->age = cUnit->optRound;
-                            /*
-                             * Stop point found - insert *before* the checkLIR
-                             * since the instruction list is scanned in the
-                             * top-down order.
-                             */
-                            dvmCompilerInsertLIRBefore((LIR *) checkLIR,
-                                                       (LIR *) newStoreLIR);
-                            thisLIR->isNop = true;
-                        }
-                        break;
-                    }
-
+            if (checkMemMask != ENCODE_MEM && aliasCondition != 0) {
+                bool isCheckLIRLoad = EncodingMap[checkLIR->opcode].flags &
+                                      IS_LOAD;
+                if (aliasCondition == ENCODE_LITERAL) {
                     /*
-                     * Saw a real instruction that the store can be sunk after
+                     * Should only see literal loads in the instruction
+                     * stream.
                      */
-                    if (!isPseudoOpcode(checkLIR->opcode)) {
-                        sinkDistance++;
+                    assert(!(EncodingMap[checkLIR->opcode].flags &
+                             IS_STORE));
+                    /* Same value && same register type */
+                    if (checkLIR->aliasInfo == thisLIR->aliasInfo &&
+                        REGTYPE(checkLIR->operands[0]) == REGTYPE(nativeRegId)){
+                        /*
+                         * Different destination register - insert
+                         * a move
+                         */
+                        if (checkLIR->operands[0] != nativeRegId) {
+                            convertMemOpIntoMove(cUnit, checkLIR,
+                                                 checkLIR->operands[0],
+                                                 nativeRegId);
+                        }
+                        checkLIR->flags.isNop = true;
+                    }
+                } else if (aliasCondition == ENCODE_DALVIK_REG) {
+                    /* Must alias */
+                    if (checkLIR->aliasInfo == thisLIR->aliasInfo) {
+                        /* Only optimize compatible registers */
+                        bool regCompatible =
+                            REGTYPE(checkLIR->operands[0]) ==
+                            REGTYPE(nativeRegId);
+                        if ((isThisLIRLoad && isCheckLIRLoad) ||
+                            (!isThisLIRLoad && isCheckLIRLoad)) {
+                            /* RAR or RAW */
+                            if (regCompatible) {
+                                /*
+                                 * Different destination register -
+                                 * insert a move
+                                 */
+                                if (checkLIR->operands[0] !=
+                                    nativeRegId) {
+                                    convertMemOpIntoMove(cUnit,
+                                                 checkLIR,
+                                                 checkLIR->operands[0],
+                                                 nativeRegId);
+                                }
+                                checkLIR->flags.isNop = true;
+                            } else {
+                                /*
+                                 * Destinations are of different types -
+                                 * something complicated is going on, so
+                                 * stop looking now.
+                                 */
+                                stopHere = true;
+                            }
+                        } else if (isThisLIRLoad && !isCheckLIRLoad) {
+                            /* WAR - register value is killed */
+                            stopHere = true;
+                        } else if (!isThisLIRLoad && !isCheckLIRLoad) {
+                            /* WAW - nuke the earlier store */
+                            thisLIR->flags.isNop = true;
+                            stopHere = true;
+                        }
+                    /* Partial overlap */
+                    } else if (isDalvikRegisterClobbered(thisLIR, checkLIR)) {
+                        /*
+                         * It is actually ok to continue if checkLIR
+                         * is a read. But it is hard to make a test
+                         * case for this so we just stop here to be
+                         * conservative.
+                         */
+                        stopHere = true;
                     }
                 }
+                /* Memory content may be updated. Stop looking now. */
+                if (stopHere) {
+                    break;
+                /* The checkLIR has been transformed - check the next one */
+                } else if (checkLIR->flags.isNop) {
+                    continue;
+                }
+            }
+
+
+            /*
+             * thisLIR and checkLIR have no memory dependency. Now check
+             * whether their register operands have any RAW, WAR, or WAW
+             * dependencies. If so, stop looking.
+             */
+            if (stopHere == false) {
+                stopHere = CHECK_REG_DEP(stopUseRegMask, stopDefRegMask,
+                                         checkLIR);
+            }
+
+            if (stopHere == true) {
+                DEBUG_OPT(dumpDependentInsnPair(thisLIR, checkLIR,
+                                                "REG CLOBBERED"));
+                /* Only sink store instructions */
+                if (sinkDistance && !isThisLIRLoad) {
+                    ArmLIR *newStoreLIR =
+                        (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
+                    *newStoreLIR = *thisLIR;
+                    /*
+                     * Stop point found - insert *before* the checkLIR
+                     * since the instruction list is scanned in the
+                     * top-down order.
+                     */
+                    dvmCompilerInsertLIRBefore((LIR *) checkLIR,
+                                               (LIR *) newStoreLIR);
+                    thisLIR->flags.isNop = true;
+                }
+                break;
+            } else if (!checkLIR->flags.isNop) {
+                sinkDistance++;
             }
         }
     }
 }
 
+/*
+ * Perform a bottom-up pass, starting from the second instruction in the
+ * superblock, to try to hoist loads to earlier slots.
+ */
 static void applyLoadHoisting(CompilationUnit *cUnit,
                               ArmLIR *headLIR,
                               ArmLIR *tailLIR)
 {
-    ArmLIR *thisLIR;
+    ArmLIR *thisLIR, *checkLIR;
     /*
-     * Don't want to hoist in front of first load following a barrier (or
-     * first instruction of the block.
+     * Keep a list of the independent instructions that the load can be
+     * hoisted past; the best insertion point will be decided later.
      */
-    bool firstLoad = true;
-    int maxHoist = dvmCompilerTargetOptHint(kMaxHoistDistance);
+    ArmLIR *prevInstList[MAX_HOIST_DISTANCE];
 
-    cUnit->optRound++;
-    for (thisLIR = headLIR;
+    /* Empty block */
+    if (headLIR == tailLIR) return;
+
+    /* Start from the second instruction */
+    for (thisLIR = NEXT_LIR(headLIR);
          thisLIR != tailLIR;
          thisLIR = NEXT_LIR(thisLIR)) {
-        /* Skip newly added instructions */
-        if (thisLIR->age >= cUnit->optRound ||
-            thisLIR->isNop == true) {
+
+        /* Skip non-interesting instructions */
+        if ((thisLIR->flags.isNop == true) ||
+            isPseudoOpcode(thisLIR->opcode) ||
+            !(EncodingMap[thisLIR->opcode].flags & IS_LOAD)) {
             continue;
         }
 
-        if (firstLoad && (EncodingMap[thisLIR->opcode].flags & IS_LOAD)) {
-            /*
-             * Ensure nothing will be hoisted in front of this load because
-             * it's result will likely be needed soon.
-             */
-            thisLIR->defMask |= ENCODE_MEM_USE;
-            firstLoad = false;
+        u8 stopUseAllMask = thisLIR->useMask;
+
+        /*
+         * Branches for null/range checks are marked with the true resource
+         * bits, and loads from Dalvik registers, constant pools, and
+         * non-aliasing locations are safe to hoist. So only mark the heap
+         * references conservatively here.
+         */
+        if (stopUseAllMask & ENCODE_HEAP_REF) {
+            stopUseAllMask |= ENCODE_REG_PC;
         }
 
-        firstLoad |= (thisLIR->defMask == ENCODE_ALL);
+        /* Similar to the above, but only check pure register dependencies */
+        u8 stopUseRegMask = stopUseAllMask & ~ENCODE_MEM;
+        u8 stopDefRegMask = thisLIR->defMask & ~ENCODE_MEM;
 
-        if (isDalvikLoad(thisLIR)) {
-            int dRegId = DECODE_ALIAS_INFO_REG(thisLIR->aliasInfo);
-            int nativeRegId = thisLIR->operands[0];
-            ArmLIR *checkLIR;
-            int hoistDistance = 0;
-            u8 stopUseMask = (ENCODE_REG_PC | thisLIR->useMask);
-            u8 stopDefMask = thisLIR->defMask;
-            u8 checkResult;
+        int nextSlot = 0;
+        bool stopHere;
 
-            /* First check if the load can be completely elinimated */
-            for (checkLIR = PREV_LIR(thisLIR);
-                 checkLIR != headLIR;
-                 checkLIR = PREV_LIR(checkLIR)) {
+        /* Try to hoist the load to a good spot */
+        for (checkLIR = PREV_LIR(thisLIR);
+             checkLIR != headLIR;
+             checkLIR = PREV_LIR(checkLIR)) {
 
-                if (checkLIR->isNop) continue;
+            if (checkLIR->flags.isNop) continue;
 
-                /*
-                 * Check if the Dalvik register is previously accessed
-                 * with exactly the same type.
-                 */
-                if ((isDalvikLoad(checkLIR) || isDalvikStore(checkLIR)) &&
-                    (checkLIR->aliasInfo == thisLIR->aliasInfo) &&
-                    (checkLIR->operands[0] == nativeRegId)) {
-                    /*
-                     * If it is previously accessed but with a different type,
-                     * the search will terminate later at the point checking
-                     * for partially overlapping stores.
-                     */
-                    thisLIR->isNop = true;
-                    break;
-                }
+            u8 checkMemMask = checkLIR->defMask & ENCODE_MEM;
+            u8 aliasCondition = stopUseAllMask & checkMemMask;
+            stopHere = false;
 
-                /*
-                 * No earlier use/def can reach this load if:
-                 * 1) Head instruction is reached
-                 */
-                if (checkLIR == headLIR) {
-                    break;
-                }
-
-                checkResult = (stopUseMask | stopDefMask) & checkLIR->defMask;
-
-                /*
-                 * If both instructions are verified Dalvik accesses, clear the
-                 * may- and must-alias bits to detect true resource
-                 * dependencies.
-                 */
-                if (checkResult & ENCODE_DALVIK_REG) {
-                    checkResult &= ~(ENCODE_DALVIK_REG | ENCODE_FRAME_REF);
-                }
-
-                /*
-                 * 2) load target register is clobbered
-                 * 3) A branch is seen (stopUseMask has the PC bit set).
-                 */
-                if (checkResult) {
-                    break;
-                }
-
-                /* Store data partially clobbers the Dalvik register */
-                if (isDalvikStore(checkLIR) &&
-                    isDalvikRegisterClobbered(thisLIR, checkLIR)) {
-                    break;
-                }
-            }
-
-            /* The load has been eliminated */
-            if (thisLIR->isNop) continue;
-
-            /*
-             * The load cannot be eliminated. See if it can be hoisted to an
-             * earlier spot.
-             */
-            for (checkLIR = PREV_LIR(thisLIR);
-                 /* empty by intention */;
-                 checkLIR = PREV_LIR(checkLIR)) {
-
-                if (checkLIR->isNop) continue;
-
-                /*
-                 * Check if the "thisLIR" load is redundant
-                 * NOTE: At one point, we also triggered if the checkLIR
-                 * instruction was a load.  However, that tended to insert
-                 * a load/use dependency because the full scheduler is
-                 * not yet complete.  When it is, we chould also trigger
-                 * on loads.
-                 */
-                if (isDalvikStore(checkLIR) &&
-                    (checkLIR->aliasInfo == thisLIR->aliasInfo) &&
-                    (REGTYPE(checkLIR->operands[0]) == REGTYPE(nativeRegId))) {
-                    /* Insert a move to replace the load */
-                    if (checkLIR->operands[0] != nativeRegId) {
-                        ArmLIR *moveLIR;
-                        moveLIR = dvmCompilerRegCopyNoInsert(
-                                    cUnit, nativeRegId, checkLIR->operands[0]);
-                        /*
-                         * Convert *thisLIR* load into a move
-                         */
-                        dvmCompilerInsertLIRAfter((LIR *) checkLIR,
-                                                  (LIR *) moveLIR);
+            /* Potential WAR alias seen - check the exact relation */
+            if (checkMemMask != ENCODE_MEM && aliasCondition != 0) {
+                /* We can fully disambiguate Dalvik references */
+                if (aliasCondition == ENCODE_DALVIK_REG) {
+                    /* Must alias or partially overlap */
+                    if ((checkLIR->aliasInfo == thisLIR->aliasInfo) ||
+                        isDalvikRegisterClobbered(thisLIR, checkLIR)) {
+                        stopHere = true;
                     }
-                    thisLIR->isNop = true;
-                    break;
-
-                /* Find out if the load can be yanked past the checkLIR */
+                /* Conservatively treat all heap refs as may-alias */
                 } else {
-                    /* Last instruction reached */
-                    bool stopHere = (checkLIR == headLIR);
-
-                    /* Base address is clobbered by checkLIR */
-                    checkResult = stopUseMask & checkLIR->defMask;
-                    if (checkResult & ENCODE_DALVIK_REG) {
-                        checkResult &= ~(ENCODE_DALVIK_REG | ENCODE_FRAME_REF);
-                    }
-                    stopHere |= (checkResult != 0);
-
-                    /* Load target clobbers use/def in checkLIR */
-                    checkResult = stopDefMask &
-                                  (checkLIR->useMask | checkLIR->defMask);
-                    if (checkResult & ENCODE_DALVIK_REG) {
-                        checkResult &= ~(ENCODE_DALVIK_REG | ENCODE_FRAME_REF);
-                    }
-                    stopHere |= (checkResult != 0);
-
-                    /* Store data partially clobbers the Dalvik register */
-                    if (stopHere == false &&
-                        (checkLIR->defMask & ENCODE_DALVIK_REG)) {
-                        stopHere = isDalvikRegisterClobbered(thisLIR, checkLIR);
-                    }
-
-                    /*
-                     * Stop at an earlier Dalvik load if the offset of checkLIR
-                     * is not less than thisLIR
-                     *
-                     * Experiments show that doing
-                     *
-                     * ldr     r1, [r5, #16]
-                     * ldr     r0, [r5, #20]
-                     *
-                     * is much faster than
-                     *
-                     * ldr     r0, [r5, #20]
-                     * ldr     r1, [r5, #16]
-                     */
-                    if (isDalvikLoad(checkLIR)) {
-                        int dRegId2 =
-                            DECODE_ALIAS_INFO_REG(checkLIR->aliasInfo);
-                        if (dRegId2 <= dRegId) {
-                            stopHere = true;
-                        }
-                    }
-
-                    /* Don't go too far */
-                    stopHere |= (hoistDistance >= maxHoist);
-
-                    /* Found a new place to put the load - move it here */
-                    if (stopHere == true) {
-                        DEBUG_OPT(dumpDependentInsnPair(thisLIR, checkLIR,
-                                                        "HOIST LOAD"));
-                        /* The load can be hoisted for at least one cycle */
-                        if (hoistDistance != 0) {
-                            ArmLIR *newLoadLIR =
-                                dvmCompilerNew(sizeof(ArmLIR), true);
-                            *newLoadLIR = *thisLIR;
-                            newLoadLIR->age = cUnit->optRound;
-                            /*
-                             * Stop point found - insert *after* the checkLIR
-                             * since the instruction list is scanned in the
-                             * bottom-up order.
-                             */
-                            dvmCompilerInsertLIRAfter((LIR *) checkLIR,
-                                                      (LIR *) newLoadLIR);
-                            thisLIR->isNop = true;
-                        }
-                        break;
-                    }
-
-                    /*
-                     * Saw a real instruction that hosting the load is
-                     * beneficial
-                     */
-                    if (!isPseudoOpcode(checkLIR->opcode)) {
-                        hoistDistance++;
-                    }
+                    assert(aliasCondition == ENCODE_HEAP_REF);
+                    stopHere = true;
                 }
-            }
-        } else if (isLiteralLoad(thisLIR)) {
-            int litVal = thisLIR->aliasInfo;
-            int nativeRegId = thisLIR->operands[0];
-            ArmLIR *checkLIR;
-            int hoistDistance = 0;
-            u8 stopUseMask = (ENCODE_REG_PC | thisLIR->useMask) &
-                             ~ENCODE_LITPOOL_REF;
-            u8 stopDefMask = thisLIR->defMask & ~ENCODE_LITPOOL_REF;
-
-            /* First check if the load can be completely elinimated */
-            for (checkLIR = PREV_LIR(thisLIR);
-                 checkLIR != headLIR;
-                 checkLIR = PREV_LIR(checkLIR)) {
-
-                if (checkLIR->isNop) continue;
-
-                /* Reloading same literal into same tgt reg? Eliminate if so */
-                if (isLiteralLoad(checkLIR) &&
-                    (checkLIR->aliasInfo == litVal) &&
-                    (checkLIR->operands[0] == nativeRegId)) {
-                    thisLIR->isNop = true;
-                    break;
-                }
-
-                /*
-                 * No earlier use/def can reach this load if:
-                 * 1) Head instruction is reached
-                 * 2) load target register is clobbered
-                 * 3) A branch is seen (stopUseMask has the PC bit set).
-                 */
-                if ((checkLIR == headLIR) ||
-                    (stopUseMask | stopDefMask) & checkLIR->defMask) {
+                /* Memory content may be updated. Stop looking now. */
+                if (stopHere) {
+                    prevInstList[nextSlot++] = checkLIR;
                     break;
                 }
             }
 
-            /* The load has been eliminated */
-            if (thisLIR->isNop) continue;
+            if (stopHere == false) {
+                stopHere = CHECK_REG_DEP(stopUseRegMask, stopDefRegMask,
+                                         checkLIR);
+            }
 
             /*
-             * The load cannot be eliminated. See if it can be hoisted to an
-             * earlier spot.
+             * Store the dependent or non-pseudo/independent instruction in
+             * the list.
              */
-            for (checkLIR = PREV_LIR(thisLIR);
-                 /* empty by intention */;
-                 checkLIR = PREV_LIR(checkLIR)) {
+            if (stopHere || !isPseudoOpcode(checkLIR->opcode)) {
+                prevInstList[nextSlot++] = checkLIR;
+                if (nextSlot == MAX_HOIST_DISTANCE) break;
+            }
 
-                if (checkLIR->isNop) continue;
+            /* Found a dependent instruction - stop the backward scan here */
+            if (stopHere == true) {
+                DEBUG_OPT(dumpDependentInsnPair(checkLIR, thisLIR,
+                                                "HOIST STOP"));
+                break;
+            }
+        }
+
+        /*
+         * Reached the top - use headLIR as the dependency marker, since all
+         * labels are barriers.
+         */
+        if (stopHere == false && nextSlot < MAX_HOIST_DISTANCE) {
+            prevInstList[nextSlot++] = headLIR;
+        }
+
+        /*
+         * At least one independent instruction was found. Scan in the
+         * reverse direction to find a beneficial slot.
+         */
+        if (nextSlot >= 2) {
+            int firstSlot = nextSlot - 2;
+            int slot;
+            ArmLIR *depLIR = prevInstList[nextSlot-1];
+            /* If there is a ld-ld dependency, wait LDLD_DISTANCE cycles */
+            if (!isPseudoOpcode(depLIR->opcode) &&
+                (EncodingMap[depLIR->opcode].flags & IS_LOAD)) {
+                firstSlot -= LDLD_DISTANCE;
+            }
+            /*
+             * Make sure we check slot >= 0 since firstSlot may be negative
+             * when the loop is first entered.
+             */
+            for (slot = firstSlot; slot >= 0; slot--) {
+                ArmLIR *curLIR = prevInstList[slot];
+                ArmLIR *prevLIR = prevInstList[slot+1];
+
+                /* Check the highest instruction */
+                if (prevLIR->defMask == ENCODE_ALL) {
+                    /*
+                     * If the first instruction is a load, don't hoist anything
+                     * above it since it is unlikely to be beneficial.
+                     */
+                    if (EncodingMap[curLIR->opcode].flags & IS_LOAD) continue;
+                    /*
+                     * If the remaining number of slots is less than LD_LATENCY,
+                     * insert the hoisted load here.
+                     */
+                    if (slot < LD_LATENCY) break;
+                }
 
                 /*
-                 * TUNING: once a full scheduler exists, check here
-                 * for conversion of a redundant load into a copy similar
-                 * to the way redundant loads are handled above.
+                 * NOTE: prevLIR is now guaranteed to be a non-pseudo
+                 * instruction (i.e. accessing EncodingMap[prevLIR->opcode] is
+                 * safe).
+                 *
+                 * Try to find two instructions with a load/use dependency
+                 * until fewer than LD_LATENCY instructions remain.
                  */
-
-                /* Find out if the load can be yanked past the checkLIR */
-
-                /* Last instruction reached */
-                bool stopHere = (checkLIR == headLIR);
-
-                /* Base address is clobbered by checkLIR */
-                stopHere |= ((stopUseMask & checkLIR->defMask) != 0);
-
-                /* Load target clobbers use/def in checkLIR */
-                stopHere |= ((stopDefMask &
-                             (checkLIR->useMask | checkLIR->defMask)) != 0);
-
-                /* Avoid re-ordering literal pool loads */
-                stopHere |= isLiteralLoad(checkLIR);
-
-                /* Don't go too far */
-                stopHere |= (hoistDistance >= maxHoist);
-
-                /* Found a new place to put the load - move it here */
-                if (stopHere == true) {
-                    DEBUG_OPT(dumpDependentInsnPair(thisLIR, checkLIR,
-                                                    "HOIST LOAD"));
-                    /* The store can be hoisted for at least one cycle */
-                    if (hoistDistance != 0) {
-                        ArmLIR *newLoadLIR =
-                            dvmCompilerNew(sizeof(ArmLIR), true);
-                        *newLoadLIR = *thisLIR;
-                        newLoadLIR->age = cUnit->optRound;
-                        /*
-                         * Insertion is guaranteed to succeed since checkLIR
-                         * is never the first LIR on the list
-                         */
-                        dvmCompilerInsertLIRAfter((LIR *) checkLIR,
-                                                  (LIR *) newLoadLIR);
-                        thisLIR->isNop = true;
-                    }
+                if (((curLIR->useMask & prevLIR->defMask) &&
+                     (EncodingMap[prevLIR->opcode].flags & IS_LOAD)) ||
+                    (slot < LD_LATENCY)) {
                     break;
                 }
+            }
 
+            /* Found a slot to hoist to */
+            if (slot >= 0) {
+                ArmLIR *curLIR = prevInstList[slot];
+                ArmLIR *newLoadLIR = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR),
+                                                               true);
+                *newLoadLIR = *thisLIR;
                 /*
-                 * Saw a real instruction that hosting the load is
-                 * beneficial
+                 * Insertion is guaranteed to succeed since curLIR
+                 * is never the first LIR on the list.
                  */
-                if (!isPseudoOpcode(checkLIR->opcode)) {
-                    hoistDistance++;
-                }
+                dvmCompilerInsertLIRBefore((LIR *) curLIR,
+                                           (LIR *) newLoadLIR);
+                thisLIR->flags.isNop = true;
             }
         }
     }
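
The rewritten elimination and hoisting passes above both reduce to the same
question: given two instructions' use/def resource masks, is there a RAW, WAR,
or WAW dependency between them? The standalone C sketch below is illustrative
only (the types and names are stand-ins, not the ArmLIR structures) and shows
roughly what the CHECK_REG_DEP test computes:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t ResourceMask;      /* one bit per register or resource */

    typedef struct {
        ResourceMask useMask;           /* resources read by the instruction */
        ResourceMask defMask;           /* resources written by the instruction */
    } Insn;

    /*
     * Returns true if 'later' cannot be moved across 'earlier':
     *   RAW - later reads something earlier writes
     *   WAR - later writes something earlier reads
     *   WAW - both write the same resource
     */
    static bool hasRegDependency(const Insn *earlier, const Insn *later)
    {
        bool raw = (earlier->defMask & later->useMask) != 0;
        bool war = (earlier->useMask & later->defMask) != 0;
        bool waw = (earlier->defMask & later->defMask) != 0;
        return raw || war || waw;
    }

    int main(void)
    {
        Insn store = { .useMask = 1u << 0, .defMask = 1u << 8 }; /* str r0 */
        Insn add   = { .useMask = 1u << 1, .defMask = 1u << 0 }; /* add r0 */
        /* WAR on r0: the store cannot be sunk below the add */
        printf("dependent: %d\n", hasRegDependency(&store, &add));
        return 0;
    }

A store is only sunk (or a load hoisted) while such a check stays false for
every instruction it moves past; any hit ends the scan, as in the loops above.
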
diff --git a/vm/compiler/codegen/arm/Thumb/Factory.c b/vm/compiler/codegen/arm/Thumb/Factory.c
index af255a9..7b51df1 100644
--- a/vm/compiler/codegen/arm/Thumb/Factory.c
+++ b/vm/compiler/codegen/arm/Thumb/Factory.c
@@ -69,25 +69,16 @@
         return res;
     }
     /* No shortcut - go ahead and use literal pool */
-    ArmLIR *dataTarget = scanLiteralPool(cUnit, value, 255);
+    ArmLIR *dataTarget = scanLiteralPool(cUnit->literalList, value, 255);
     if (dataTarget == NULL) {
-        dataTarget = addWordData(cUnit, value, false);
+        dataTarget = addWordData(cUnit, &cUnit->literalList, value);
     }
-    ArmLIR *loadPcRel = dvmCompilerNew(sizeof(ArmLIR), true);
+    ArmLIR *loadPcRel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
     loadPcRel->opcode = kThumbLdrPcRel;
     loadPcRel->generic.target = (LIR *) dataTarget;
     loadPcRel->operands[0] = tDest;
     setupResourceMasks(loadPcRel);
-    /*
-     * Special case for literal loads with a link register target.
-     * Self-cosim mode will insert calls prior to heap references
-     * after optimization, and those will destroy r14.  The easy
-     * workaround is to treat literal loads into r14 as heap references
-     * to prevent them from being hoisted.  Use of r14 in this manner
-     * is currently rare.  Revist if that changes.
-     */
-    if (rDest != rlr)
-        setMemRefType(loadPcRel, true, kLiteral);
+    setMemRefType(loadPcRel, true, kLiteral);
     loadPcRel->aliasInfo = dataTarget->operands[0];
     res = loadPcRel;
     dvmCompilerAppendLIR(cUnit, (LIR *) loadPcRel);
@@ -179,7 +170,7 @@
     ArmOpcode opcode = kThumbBkpt;
     switch (op) {
         case kOpAdd:
-            if ( !neg && (rDestSrc1 == 13) && (value <= 508)) { /* sp */
+            if ( !neg && (rDestSrc1 == r13sp) && (value <= 508)) { /* sp */
                 assert((value & 0x3) == 0);
                 return newLIR1(cUnit, kThumbAddSpI7, value >> 2);
             } else if (shortForm) {
@@ -188,7 +179,7 @@
                 opcode = kThumbAddRRR;
             break;
         case kOpSub:
-            if (!neg && (rDestSrc1 == 13) && (value <= 508)) { /* sp */
+            if (!neg && (rDestSrc1 == r13sp) && (value <= 508)) { /* sp */
                 assert((value & 0x3) == 0);
                 return newLIR1(cUnit, kThumbSubSpI7, value >> 2);
             } else if (shortForm) {
@@ -266,12 +257,12 @@
         case kOpAdd:
             if (rDest == rSrc1)
                 return opRegImm(cUnit, op, rDest, value);
-            if ((rSrc1 == 13) && (value <= 1020)) { /* sp */
+            if ((rSrc1 == r13sp) && (value <= 1020)) { /* sp */
                 assert((value & 0x3) == 0);
                 shortForm = true;
                 opcode = kThumbAddSpRel;
                 value >>= 2;
-            } else if ((rSrc1 == 15) && (value <= 1020)) { /* pc */
+            } else if ((rSrc1 == r15pc) && (value <= 1020)) { /* pc */
                 assert((value & 0x3) == 0);
                 shortForm = true;
                 opcode = kThumbAddPcRel;
@@ -480,7 +471,7 @@
     res = newLIR3(cUnit, opcode, rDest, rBase, rNewIndex);
 #if defined(WITH_SELF_VERIFICATION)
     if (cUnit->heapMemOp)
-        res->branchInsertSV = true;
+        res->flags.insertWrapper = true;
 #endif
     if (scale)
         dvmCompilerFreeTemp(cUnit, rNewIndex);
@@ -518,7 +509,7 @@
     res = newLIR3(cUnit, opcode, rSrc, rBase, rNewIndex);
 #if defined(WITH_SELF_VERIFICATION)
     if (cUnit->heapMemOp)
-        res->branchInsertSV = true;
+        res->flags.insertWrapper = true;
 #endif
     if (scale)
         dvmCompilerFreeTemp(cUnit, rNewIndex);
@@ -532,7 +523,7 @@
     res = newLIR2(cUnit, kThumbLdmia, rBase, rMask);
 #if defined(WITH_SELF_VERIFICATION)
     if (cUnit->heapMemOp)
-        res->branchInsertSV = true;
+        res->flags.insertWrapper = true;
 #endif
     genBarrier(cUnit);
     return res;
@@ -545,7 +536,7 @@
     res = newLIR2(cUnit, kThumbStmia, rBase, rMask);
 #if defined(WITH_SELF_VERIFICATION)
     if (cUnit->heapMemOp)
-        res->branchInsertSV = true;
+        res->flags.insertWrapper = true;
 #endif
     genBarrier(cUnit);
     return res;
@@ -585,12 +576,12 @@
             }
             break;
         case kWord:
-            if (LOWREG(rDest) && (rBase == rpc) &&
+            if (LOWREG(rDest) && (rBase == r15pc) &&
                 (displacement <= 1020) && (displacement >= 0)) {
                 shortForm = true;
                 encodedDisp >>= 2;
                 opcode = kThumbLdrPcRel;
-            } else if (LOWREG(rDest) && (rBase == r13) &&
+            } else if (LOWREG(rDest) && (rBase == r13sp) &&
                       (displacement <= 1020) && (displacement >= 0)) {
                 shortForm = true;
                 encodedDisp >>= 2;
@@ -649,14 +640,14 @@
                                         : rDest;
             res = loadConstant(cUnit, rTmp, displacement);
             load = newLIR3(cUnit, opcode, rDest, rBase, rTmp);
-            if (rBase == rFP)
+            if (rBase == r5FP)
                 annotateDalvikRegAccess(load, displacement >> 2,
                                         true /* isLoad */);
             if (rTmp != rDest)
                 dvmCompilerFreeTemp(cUnit, rTmp);
         }
     }
-    if (rBase == rFP) {
+    if (rBase == r5FP) {
         if (load != NULL)
             annotateDalvikRegAccess(load, displacement >> 2,
                                     true /* isLoad */);
@@ -666,9 +657,9 @@
     }
 #if defined(WITH_SELF_VERIFICATION)
     if (load != NULL && cUnit->heapMemOp)
-        load->branchInsertSV = true;
+        load->flags.insertWrapper = true;
     if (load2 != NULL && cUnit->heapMemOp)
-        load2->branchInsertSV = true;
+        load2->flags.insertWrapper = true;
 #endif
     return res;
 }
@@ -766,7 +757,7 @@
         }
         dvmCompilerFreeTemp(cUnit, rScratch);
     }
-    if (rBase == rFP) {
+    if (rBase == r5FP) {
         if (store != NULL)
             annotateDalvikRegAccess(store, displacement >> 2,
                                     false /* isLoad */);
@@ -776,9 +767,9 @@
     }
 #if defined(WITH_SELF_VERIFICATION)
     if (store != NULL && cUnit->heapMemOp)
-        store->branchInsertSV = true;
+        store->flags.insertWrapper = true;
     if (store2 != NULL && cUnit->heapMemOp)
-        store2->branchInsertSV = true;
+        store2->flags.insertWrapper = true;
 #endif
     return res;
 }
@@ -819,7 +810,7 @@
 {
     ArmLIR* res;
     ArmOpcode opcode;
-    res = dvmCompilerNew(sizeof(ArmLIR), true);
+    res = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
     if (LOWREG(rDest) && LOWREG(rSrc))
         opcode = kThumbMovRR;
     else if (!LOWREG(rDest) && !LOWREG(rSrc))
@@ -834,7 +825,7 @@
     res->opcode = opcode;
     setupResourceMasks(res);
     if (rDest == rSrc) {
-        res->isNop = true;
+        res->flags.isNop = true;
     }
     return res;
 }
@@ -874,3 +865,50 @@
     ArmLIR *branch = newLIR2(cUnit, kThumbBCond, 0, cond);
     return branch;
 }
+
+#if defined(WITH_SELF_VERIFICATION)
+static void genSelfVerificationPreBranch(CompilationUnit *cUnit,
+                                         ArmLIR *origLIR) {
+    /*
+     * We need two separate pushes, since we want r5 to be pushed first.
+     * Store multiple will push LR first.
+     */
+    ArmLIR *pushFP = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
+    pushFP->opcode = kThumbPush;
+    pushFP->operands[0] = 1 << r5FP;
+    setupResourceMasks(pushFP);
+    dvmCompilerInsertLIRBefore((LIR *) origLIR, (LIR *) pushFP);
+
+    ArmLIR *pushLR = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
+    pushLR->opcode = kThumbPush;
+    /* Thumb push can handle LR, but is encoded differently at bit 8 */
+    pushLR->operands[0] = 1 << 8;
+    setupResourceMasks(pushLR);
+    dvmCompilerInsertLIRBefore((LIR *) origLIR, (LIR *) pushLR);
+}
+
+static void genSelfVerificationPostBranch(CompilationUnit *cUnit,
+                                         ArmLIR *origLIR) {
+    /*
+     * Since Thumb cannot pop memory content into LR, we have to pop LR
+     * into a temp first (r5 in this case). Then we move r5 to LR and pop the
+     * original r5 from the stack.
+     */
+    /* Pop memory content (LR) into r5 first */
+    ArmLIR *popForLR = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
+    popForLR->opcode = kThumbPop;
+    popForLR->operands[0] = 1 << r5FP;
+    setupResourceMasks(popForLR);
+    dvmCompilerInsertLIRAfter((LIR *) origLIR, (LIR *) popForLR);
+
+    ArmLIR *copy = genRegCopyNoInsert(cUnit, r14lr, r5FP);
+    dvmCompilerInsertLIRAfter((LIR *) popForLR, (LIR *) copy);
+
+    /* Now restore the original r5 */
+    ArmLIR *popFP = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
+    popFP->opcode = kThumbPop;
+    popFP->operands[0] = 1 << r5FP;
+    setupResourceMasks(popFP);
+    dvmCompilerInsertLIRAfter((LIR *) copy, (LIR *) popFP);
+}
+#endif
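
The self-verification prologue added above pushes r5 and LR with 16-bit Thumb
PUSH instructions, encoding LR through bit 8 of the register list rather than
a bit of its own. A minimal sketch of building such an operand (hypothetical
helper, not a function from this tree; only the bit layout is taken from the
code above):

    #include <stdint.h>
    #include <stdio.h>

    /*
     * 16-bit Thumb PUSH: bits 0-7 select r0-r7, and bit 8 is the extra bit
     * that adds LR to the list. Register numbers follow the ARM convention.
     */
    static uint16_t thumbPushOperand(uint16_t lowRegMask, int includeLR)
    {
        uint16_t operand = lowRegMask & 0xff;   /* only r0-r7 fit directly */
        if (includeLR)
            operand |= 1 << 8;                  /* LR travels in bit 8 */
        return operand;
    }

    int main(void)
    {
        /* push {r5, lr} as one list; the code above deliberately uses two
           separate pushes instead, to control the order on the stack */
        printf("0x%03x\n", thumbPushOperand(1 << 5, 1));
        return 0;
    }
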
diff --git a/vm/compiler/codegen/arm/Thumb/Gen.c b/vm/compiler/codegen/arm/Thumb/Gen.c
index 37cc18d..18ef762 100644
--- a/vm/compiler/codegen/arm/Thumb/Gen.c
+++ b/vm/compiler/codegen/arm/Thumb/Gen.c
@@ -23,6 +23,62 @@
  */
 
 /*
+ * Reserve 6 bytes at the beginning of the trace
+ *        +----------------------------+
+ *        | prof count addr (4 bytes)  |
+ *        +----------------------------+
+ *        | chain cell offset (2 bytes)|
+ *        +----------------------------+
+ *
+ * ...and then code to increment the execution count.
+ *
+ * For continuous profiling (12 bytes):
+ *
+ *       mov   r0, pc       @ move addr of "mov r0,pc" + 4 to r0
+ *       sub   r0, #10      @ back up to the prof count addr field
+ *       ldr   r0, [r0]     @ get address of counter
+ *       ldr   r1, [r0]
+ *       add   r1, #1
+ *       str   r1, [r0]
+ *
+ * For periodic profiling (4 bytes):
+ *       call  TEMPLATE_PERIODIC_PROFILING
+ *
+ * and return the size (in bytes) of the generated code.
+ */
+
+static int genTraceProfileEntry(CompilationUnit *cUnit)
+{
+    intptr_t addr = (intptr_t)dvmJitNextTraceCounter();
+    assert(__BYTE_ORDER == __LITTLE_ENDIAN);
+    newLIR1(cUnit, kArm16BitData, addr & 0xffff);
+    newLIR1(cUnit, kArm16BitData, (addr >> 16) & 0xffff);
+    cUnit->chainCellOffsetLIR =
+        (LIR *) newLIR1(cUnit, kArm16BitData, CHAIN_CELL_OFFSET_TAG);
+    cUnit->headerSize = 6;
+    if ((gDvmJit.profileMode == kTraceProfilingContinuous) ||
+        (gDvmJit.profileMode == kTraceProfilingDisabled)) {
+        /* Thumb instruction used directly here to ensure correct size */
+        newLIR2(cUnit, kThumbMovRR_H2L, r0, r15pc);
+        newLIR2(cUnit, kThumbSubRI8, r0, 10);
+        newLIR3(cUnit, kThumbLdrRRI5, r0, r0, 0);
+        newLIR3(cUnit, kThumbLdrRRI5, r1, r0, 0);
+        newLIR2(cUnit, kThumbAddRI8, r1, 1);
+        newLIR3(cUnit, kThumbStrRRI5, r1, r0, 0);
+        return 12;
+    } else {
+        int opcode = TEMPLATE_PERIODIC_PROFILING;
+        newLIR2(cUnit, kThumbBlx1,
+            (int) gDvmJit.codeCache + templateEntryOffsets[opcode],
+            (int) gDvmJit.codeCache + templateEntryOffsets[opcode]);
+        newLIR2(cUnit, kThumbBlx2,
+            (int) gDvmJit.codeCache + templateEntryOffsets[opcode],
+            (int) gDvmJit.codeCache + templateEntryOffsets[opcode]);
+        return 4;
+    }
+}
+
+/*
  * Perform a "reg cmp imm" operation and jump to the PCR region if condition
  * satisfies.
  */
@@ -113,21 +169,15 @@
 void dvmCompilerInitializeRegAlloc(CompilationUnit *cUnit)
 {
     int numTemps = sizeof(coreTemps)/sizeof(int);
-    RegisterPool *pool = dvmCompilerNew(sizeof(*pool), true);
+    RegisterPool *pool = (RegisterPool *) dvmCompilerNew(sizeof(*pool), true);
     cUnit->regPool = pool;
     pool->numCoreTemps = numTemps;
-    pool->coreTemps =
+    pool->coreTemps = (RegisterInfo *)
             dvmCompilerNew(numTemps * sizeof(*pool->coreTemps), true);
     pool->numFPTemps = 0;
     pool->FPTemps = NULL;
-    pool->numCoreRegs = 0;
-    pool->coreRegs = NULL;
-    pool->numFPRegs = 0;
-    pool->FPRegs = NULL;
     dvmCompilerInitPool(pool->coreTemps, coreTemps, pool->numCoreTemps);
     dvmCompilerInitPool(pool->FPTemps, NULL, 0);
-    dvmCompilerInitPool(pool->coreRegs, NULL, 0);
-    dvmCompilerInitPool(pool->FPRegs, NULL, 0);
     pool->nullCheckedRegs =
         dvmCompilerAllocBitVector(cUnit->numSSARegs, false);
 }
@@ -140,7 +190,7 @@
     int rAddr = dvmCompilerAllocTemp(cUnit);
     int offset = offsetof(StackSaveArea, xtra.currentPc);
     res = loadConstant(cUnit, rDPC, (int) (cUnit->method->insns + mir->offset));
-    newLIR2(cUnit, kThumbMovRR, rAddr, rFP);
+    newLIR2(cUnit, kThumbMovRR, rAddr, r5FP);
     newLIR2(cUnit, kThumbSubRI8, rAddr, sizeof(StackSaveArea) - offset);
     storeWordDisp( cUnit, rAddr, 0, rDPC);
     return res;
@@ -164,41 +214,41 @@
 
 static bool genInlinedAbsFloat(CompilationUnit *cUnit, MIR *mir)
 {
-    int offset = offsetof(InterpState, retval);
+    int offset = offsetof(Thread, retval);
     RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
     int reg0 = loadValue(cUnit, rlSrc, kCoreReg).lowReg;
     int signMask = dvmCompilerAllocTemp(cUnit);
     loadConstant(cUnit, signMask, 0x7fffffff);
     newLIR2(cUnit, kThumbAndRR, reg0, signMask);
     dvmCompilerFreeTemp(cUnit, signMask);
-    storeWordDisp(cUnit, rGLUE, offset, reg0);
+    storeWordDisp(cUnit, r6SELF, offset, reg0);
     //TUNING: rewrite this to not clobber
     dvmCompilerClobber(cUnit, reg0);
-    return true;
+    return false;
 }
 
 static bool genInlinedAbsDouble(CompilationUnit *cUnit, MIR *mir)
 {
-    int offset = offsetof(InterpState, retval);
+    int offset = offsetof(Thread, retval);
     RegLocation rlSrc = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
     RegLocation regSrc = loadValueWide(cUnit, rlSrc, kCoreReg);
     int reglo = regSrc.lowReg;
     int reghi = regSrc.highReg;
     int signMask = dvmCompilerAllocTemp(cUnit);
     loadConstant(cUnit, signMask, 0x7fffffff);
-    storeWordDisp(cUnit, rGLUE, offset, reglo);
+    storeWordDisp(cUnit, r6SELF, offset, reglo);
     newLIR2(cUnit, kThumbAndRR, reghi, signMask);
     dvmCompilerFreeTemp(cUnit, signMask);
-    storeWordDisp(cUnit, rGLUE, offset + 4, reghi);
+    storeWordDisp(cUnit, r6SELF, offset + 4, reghi);
     //TUNING: rewrite this to not clobber
     dvmCompilerClobber(cUnit, reghi);
-    return true;
+    return false;
 }
 
 /* No select in thumb, so we need to branch.  Thumb2 will do better */
 static bool genInlinedMinMaxInt(CompilationUnit *cUnit, MIR *mir, bool isMin)
 {
-    int offset = offsetof(InterpState, retval);
+    int offset = offsetof(Thread, retval);
     RegLocation rlSrc1 = dvmCompilerGetSrc(cUnit, mir, 0);
     RegLocation rlSrc2 = dvmCompilerGetSrc(cUnit, mir, 1);
     int reg0 = loadValue(cUnit, rlSrc1, kCoreReg).lowReg;
@@ -209,7 +259,7 @@
     newLIR2(cUnit, kThumbMovRR, reg0, reg1);
     ArmLIR *target = newLIR0(cUnit, kArmPseudoTargetLabel);
     target->defMask = ENCODE_ALL;
-    newLIR3(cUnit, kThumbStrRRI5, reg0, rGLUE, offset >> 2);
+    newLIR3(cUnit, kThumbStrRRI5, reg0, r6SELF, offset >> 2);
     branch1->generic.target = (LIR *)target;
     //TUNING: rewrite this to not clobber
     dvmCompilerClobber(cUnit,reg0);
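
The 6-byte trace header documented above can be pictured as a packed record,
and the 12-byte continuous-profiling sequence simply dereferences its first
field and bumps the counter it points at. The sketch below is illustrative
only (a stand-in layout assuming a 32-bit target, not a type from the VM
headers):

    #include <stdint.h>

    /* Stand-in for the 6 bytes reserved at the start of a trace */
    typedef struct __attribute__((packed)) {
        int32_t  *profCountAddr;   /* 4 bytes: points at the execution counter */
        uint16_t  chainCellOffset; /* 2 bytes: offset to the chain cell section */
    } TraceHeader;

    /* What the emitted mov/sub/ldr/ldr/add/str sequence amounts to at run time */
    static void bumpTraceCounter(const TraceHeader *header)
    {
        (*header->profCountAddr)++;
    }

    int main(void)
    {
        int32_t counter = 0;
        TraceHeader header = { &counter, 0 };
        bumpTraceCounter(&header);
        return counter == 1 ? 0 : 1;
    }
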
diff --git a/vm/compiler/codegen/arm/Thumb2/Factory.c b/vm/compiler/codegen/arm/Thumb2/Factory.c
index 141c925..8045450 100644
--- a/vm/compiler/codegen/arm/Thumb2/Factory.c
+++ b/vm/compiler/codegen/arm/Thumb2/Factory.c
@@ -56,19 +56,17 @@
     if (encodedImm >= 0) {
         return newLIR2(cUnit, kThumb2Vmovs_IMM8, rDest, encodedImm);
     }
-    ArmLIR *dataTarget = scanLiteralPool(cUnit, value, 0);
+    ArmLIR *dataTarget = scanLiteralPool(cUnit->literalList, value, 0);
     if (dataTarget == NULL) {
-        dataTarget = addWordData(cUnit, value, false);
+        dataTarget = addWordData(cUnit, &cUnit->literalList, value);
     }
-    ArmLIR *loadPcRel = dvmCompilerNew(sizeof(ArmLIR), true);
+    ArmLIR *loadPcRel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
     loadPcRel->opcode = kThumb2Vldrs;
     loadPcRel->generic.target = (LIR *) dataTarget;
     loadPcRel->operands[0] = rDest;
-    loadPcRel->operands[1] = rpc;
+    loadPcRel->operands[1] = r15pc;
     setupResourceMasks(loadPcRel);
-    // Self-cosim workaround.
-    if (rDest != rlr)
-        setMemRefType(loadPcRel, true, kLiteral);
+    setMemRefType(loadPcRel, true, kLiteral);
     loadPcRel->aliasInfo = dataTarget->operands[0];
     dvmCompilerAppendLIR(cUnit, (LIR *) loadPcRel);
     return loadPcRel;
@@ -166,25 +164,16 @@
         return res;
     }
     /* No shortcut - go ahead and use literal pool */
-    ArmLIR *dataTarget = scanLiteralPool(cUnit, value, 0);
+    ArmLIR *dataTarget = scanLiteralPool(cUnit->literalList, value, 0);
     if (dataTarget == NULL) {
-        dataTarget = addWordData(cUnit, value, false);
+        dataTarget = addWordData(cUnit, &cUnit->literalList, value);
     }
-    ArmLIR *loadPcRel = dvmCompilerNew(sizeof(ArmLIR), true);
+    ArmLIR *loadPcRel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
     loadPcRel->opcode = kThumb2LdrPcRel12;
     loadPcRel->generic.target = (LIR *) dataTarget;
     loadPcRel->operands[0] = rDest;
     setupResourceMasks(loadPcRel);
-    /*
-     * Special case for literal loads with a link register target.
-     * Self-cosim mode will insert calls prior to heap references
-     * after optimization, and those will destroy r14.  The easy
-     * workaround is to treat literal loads into r14 as heap references
-     * to prevent them from being hoisted.  Use of r14 in this manner
-     * is currently rare.  Revisit if that changes.
-     */
-    if (rDest != rlr)
-        setMemRefType(loadPcRel, true, kLiteral);
+    setMemRefType(loadPcRel, true, kLiteral);
     loadPcRel->aliasInfo = dataTarget->operands[0];
     res = loadPcRel;
     dvmCompilerAppendLIR(cUnit, (LIR *) loadPcRel);
@@ -234,12 +223,30 @@
 {
     ArmOpcode opcode = kThumbBkpt;
     switch (op) {
-        case kOpPush:
-            opcode = ((value & 0xff00) != 0) ? kThumb2Push : kThumbPush;
+        case kOpPush: {
+            if ((value & 0xff00) == 0) {
+                opcode = kThumbPush;
+            } else if ((value & 0xff00) == (1 << r14lr)) {
+                /* Thumb push can handle lr, which is encoded by bit 8 */
+                opcode = kThumbPush;
+                value = (value & 0xff) | (1<<8);
+            } else {
+                opcode = kThumb2Push;
+            }
             break;
-        case kOpPop:
-            opcode = ((value & 0xff00) != 0) ? kThumb2Pop : kThumbPop;
+        }
+        case kOpPop: {
+            if ((value & 0xff00) == 0) {
+                opcode = kThumbPop;
+            } else if ((value & 0xff00) == (1 << r15pc)) {
+                /* Thumb pop can handle pc, which is encoded by bit 8 */
+                opcode = kThumbPop;
+                value = (value & 0xff) | (1<<8);
+            } else {
+                opcode = kThumb2Pop;
+            }
             break;
+        }
         default:
             assert(0);
     }
@@ -482,11 +489,11 @@
         case kOpRor:
             return newLIR3(cUnit, kThumb2RorRRI5, rDest, rSrc1, value);
         case kOpAdd:
-            if (LOWREG(rDest) && (rSrc1 == 13) &&
+            if (LOWREG(rDest) && (rSrc1 == r13sp) &&
                 (value <= 1020) && ((value & 0x3)==0)) {
                 return newLIR3(cUnit, kThumbAddSpRel, rDest, rSrc1,
                                value >> 2);
-            } else if (LOWREG(rDest) && (rSrc1 == rpc) &&
+            } else if (LOWREG(rDest) && (rSrc1 == r15pc) &&
                        (value <= 1020) && ((value & 0x3)==0)) {
                 return newLIR3(cUnit, kThumbAddPcRel, rDest, rSrc1,
                                value >> 2);
@@ -583,7 +590,7 @@
     ArmOpcode opcode = kThumbBkpt;
     switch (op) {
         case kOpAdd:
-            if ( !neg && (rDestSrc1 == 13) && (value <= 508)) { /* sp */
+            if ( !neg && (rDestSrc1 == r13sp) && (value <= 508)) { /* sp */
                 assert((value & 0x3) == 0);
                 return newLIR1(cUnit, kThumbAddSpI7, value >> 2);
             } else if (shortForm) {
@@ -591,7 +598,7 @@
             }
             break;
         case kOpSub:
-            if (!neg && (rDestSrc1 == 13) && (value <= 508)) { /* sp */
+            if (!neg && (rDestSrc1 == r13sp) && (value <= 508)) { /* sp */
                 assert((value & 0x3) == 0);
                 return newLIR1(cUnit, kThumbSubSpI7, value >> 2);
             } else if (shortForm) {
@@ -704,7 +711,7 @@
             load = newLIR3(cUnit, opcode, rDest, regPtr, 0);
 #if defined(WITH_SELF_VERIFICATION)
             if (cUnit->heapMemOp)
-                load->branchInsertSV = true;
+                load->flags.insertWrapper = true;
 #endif
             return load;
         case kWord:
@@ -732,7 +739,7 @@
 
 #if defined(WITH_SELF_VERIFICATION)
     if (cUnit->heapMemOp)
-        load->branchInsertSV = true;
+        load->flags.insertWrapper = true;
 #endif
     return load;
 }
@@ -768,7 +775,7 @@
             store = newLIR3(cUnit, opcode, rSrc, regPtr, 0);
 #if defined(WITH_SELF_VERIFICATION)
             if (cUnit->heapMemOp)
-                store->branchInsertSV = true;
+                store->flags.insertWrapper = true;
 #endif
             return store;
         case kWord:
@@ -792,7 +799,7 @@
 
 #if defined(WITH_SELF_VERIFICATION)
     if (cUnit->heapMemOp)
-        store->branchInsertSV = true;
+        store->flags.insertWrapper = true;
 #endif
     return store;
 }
@@ -844,12 +851,12 @@
                 }
                 break;
             }
-            if (LOWREG(rDest) && (rBase == rpc) &&
+            if (LOWREG(rDest) && (rBase == r15pc) &&
                 (displacement <= 1020) && (displacement >= 0)) {
                 shortForm = true;
                 encodedDisp >>= 2;
                 opcode = kThumbLdrPcRel;
-            } else if (LOWREG(rDest) && (rBase == r13) &&
+            } else if (LOWREG(rDest) && (rBase == r13sp) &&
                       (displacement <= 1020) && (displacement >= 0)) {
                 shortForm = true;
                 encodedDisp >>= 2;
@@ -909,12 +916,12 @@
         dvmCompilerFreeTemp(cUnit, regOffset);
     }
 
-    if (rBase == rFP) {
+    if (rBase == r5FP) {
         annotateDalvikRegAccess(load, displacement >> 2, true /* isLoad */);
     }
 #if defined(WITH_SELF_VERIFICATION)
     if (cUnit->heapMemOp)
-        load->branchInsertSV = true;
+        load->flags.insertWrapper = true;
 #endif
     return res;
 }
@@ -1022,12 +1029,12 @@
         dvmCompilerFreeTemp(cUnit, rScratch);
     }
 
-    if (rBase == rFP) {
+    if (rBase == r5FP) {
         annotateDalvikRegAccess(store, displacement >> 2, false /* isLoad */);
     }
 #if defined(WITH_SELF_VERIFICATION)
     if (cUnit->heapMemOp)
-        store->branchInsertSV = true;
+        store->flags.insertWrapper = true;
 #endif
     return res;
 }
@@ -1055,7 +1062,7 @@
     }
 #if defined(WITH_SELF_VERIFICATION)
     if (cUnit->heapMemOp)
-        res->branchInsertSV = true;
+        res->flags.insertWrapper = true;
 #endif
     genBarrier(cUnit);
     return res;
@@ -1072,7 +1079,7 @@
     }
 #if defined(WITH_SELF_VERIFICATION)
     if (cUnit->heapMemOp)
-        res->branchInsertSV = true;
+        res->flags.insertWrapper = true;
 #endif
     genBarrier(cUnit);
     return res;
@@ -1121,11 +1128,11 @@
 
 static ArmLIR *fpRegCopy(CompilationUnit *cUnit, int rDest, int rSrc)
 {
-    ArmLIR* res = dvmCompilerNew(sizeof(ArmLIR), true);
+    ArmLIR* res = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
     res->operands[0] = rDest;
     res->operands[1] = rSrc;
     if (rDest == rSrc) {
-        res->isNop = true;
+        res->flags.isNop = true;
     } else {
         assert(DOUBLEREG(rDest) == DOUBLEREG(rSrc));
         if (DOUBLEREG(rDest)) {
@@ -1151,7 +1158,7 @@
     ArmOpcode opcode;
     if (FPREG(rDest) || FPREG(rSrc))
         return fpRegCopy(cUnit, rDest, rSrc);
-    res = dvmCompilerNew(sizeof(ArmLIR), true);
+    res = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
     if (LOWREG(rDest) && LOWREG(rSrc))
         opcode = kThumbMovRR;
     else if (!LOWREG(rDest) && !LOWREG(rSrc))
@@ -1166,7 +1173,7 @@
     res->opcode = opcode;
     setupResourceMasks(res);
     if (rDest == rSrc) {
-        res->isNop = true;
+        res->flags.isNop = true;
     }
     return res;
 }
@@ -1206,3 +1213,25 @@
         }
     }
 }
+
+#if defined(WITH_SELF_VERIFICATION)
+static void genSelfVerificationPreBranch(CompilationUnit *cUnit,
+                                         ArmLIR *origLIR) {
+    ArmLIR *push = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
+    push->opcode = kThumbPush;
+    /* Thumb push can handle LR (encoded at bit 8) */
+    push->operands[0] = (1 << r5FP | 1 << 8);
+    setupResourceMasks(push);
+    dvmCompilerInsertLIRBefore((LIR *) origLIR, (LIR *) push);
+}
+
+static void genSelfVerificationPostBranch(CompilationUnit *cUnit,
+                                         ArmLIR *origLIR) {
+    ArmLIR *pop = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
+    /* Thumb pop cannot store into LR - use Thumb2 here */
+    pop->opcode = kThumb2Pop;
+    pop->operands[0] = (1 << r5FP | 1 << r14lr);
+    setupResourceMasks(pop);
+    dvmCompilerInsertLIRAfter((LIR *) origLIR, (LIR *) pop);
+}
+#endif
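
Both literal-pool paths above (the VFP and the core loadConstant variants) now
follow the same scan-then-append pattern over cUnit->literalList: reuse an
existing pool word if one already holds the value, otherwise add a new one. A
minimal sketch of that pattern over a plain linked list (hypothetical Literal
type; the real code keeps these entries as ArmLIR data words):

    #include <stdint.h>
    #include <stdlib.h>

    typedef struct Literal {
        int32_t         value;
        struct Literal *next;
    } Literal;

    /* Return an existing entry holding 'value', or NULL if none matches */
    static Literal *scanPool(Literal *head, int32_t value)
    {
        for (; head != NULL; head = head->next)
            if (head->value == value)
                return head;
        return NULL;
    }

    /* Mirrors the scan-then-add shape used by the loadConstant helpers */
    static Literal *findOrAddLiteral(Literal **headp, int32_t value)
    {
        Literal *hit = scanPool(*headp, value);
        if (hit == NULL) {
            hit = (Literal *) calloc(1, sizeof(*hit));
            hit->value = value;
            hit->next = *headp;
            *headp = hit;
        }
        return hit;
    }

    int main(void)
    {
        Literal *pool = NULL;
        Literal *a = findOrAddLiteral(&pool, 0x12345678);
        Literal *b = findOrAddLiteral(&pool, 0x12345678); /* reuses entry */
        return (a == b) ? 0 : 1;
    }
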
diff --git a/vm/compiler/codegen/arm/Thumb2/Gen.c b/vm/compiler/codegen/arm/Thumb2/Gen.c
index 825b271..f54b7eb 100644
--- a/vm/compiler/codegen/arm/Thumb2/Gen.c
+++ b/vm/compiler/codegen/arm/Thumb2/Gen.c
@@ -15,13 +15,64 @@
  */
 
 /*
- * This file contains codegen for the Thumb ISA and is intended to be
+ * This file contains codegen for the Thumb2 ISA and is intended to be
  * includes by:
  *
  *        Codegen-$(TARGET_ARCH_VARIANT).c
  *
  */
 
+/*
+ * Reserve 6 bytes at the beginning of the trace
+ *        +----------------------------+
+ *        | prof count addr (4 bytes)  |
+ *        +----------------------------+
+ *        | chain cell offset (2 bytes)|
+ *        +----------------------------+
+ *
+ * ...and then code to increment the execution count.
+ *
+ * For continuous profiling (10 bytes)
+ *       ldr   r0, [pc-8]   @ get prof count addr    [4 bytes]
+ *       ldr   r1, [r0]     @ load counter           [2 bytes]
+ *       add   r1, #1       @ increment              [2 bytes]
+ *       str   r1, [r0]     @ store                  [2 bytes]
+ *
+ * For periodic profiling (4 bytes)
+ *       call  TEMPLATE_PERIODIC_PROFILING
+ *
+ * and return the size (in bytes) of the generated code.
+ */
+
+static int genTraceProfileEntry(CompilationUnit *cUnit)
+{
+    intptr_t addr = (intptr_t)dvmJitNextTraceCounter();
+    assert(__BYTE_ORDER == __LITTLE_ENDIAN);
+    newLIR1(cUnit, kArm16BitData, addr & 0xffff);
+    newLIR1(cUnit, kArm16BitData, (addr >> 16) & 0xffff);
+    cUnit->chainCellOffsetLIR =
+        (LIR *) newLIR1(cUnit, kArm16BitData, CHAIN_CELL_OFFSET_TAG);
+    cUnit->headerSize = 6;
+    if ((gDvmJit.profileMode == kTraceProfilingContinuous) ||
+        (gDvmJit.profileMode == kTraceProfilingDisabled)) {
+        /* Thumb[2] instruction used directly here to ensure correct size */
+        newLIR2(cUnit, kThumb2LdrPcReln12, r0, 8);
+        newLIR3(cUnit, kThumbLdrRRI5, r1, r0, 0);
+        newLIR2(cUnit, kThumbAddRI8, r1, 1);
+        newLIR3(cUnit, kThumbStrRRI5, r1, r0, 0);
+        return 10;
+    } else {
+        int opcode = TEMPLATE_PERIODIC_PROFILING;
+        newLIR2(cUnit, kThumbBlx1,
+            (int) gDvmJit.codeCache + templateEntryOffsets[opcode],
+            (int) gDvmJit.codeCache + templateEntryOffsets[opcode]);
+        newLIR2(cUnit, kThumbBlx2,
+            (int) gDvmJit.codeCache + templateEntryOffsets[opcode],
+            (int) gDvmJit.codeCache + templateEntryOffsets[opcode]);
+        return 4;
+    }
+}
+
 static void genNegFloat(CompilationUnit *cUnit, RegLocation rlDest,
                         RegLocation rlSrc)
 {
@@ -89,22 +140,16 @@
 {
     int numTemps = sizeof(coreTemps)/sizeof(int);
     int numFPTemps = sizeof(fpTemps)/sizeof(int);
-    RegisterPool *pool = dvmCompilerNew(sizeof(*pool), true);
+    RegisterPool *pool = (RegisterPool *)dvmCompilerNew(sizeof(*pool), true);
     cUnit->regPool = pool;
     pool->numCoreTemps = numTemps;
-    pool->coreTemps =
+    pool->coreTemps = (RegisterInfo *)
             dvmCompilerNew(numTemps * sizeof(*cUnit->regPool->coreTemps), true);
     pool->numFPTemps = numFPTemps;
-    pool->FPTemps =
+    pool->FPTemps = (RegisterInfo *)
             dvmCompilerNew(numFPTemps * sizeof(*cUnit->regPool->FPTemps), true);
-    pool->numCoreRegs = 0;
-    pool->coreRegs = NULL;
-    pool->numFPRegs = 0;
-    pool->FPRegs = NULL;
     dvmCompilerInitPool(pool->coreTemps, coreTemps, pool->numCoreTemps);
     dvmCompilerInitPool(pool->FPTemps, fpTemps, pool->numFPTemps);
-    dvmCompilerInitPool(pool->coreRegs, NULL, 0);
-    dvmCompilerInitPool(pool->FPRegs, NULL, 0);
     pool->nullCheckedRegs =
         dvmCompilerAllocBitVector(cUnit->numSSARegs, false);
 }
@@ -156,7 +201,7 @@
     int offset = offsetof(StackSaveArea, xtra.currentPc);
     int rDPC = dvmCompilerAllocTemp(cUnit);
     res = loadConstant(cUnit, rDPC, (int) (cUnit->method->insns + mir->offset));
-    newLIR3(cUnit, kThumb2StrRRI8Predec, rDPC, rFP,
+    newLIR3(cUnit, kThumb2StrRRI8Predec, rDPC, r5FP,
             sizeof(StackSaveArea) - offset);
     dvmCompilerFreeTemp(cUnit, rDPC);
     return res;
@@ -201,9 +246,8 @@
     loadValueDirectFixed(cUnit, rlSrc, r1);  // Get obj
     dvmCompilerLockAllTemps(cUnit);  // Prepare for explicit register usage
     dvmCompilerFreeTemp(cUnit, r4PC);  // Free up r4 for general use
-    loadWordDisp(cUnit, rGLUE, offsetof(InterpState, self), r0); // Get self
     genNullCheck(cUnit, rlSrc.sRegLow, r1, mir->offset, NULL);
-    loadWordDisp(cUnit, r0, offsetof(Thread, threadId), r3); // Get threadId
+    loadWordDisp(cUnit, r6SELF, offsetof(Thread, threadId), r3); // Get threadId
     newLIR3(cUnit, kThumb2Ldrex, r2, r1,
             offsetof(Object, lock) >> 2); // Get object->lock
     opRegImm(cUnit, kOpLsl, r3, LW_LOCK_OWNER_SHIFT); // Align owner
@@ -227,10 +271,11 @@
     loadConstant(cUnit, r4PC, (int)(cUnit->method->insns + mir->offset +
                  dexGetWidthFromOpcode(OP_MONITOR_ENTER)));
     // Export PC (part 2)
-    newLIR3(cUnit, kThumb2StrRRI8Predec, r3, rFP,
+    newLIR3(cUnit, kThumb2StrRRI8Predec, r3, r5FP,
             sizeof(StackSaveArea) -
             offsetof(StackSaveArea, xtra.currentPc));
     /* Call template, and don't return */
+    genRegCopy(cUnit, r0, r6SELF);
     genDispatchToHandler(cUnit, TEMPLATE_MONITOR_ENTER);
     // Resume here
     target = newLIR0(cUnit, kArmPseudoTargetLabel);
@@ -256,10 +301,9 @@
     loadValueDirectFixed(cUnit, rlSrc, r1);  // Get obj
     dvmCompilerLockAllTemps(cUnit);  // Prepare for explicit register usage
     dvmCompilerFreeTemp(cUnit, r4PC);  // Free up r4 for general use
-    loadWordDisp(cUnit, rGLUE, offsetof(InterpState, self), r0); // Get self
     genNullCheck(cUnit, rlSrc.sRegLow, r1, mir->offset, NULL);
     loadWordDisp(cUnit, r1, offsetof(Object, lock), r2); // Get object->lock
-    loadWordDisp(cUnit, r0, offsetof(Thread, threadId), r3); // Get threadId
+    loadWordDisp(cUnit, r6SELF, offsetof(Thread, threadId), r3); // Get threadId
     // Is lock unheld on lock or held by us (==threadId) on unlock?
     opRegRegImm(cUnit, kOpAnd, r7, r2,
                 (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT));
@@ -280,8 +324,9 @@
     loadConstant(cUnit, r3, (int) (cUnit->method->insns + mir->offset));
 
     LOAD_FUNC_ADDR(cUnit, r7, (int)dvmUnlockObject);
+    genRegCopy(cUnit, r0, r6SELF);
     // Export PC (part 2)
-    newLIR3(cUnit, kThumb2StrRRI8Predec, r3, rFP,
+    newLIR3(cUnit, kThumb2StrRRI8Predec, r3, r5FP,
             sizeof(StackSaveArea) -
             offsetof(StackSaveArea, xtra.currentPc));
     opReg(cUnit, kOpBlx, r7);
@@ -366,7 +411,7 @@
     RegLocation rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kFPReg, true);
     newLIR2(cUnit, kThumb2Vabss, rlResult.lowReg, rlSrc.lowReg);
     storeValue(cUnit, rlDest, rlResult);
-    return true;
+    return false;
 }
 
 static bool genInlinedAbsDouble(CompilationUnit *cUnit, MIR *mir)
@@ -378,7 +423,7 @@
     newLIR2(cUnit, kThumb2Vabsd, S2D(rlResult.lowReg, rlResult.highReg),
             S2D(rlSrc.lowReg, rlSrc.highReg));
     storeValueWide(cUnit, rlDest, rlResult);
-    return true;
+    return false;
 }
 
 static bool genInlinedMinMaxInt(CompilationUnit *cUnit, MIR *mir, bool isMin)
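As a reading aid, here is a sketch of the trace header that genTraceProfileEntry() (near the top of this file's changes) emits; the offsets are taken from the newLIR1() calls and the headerSize assignment above, so treat the exact placement as approximate.

/*
 * Sketch of the 6-byte trace header built by genTraceProfileEntry():
 *
 *   +0  counter address, low 16 bits    (kArm16BitData)
 *   +2  counter address, high 16 bits   (kArm16BitData)
 *   +4  CHAIN_CELL_OFFSET_TAG           (chainCellOffsetLIR, patched later)
 *
 * With continuous (or disabled) profiling, the body that follows loads the
 * counter address PC-relative and does a load/increment/store on the
 * counter -- the 10-byte sequence described in the comment above; otherwise
 * a 4-byte call to TEMPLATE_PERIODIC_PROFILING is emitted instead.
 */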
diff --git a/vm/compiler/codegen/arm/armv5te-vfp/ArchVariant.c b/vm/compiler/codegen/arm/armv5te-vfp/ArchVariant.c
index f584ce7..076f5f1 100644
--- a/vm/compiler/codegen/arm/armv5te-vfp/ArchVariant.c
+++ b/vm/compiler/codegen/arm/armv5te-vfp/ArchVariant.c
@@ -74,11 +74,15 @@
      * EA is calculated by doing "Rn + imm5 << 2". Make sure that the last
      * offset from the struct is less than 128.
      */
-    if ((offsetof(InterpState, jitToInterpEntries) +
+    if ((offsetof(Thread, jitToInterpEntries) +
          sizeof(struct JitToInterpEntries)) >= 128) {
-        LOGE("InterpState.jitToInterpEntries size overflow");
+        LOGE("Thread.jitToInterpEntries size overflow");
         dvmAbort();
     }
+
+    /* No method JIT for Thumb backend */
+    gDvmJit.disableOpt |= (1 << kMethodJit);
+
     return true;
 }
 
diff --git a/vm/compiler/codegen/arm/armv5te-vfp/Codegen.c b/vm/compiler/codegen/arm/armv5te-vfp/Codegen.c
index d17965d..a2d77ea 100644
--- a/vm/compiler/codegen/arm/armv5te-vfp/Codegen.c
+++ b/vm/compiler/codegen/arm/armv5te-vfp/Codegen.c
@@ -49,5 +49,8 @@
 /* MIR2LIR dispatcher and architectural independent codegen routines */
 #include "../CodegenDriver.c"
 
+/* Dummy driver for method-based JIT */
+#include "../armv5te/MethodCodegenDriver.c"
+
 /* Architecture manifest */
 #include "ArchVariant.c"
diff --git a/vm/compiler/codegen/arm/armv5te/ArchVariant.c b/vm/compiler/codegen/arm/armv5te/ArchVariant.c
index cf48d4e..73d27f9 100644
--- a/vm/compiler/codegen/arm/armv5te/ArchVariant.c
+++ b/vm/compiler/codegen/arm/armv5te/ArchVariant.c
@@ -74,11 +74,15 @@
      * EA is calculated by doing "Rn + imm5 << 2". Make sure that the last
      * offset from the struct is less than 128.
      */
-    if ((offsetof(InterpState, jitToInterpEntries) +
+    if ((offsetof(Thread, jitToInterpEntries) +
          sizeof(struct JitToInterpEntries)) >= 128) {
-        LOGE("InterpState.jitToInterpEntries size overflow");
+        LOGE("Thread.jitToInterpEntries size overflow");
         dvmAbort();
     }
+
+    /* No method JIT for Thumb backend */
+    gDvmJit.disableOpt |= (1 << kMethodJit);
+
     return true;
 }
 
diff --git a/vm/compiler/codegen/arm/armv5te/Codegen.c b/vm/compiler/codegen/arm/armv5te/Codegen.c
index 03c1435..f74d968 100644
--- a/vm/compiler/codegen/arm/armv5te/Codegen.c
+++ b/vm/compiler/codegen/arm/armv5te/Codegen.c
@@ -49,5 +49,8 @@
 /* MIR2LIR dispatcher and architectural independent codegen routines */
 #include "../CodegenDriver.c"
 
+/* Dummy driver for method-based JIT */
+#include "MethodCodegenDriver.c"
+
 /* Architecture manifest */
 #include "ArchVariant.c"
diff --git a/vm/compiler/codegen/arm/armv5te/MethodCodegenDriver.c b/vm/compiler/codegen/arm/armv5te/MethodCodegenDriver.c
new file mode 100644
index 0000000..20779f3
--- /dev/null
+++ b/vm/compiler/codegen/arm/armv5te/MethodCodegenDriver.c
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+void dvmCompilerMethodMIR2LIR(CompilationUnit *cUnit)
+{
+    LOGE("Method-based JIT not supported for the v5te target");
+    dvmAbort();
+}
diff --git a/vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.c b/vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.c
index 7fcf031..bcd6a46 100644
--- a/vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.c
+++ b/vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.c
@@ -69,11 +69,15 @@
      * EA is calculated by doing "Rn + imm5 << 2". Make sure that the last
      * offset from the struct is less than 128.
      */
-    if ((offsetof(InterpState, jitToInterpEntries) +
+    if ((offsetof(Thread, jitToInterpEntries) +
          sizeof(struct JitToInterpEntries)) >= 128) {
-        LOGE("InterpState.jitToInterpEntries size overflow");
+        LOGE("Thread.jitToInterpEntries size overflow");
         dvmAbort();
     }
+
+    /* FIXME - comment out the following to enable method-based JIT */
+    gDvmJit.disableOpt |= (1 << kMethodJit);
+
     return true;
 }
 
diff --git a/vm/compiler/codegen/arm/armv7-a-neon/Codegen.c b/vm/compiler/codegen/arm/armv7-a-neon/Codegen.c
index f0b7722..439add5 100644
--- a/vm/compiler/codegen/arm/armv7-a-neon/Codegen.c
+++ b/vm/compiler/codegen/arm/armv7-a-neon/Codegen.c
@@ -49,5 +49,8 @@
 /* MIR2LIR dispatcher and architectural independent codegen routines */
 #include "../CodegenDriver.c"
 
+/* Driver for method-based JIT */
+#include "MethodCodegenDriver.c"
+
 /* Architecture manifest */
 #include "ArchVariant.c"
diff --git a/vm/compiler/codegen/arm/armv7-a-neon/MethodCodegenDriver.c b/vm/compiler/codegen/arm/armv7-a-neon/MethodCodegenDriver.c
new file mode 100644
index 0000000..5a08b60
--- /dev/null
+++ b/vm/compiler/codegen/arm/armv7-a-neon/MethodCodegenDriver.c
@@ -0,0 +1,452 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Rebuild the interpreter frame then punt to the interpreter to execute
+ * instruction at specified PC.
+ *
+ * Currently parameters are passed to the current frame, so we just need to
+ * grow the stack save area above it, fill certain fields in StackSaveArea and
+ * Thread that are skipped during whole-method invocation (specified below),
+ * then return to the interpreter.
+ *
+ * StackSaveArea:
+ *  - prevSave
+ *  - prevFrame
+ *  - savedPc
+ *  - returnAddr
+ *  - method
+ *
+ * Thread:
+ *  - method
+ *  - methodClassDex
+ *  - curFrame
+ */
+static void genMethodInflateAndPunt(CompilationUnit *cUnit, MIR *mir,
+                                    BasicBlock *bb)
+{
+    int oldStackSave = r0;
+    int newStackSave = r1;
+    int oldFP = r2;
+    int savedPC = r3;
+    int currentPC = r4PC;
+    int returnAddr = r7;
+    int method = r8;
+    int pDvmDex = r9;
+
+    /*
+     * TODO: check whether to raise the stack overflow exception when growing
+     * the stack save area.
+     */
+
+    /* Send everything to home location */
+    dvmCompilerFlushAllRegs(cUnit);
+
+    /* oldStackSave = r5FP + sizeof(current frame) */
+    opRegRegImm(cUnit, kOpAdd, oldStackSave, r5FP,
+                cUnit->method->registersSize * 4);
+    /* oldFP = oldStackSave + sizeof(stackSaveArea) */
+    opRegRegImm(cUnit, kOpAdd, oldFP, oldStackSave, sizeof(StackSaveArea));
+    /* newStackSave = r5FP - sizeof(StackSaveArea) */
+    opRegRegImm(cUnit, kOpSub, newStackSave, r5FP, sizeof(StackSaveArea));
+
+    loadWordDisp(cUnit, r13sp, 0, savedPC);
+    loadConstant(cUnit, currentPC, (int) (cUnit->method->insns + mir->offset));
+    loadConstant(cUnit, method, (int) cUnit->method);
+    loadConstant(cUnit, pDvmDex, (int) cUnit->method->clazz->pDvmDex);
+#ifdef EASY_GDB
+    /* newStackSave->prevSave = oldStackSave */
+    storeWordDisp(cUnit, newStackSave, offsetof(StackSaveArea, prevSave),
+                  oldStackSave);
+#endif
+    /* newStackSave->prevFrame = oldFP */
+    storeWordDisp(cUnit, newStackSave, offsetof(StackSaveArea, prevFrame),
+                  oldFP);
+    /* newStackSave->savedPc = savedPC */
+    storeWordDisp(cUnit, newStackSave, offsetof(StackSaveArea, savedPc),
+                  savedPC);
+    /* return address */
+    loadConstant(cUnit, returnAddr, 0);
+    storeWordDisp(cUnit, newStackSave, offsetof(StackSaveArea, returnAddr),
+                  returnAddr);
+    /* newStackSave->method = method */
+    storeWordDisp(cUnit, newStackSave, offsetof(StackSaveArea, method), method);
+    /* thread->method = method */
+    storeWordDisp(cUnit, r6SELF, offsetof(InterpSaveState, method), method);
+    /* thread->curFrame = current FP */
+    storeWordDisp(cUnit, r6SELF, offsetof(Thread, curFrame), r5FP);
+    /* thread->methodClassDex = pDvmDex */
+    storeWordDisp(cUnit, r6SELF, offsetof(InterpSaveState, methodClassDex),
+                  pDvmDex);
+    /* Restore the stack pointer */
+    opRegImm(cUnit, kOpAdd, r13sp, 16);
+    genPuntToInterp(cUnit, mir->offset);
+}
+
+/*
+ * The following are the first-level codegen routines that analyze the format
+ * of each bytecode then either dispatch special purpose codegen routines
+ * or produce corresponding Thumb instructions directly.
+ *
+ * TODO - most of them are just pass-throughs to the trace-based versions for now
+ */
+static bool handleMethodFmt10t_Fmt20t_Fmt30t(CompilationUnit *cUnit, MIR *mir,
+                                             BasicBlock *bb, ArmLIR *labelList)
+{
+    /* backward branch? */
+    bool backwardBranch = (bb->taken->startOffset <= mir->offset);
+
+    if (backwardBranch && gDvmJit.genSuspendPoll) {
+        genSuspendPoll(cUnit, mir);
+    }
+
+    /* For OP_GOTO, OP_GOTO_16, and OP_GOTO_32 */
+    genUnconditionalBranch(cUnit, &labelList[bb->taken->id]);
+    return false;
+}
+
+static bool handleMethodFmt10x(CompilationUnit *cUnit, MIR *mir)
+{
+    Opcode dalvikOpcode = mir->dalvikInsn.opcode;
+    switch (dalvikOpcode) {
+        case OP_RETURN_VOID:
+            return false;
+        default:
+            return handleFmt10x(cUnit, mir);
+    }
+}
+
+static bool handleMethodFmt11n_Fmt31i(CompilationUnit *cUnit, MIR *mir)
+{
+    return handleFmt11n_Fmt31i(cUnit, mir);
+}
+
+static bool handleMethodFmt11x(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
+                               ArmLIR *labelList)
+{
+    Opcode dalvikOpcode = mir->dalvikInsn.opcode;
+    switch (dalvikOpcode) {
+        case OP_THROW:
+            genMethodInflateAndPunt(cUnit, mir, bb);
+            return false;
+        default:
+            return handleFmt11x(cUnit, mir);
+    }
+}
+
+static bool handleMethodFmt12x(CompilationUnit *cUnit, MIR *mir)
+{
+    return handleFmt12x(cUnit, mir);
+}
+
+static bool handleMethodFmt20bc_Fmt40sc(CompilationUnit *cUnit, MIR *mir)
+{
+    return handleFmt20bc_Fmt40sc(cUnit, mir);
+}
+
+static bool handleMethodFmt21c_Fmt31c_Fmt41c(CompilationUnit *cUnit, MIR *mir)
+{
+    return handleFmt21c_Fmt31c_Fmt41c(cUnit, mir);
+}
+
+static bool handleMethodFmt21h(CompilationUnit *cUnit, MIR *mir)
+{
+    return handleFmt21h(cUnit, mir);
+}
+
+static bool handleMethodFmt21s(CompilationUnit *cUnit, MIR *mir)
+{
+    return handleFmt21s(cUnit, mir);
+}
+
+static bool handleMethodFmt21t(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
+                               ArmLIR *labelList)
+{
+    return handleFmt21t(cUnit, mir, bb, labelList);
+}
+
+static bool handleMethodFmt22b_Fmt22s(CompilationUnit *cUnit, MIR *mir)
+{
+    return handleFmt22b_Fmt22s(cUnit, mir);
+}
+
+static bool handleMethodFmt22c_Fmt52c(CompilationUnit *cUnit, MIR *mir)
+{
+    return handleFmt22c_Fmt52c(cUnit, mir);
+}
+
+static bool handleMethodFmt22cs(CompilationUnit *cUnit, MIR *mir)
+{
+    return handleFmt22cs(cUnit, mir);
+}
+
+static bool handleMethodFmt22t(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
+                               ArmLIR *labelList)
+{
+    return handleFmt22t(cUnit, mir, bb, labelList);
+}
+
+static bool handleMethodFmt22x_Fmt32x(CompilationUnit *cUnit, MIR *mir)
+{
+    return handleFmt22x_Fmt32x(cUnit, mir);
+}
+
+static bool handleMethodFmt23x(CompilationUnit *cUnit, MIR *mir)
+{
+    return handleFmt23x(cUnit, mir);
+}
+
+static bool handleMethodFmt31t(CompilationUnit *cUnit, MIR *mir)
+{
+    return handleFmt31t(cUnit, mir);
+}
+
+static bool handleMethodFmt35c_3rc_5rc(CompilationUnit *cUnit, MIR *mir,
+                                       BasicBlock *bb, ArmLIR *labelList)
+{
+    return handleFmt35c_3rc_5rc(cUnit, mir, bb, labelList);
+}
+
+static bool handleMethodFmt35ms_3rms(CompilationUnit *cUnit, MIR *mir,
+                                     BasicBlock *bb, ArmLIR *labelList)
+{
+    return handleFmt35ms_3rms(cUnit, mir, bb, labelList);
+}
+
+static bool handleMethodExecuteInline(CompilationUnit *cUnit, MIR *mir)
+{
+    return handleExecuteInline(cUnit, mir);
+}
+
+static bool handleMethodFmt51l(CompilationUnit *cUnit, MIR *mir)
+{
+    return handleFmt51l(cUnit, mir);
+}
+
+/* Handle the content in each basic block */
+static bool methodBlockCodeGen(CompilationUnit *cUnit, BasicBlock *bb)
+{
+    MIR *mir;
+    ArmLIR *labelList = (ArmLIR *) cUnit->blockLabelList;
+    int blockId = bb->id;
+
+    cUnit->curBlock = bb;
+    labelList[blockId].operands[0] = bb->startOffset;
+
+    /* Insert the block label */
+    labelList[blockId].opcode = kArmPseudoNormalBlockLabel;
+    dvmCompilerAppendLIR(cUnit, (LIR *) &labelList[blockId]);
+
+    dvmCompilerClobberAllRegs(cUnit);
+    dvmCompilerResetNullCheck(cUnit);
+
+    ArmLIR *headLIR = NULL;
+
+    if (bb->blockType == kMethodEntryBlock) {
+        /* r0 = callsitePC */
+        opImm(cUnit, kOpPush, (1 << r0 | 1 << r1 | 1 << r5FP | 1 << r14lr));
+        opRegImm(cUnit, kOpSub, r5FP,
+                 sizeof(StackSaveArea) + cUnit->method->registersSize * 4);
+
+    } else if (bb->blockType == kMethodExitBlock) {
+        /* No need to pop r0 and r1 */
+        opRegImm(cUnit, kOpAdd, r13sp, 8);
+        opImm(cUnit, kOpPop, (1 << r5FP | 1 << r15pc));
+    }
+
+    for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
+
+        dvmCompilerResetRegPool(cUnit);
+        if (gDvmJit.disableOpt & (1 << kTrackLiveTemps)) {
+            dvmCompilerClobberAllRegs(cUnit);
+        }
+
+        if (gDvmJit.disableOpt & (1 << kSuppressLoads)) {
+            dvmCompilerResetDefTracking(cUnit);
+        }
+
+        Opcode dalvikOpcode = mir->dalvikInsn.opcode;
+        InstructionFormat dalvikFormat =
+            dexGetFormatFromOpcode(dalvikOpcode);
+
+        ArmLIR *boundaryLIR;
+
+        /*
+         * Don't generate the boundary LIR unless we are debugging this
+         * trace or we need a scheduling barrier.
+         */
+        if (headLIR == NULL || cUnit->printMe == true) {
+            boundaryLIR =
+                newLIR2(cUnit, kArmPseudoDalvikByteCodeBoundary,
+                        mir->offset,
+                        (int) dvmCompilerGetDalvikDisassembly(
+                            &mir->dalvikInsn, ""));
+            /* Remember the first LIR for this block */
+            if (headLIR == NULL) {
+                headLIR = boundaryLIR;
+                /* Set the first boundaryLIR as a scheduling barrier */
+                headLIR->defMask = ENCODE_ALL;
+            }
+        }
+
+        /* Don't generate the SSA annotation unless verbose mode is on */
+        if (cUnit->printMe && mir->ssaRep) {
+            char *ssaString = dvmCompilerGetSSAString(cUnit, mir->ssaRep);
+            newLIR1(cUnit, kArmPseudoSSARep, (int) ssaString);
+        }
+
+        bool notHandled;
+        switch (dalvikFormat) {
+            case kFmt10t:
+            case kFmt20t:
+            case kFmt30t:
+                notHandled = handleMethodFmt10t_Fmt20t_Fmt30t(cUnit, mir, bb,
+                                                              labelList);
+                break;
+            case kFmt10x:
+                notHandled = handleMethodFmt10x(cUnit, mir);
+                break;
+            case kFmt11n:
+            case kFmt31i:
+                notHandled = handleMethodFmt11n_Fmt31i(cUnit, mir);
+                break;
+            case kFmt11x:
+                notHandled = handleMethodFmt11x(cUnit, mir, bb, labelList);
+                break;
+            case kFmt12x:
+                notHandled = handleMethodFmt12x(cUnit, mir);
+                break;
+            case kFmt20bc:
+            case kFmt40sc:
+                notHandled = handleMethodFmt20bc_Fmt40sc(cUnit, mir);
+                break;
+            case kFmt21c:
+            case kFmt31c:
+            case kFmt41c:
+                notHandled = handleMethodFmt21c_Fmt31c_Fmt41c(cUnit, mir);
+                break;
+            case kFmt21h:
+                notHandled = handleMethodFmt21h(cUnit, mir);
+                break;
+            case kFmt21s:
+                notHandled = handleMethodFmt21s(cUnit, mir);
+                break;
+            case kFmt21t:
+                notHandled = handleMethodFmt21t(cUnit, mir, bb, labelList);
+                break;
+            case kFmt22b:
+            case kFmt22s:
+                notHandled = handleMethodFmt22b_Fmt22s(cUnit, mir);
+                break;
+            case kFmt22c:
+            case kFmt52c:
+                notHandled = handleMethodFmt22c_Fmt52c(cUnit, mir);
+                break;
+            case kFmt22cs:
+                notHandled = handleMethodFmt22cs(cUnit, mir);
+                break;
+            case kFmt22t:
+                notHandled = handleMethodFmt22t(cUnit, mir, bb, labelList);
+                break;
+            case kFmt22x:
+            case kFmt32x:
+                notHandled = handleMethodFmt22x_Fmt32x(cUnit, mir);
+                break;
+            case kFmt23x:
+                notHandled = handleMethodFmt23x(cUnit, mir);
+                break;
+            case kFmt31t:
+                notHandled = handleMethodFmt31t(cUnit, mir);
+                break;
+            case kFmt3rc:
+            case kFmt35c:
+            case kFmt5rc:
+                notHandled = handleMethodFmt35c_3rc_5rc(cUnit, mir, bb,
+                                                        labelList);
+                break;
+            case kFmt3rms:
+            case kFmt35ms:
+                notHandled = handleMethodFmt35ms_3rms(cUnit, mir, bb,
+                                                      labelList);
+                break;
+            case kFmt35mi:
+            case kFmt3rmi:
+                notHandled = handleMethodExecuteInline(cUnit, mir);
+                break;
+            case kFmt51l:
+                notHandled = handleMethodFmt51l(cUnit, mir);
+                break;
+            default:
+                notHandled = true;
+                break;
+        }
+
+        /* FIXME - to be implemented */
+        if (notHandled == true && dalvikOpcode >= kNumPackedOpcodes) {
+            notHandled = false;
+        }
+
+        if (notHandled) {
+            LOGE("%#06x: Opcode 0x%x (%s) / Fmt %d not handled\n",
+                 mir->offset,
+                 dalvikOpcode, dexGetOpcodeName(dalvikOpcode),
+                 dalvikFormat);
+            dvmCompilerAbort(cUnit);
+            break;
+        }
+    }
+
+    if (headLIR) {
+        /*
+         * Eliminate redundant loads/stores and delay stores into later
+         * slots
+         */
+        dvmCompilerApplyLocalOptimizations(cUnit, (LIR *) headLIR,
+                                           cUnit->lastLIRInsn);
+
+        /*
+         * Generate an unconditional branch to the fallthrough block.
+         */
+        if (bb->fallThrough) {
+            genUnconditionalBranch(cUnit,
+                                   &labelList[bb->fallThrough->id]);
+        }
+    }
+    return false;
+}
+
+void dvmCompilerMethodMIR2LIR(CompilationUnit *cUnit)
+{
+    // FIXME - enable method compilation for selected routines here
+    if (strcmp(cUnit->method->name, "add")) return;
+
+    /* Used to hold the labels of each block */
+    cUnit->blockLabelList =
+        (void *) dvmCompilerNew(sizeof(ArmLIR) * cUnit->numBlocks, true);
+
+    dvmCompilerDataFlowAnalysisDispatcher(cUnit, methodBlockCodeGen,
+                                          kPreOrderDFSTraversal,
+                                          false /* isIterative */);
+
+    dvmCompilerApplyGlobalOptimizations(cUnit);
+
+    // FIXME - temporarily enable verbose printing for all methods
+    cUnit->printMe = true;
+
+#if defined(WITH_SELF_VERIFICATION)
+    selfVerificationBranchInsertPass(cUnit);
+#endif
+}
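For orientation, the entry/exit frame handling in methodBlockCodeGen() above amounts to the following convention; this is a restatement of the generated code in the kMethodEntryBlock/kMethodExitBlock branches, not additional behavior.

/*
 * kMethodEntryBlock:  push {r0, r1, r5FP, lr}     @ r0 holds the callsite PC
 *                     r5FP -= sizeof(StackSaveArea) + registersSize * 4
 * kMethodExitBlock:   sp += 8                     @ drop the saved r0/r1
 *                     pop {r5FP, pc}
 */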
diff --git a/vm/compiler/codegen/arm/armv7-a/ArchVariant.c b/vm/compiler/codegen/arm/armv7-a/ArchVariant.c
index 7fcf031..bcd6a46 100644
--- a/vm/compiler/codegen/arm/armv7-a/ArchVariant.c
+++ b/vm/compiler/codegen/arm/armv7-a/ArchVariant.c
@@ -69,11 +69,15 @@
      * EA is calculated by doing "Rn + imm5 << 2". Make sure that the last
      * offset from the struct is less than 128.
      */
-    if ((offsetof(InterpState, jitToInterpEntries) +
+    if ((offsetof(Thread, jitToInterpEntries) +
          sizeof(struct JitToInterpEntries)) >= 128) {
-        LOGE("InterpState.jitToInterpEntries size overflow");
+        LOGE("Thread.jitToInterpEntries size overflow");
         dvmAbort();
     }
+
+    /* FIXME - comment out the following to enable method-based JIT */
+    gDvmJit.disableOpt |= (1 << kMethodJit);
+
     return true;
 }
 
diff --git a/vm/compiler/codegen/arm/armv7-a/Codegen.c b/vm/compiler/codegen/arm/armv7-a/Codegen.c
index 05dda0c..36771ef 100644
--- a/vm/compiler/codegen/arm/armv7-a/Codegen.c
+++ b/vm/compiler/codegen/arm/armv7-a/Codegen.c
@@ -49,5 +49,8 @@
 /* MIR2LIR dispatcher and architectural independent codegen routines */
 #include "../CodegenDriver.c"
 
+/* Driver for method-based JIT */
+#include "../armv7-a-neon/MethodCodegenDriver.c"
+
 /* Architecture manifest */
 #include "ArchVariant.c"
diff --git a/vm/compiler/codegen/x86/ArchUtility.c b/vm/compiler/codegen/x86/ArchUtility.c
index 171c3b5..f7c48d6 100644
--- a/vm/compiler/codegen/x86/ArchUtility.c
+++ b/vm/compiler/codegen/x86/ArchUtility.c
@@ -22,3 +22,9 @@
 void dvmCompilerCodegenDump(CompilationUnit *cUnit)
 {
 }
+
+/* Target-specific cache flushing (not needed for x86) */
+int dvmCompilerCacheFlush(long start, long end, long flags)
+{
+    return 0;
+}
diff --git a/vm/compiler/codegen/x86/Assemble.c b/vm/compiler/codegen/x86/Assemble.c
index 3895d77..03edbf1 100644
--- a/vm/compiler/codegen/x86/Assemble.c
+++ b/vm/compiler/codegen/x86/Assemble.c
@@ -20,7 +20,6 @@
 #include "../../CompilerInternals.h"
 #include "X86LIR.h"
 #include "Codegen.h"
-#include <unistd.h>             /* for cacheflush */
 #include <sys/mman.h>           /* for protection change */
 
 #define MAX_ASSEMBLER_RETRIES 10
@@ -34,8 +33,6 @@
 #endif
 
 /*
- * FIXME - redo for x86
- *
  * Translation layout in the code cache.  Note that the codeAddress pointer
  * in JitTable will point directly to the code body (field codeAddress).  The
  * chain cell offset is at codeAddress - 2, and (if present) executionCount is at
@@ -52,7 +49,7 @@
  *   |  .                            .
  *   |  |                            |
  *   |  +----------------------------+
- *   |  | Chaining Cells             |  -> 12/16 bytes each, must be 4 byte aligned
+ *   |  | Chaining Cells             |  -> 16 bytes each, 8 byte aligned
  *   |  .                            .
  *   |  .                            .
  *   |  |                            |
@@ -66,8 +63,8 @@
  *      |                            |
  *      +----------------------------+
  *      | Literal pool               |  -> 4-byte aligned, variable size
- *      .                            .
- *      .                            .
+ *      .                            .     Note: for x86 literals will
+ *      .                            .     generally appear inline.
  *      |                            |
  *      +----------------------------+
  *
@@ -102,7 +99,7 @@
  *      next safe point.
  */
 const Method *dvmJitToPatchPredictedChain(const Method *method,
-                                          InterpState *interpState,
+                                          Thread *self,
                                           PredictedChainingCell *cell,
                                           const ClassObject *clazz)
 {
@@ -145,3 +142,7 @@
 void dvmCompilerSortAndPrintTraceProfiles()
 {
 }
+
+void dvmJitScanAllClassPointers(void (*callback)(void *))
+{
+}
diff --git a/vm/compiler/codegen/x86/CalloutHelper.h b/vm/compiler/codegen/x86/CalloutHelper.h
index 3229a26..a64f017 100644
--- a/vm/compiler/codegen/x86/CalloutHelper.h
+++ b/vm/compiler/codegen/x86/CalloutHelper.h
@@ -53,23 +53,6 @@
 /*
  * Functions declared in gDvmInlineOpsTable[] are used for
  * OP_EXECUTE_INLINE & OP_EXECUTE_INLINE_RANGE.
- *
- *      org_apache_harmony_dalvik_NativeTestTarget_emptyInlineMethod
- *      javaLangString_charAt
- *      javaLangString_compareTo
- *      javaLangString_equals
- *      javaLangString_indexOf_I
- *      javaLangString_indexOf_II
- *      javaLangString_length
- *      javaLangMath_abs_int
- *      javaLangMath_abs_long
- *      javaLangMath_abs_float
- *      javaLangMath_abs_double
- *      javaLangMath_min_int
- *      javaLangMath_max_int
- *      javaLangMath_sqrt
- *      javaLangMath_cos
- *      javaLangMath_sin
  */
 double sqrt(double x);  // INLINE_MATH_SQRT
 
diff --git a/vm/compiler/codegen/x86/CodegenDriver.c b/vm/compiler/codegen/x86/CodegenDriver.c
index 69f637e..a5ef56a 100644
--- a/vm/compiler/codegen/x86/CodegenDriver.c
+++ b/vm/compiler/codegen/x86/CodegenDriver.c
@@ -24,9 +24,64 @@
  * applicable directory below this one.
  */
 
+extern X86LIR *loadConstant(CompilationUnit *cUnit, int rDest, int value);
+extern X86LIR *loadWordDisp(CompilationUnit *cUnit, int rBase,
+                            int displacement, int rDest);
+extern void dvmCompilerFlushAllRegs(CompilationUnit *cUnit);
+extern void storeWordDisp(CompilationUnit *cUnit, int rBase,
+                          int displacement, int rSrc);
+extern X86LIR *opReg(CompilationUnit *cUnit, OpKind op, int rDestSrc);
+
 static int opcodeCoverage[kNumPackedOpcodes];
 static intptr_t templateEntryOffsets[TEMPLATE_LAST_MARK];
 
+#if 0   // Avoid compiler warnings while the x86 backend is disabled during development
+/*
+ * Bail to the interpreter.  Will not return to this trace.
+ * On entry, rPC must be set correctly.
+ */
+static void genPuntToInterp(CompilationUnit *cUnit, unsigned int offset)
+{
+    dvmCompilerFlushAllRegs(cUnit);
+    loadConstant(cUnit, rPC, (int)(cUnit->method->insns + offset));
+    loadWordDisp(cUnit, rEBP, 0, rECX);  // Get glue
+    loadWordDisp(cUnit, rECX,
+                 offsetof(Thread, jitToInterpEntries.dvmJitToInterpPunt),
+                 rEAX);
+    opReg(cUnit, kOpUncondBr, rEAX);
+}
+
+static void genInterpSingleStep(CompilationUnit *cUnit, MIR *mir)
+{
+    int flags = dexGetFlagsFromOpcode(mir->dalvikInsn.opcode);
+    int flagsToCheck = kInstrCanBranch | kInstrCanSwitch | kInstrCanReturn |
+                       kInstrCanThrow;
+
+    //If already optimized out, just ignore
+    if (mir->dalvikInsn.opcode == OP_NOP)
+        return;
+
+    //Ugly, but necessary.  Flush all Dalvik regs so Interp can find them
+    dvmCompilerFlushAllRegs(cUnit);
+
+    if ((mir->next == NULL) || (flags & flagsToCheck)) {
+       genPuntToInterp(cUnit, mir->offset);
+       return;
+    }
+    int entryAddr = offsetof(Thread,
+                             jitToInterpEntries.dvmJitToInterpSingleStep);
+    loadWordDisp(cUnit, rEBP, 0, rECX);  // Get glue
+    loadWordDisp(cUnit, rECX, entryAddr, rEAX); // rEAX<- entry address
+    /* rPC = dalvik pc */
+    loadConstant(cUnit, rPC, (int) (cUnit->method->insns + mir->offset));
+    /* rECX = dalvik pc of following instruction */
+    loadConstant(cUnit, rECX, (int) (cUnit->method->insns + mir->next->offset));
+    /* Pass on the stack */
+    storeWordDisp(cUnit, rESP, OUT_ARG0, rECX);
+    opReg(cUnit, kOpCall, rEAX);
+}
+#endif
+
 /*
  * The following are the first-level codegen routines that analyze the format
  * of each bytecode then either dispatch special purpose codegen routines
@@ -158,6 +213,7 @@
 /* Accept the work and start compiling */
 bool dvmCompilerDoWork(CompilerWorkOrder *work)
 {
+    JitTraceDescription *desc;
     bool res;
 
     if (gDvmJit.codeCacheFull) {
@@ -167,14 +223,16 @@
     switch (work->kind) {
         case kWorkOrderTrace:
             /* Start compilation with maximally allowed trace length */
-            res = dvmCompileTrace(work->info, JIT_MAX_TRACE_LEN, &work->result,
+            desc = (JitTraceDescription *)work->info;
+            res = dvmCompileTrace(desc, JIT_MAX_TRACE_LEN, &work->result,
                                   work->bailPtr, 0 /* no hints */);
             break;
         case kWorkOrderTraceDebug: {
             bool oldPrintMe = gDvmJit.printMe;
             gDvmJit.printMe = true;
             /* Start compilation with maximally allowed trace length */
-            res = dvmCompileTrace(work->info, JIT_MAX_TRACE_LEN, &work->result,
+            desc = (JitTraceDescription *)work->info;
+            res = dvmCompileTrace(desc, JIT_MAX_TRACE_LEN, &work->result,
                                   work->bailPtr, 0 /* no hints */);
             gDvmJit.printMe = oldPrintMe;
             break;
@@ -245,6 +303,11 @@
                       templateEntryOffsets[TEMPLATE_INTERPRET]);
 }
 
+JitInstructionSetType dvmCompilerGetInterpretTemplateSet()
+{
+    return DALVIK_JIT_X86;
+}
+
 void dvmCompilerInitializeRegAlloc(CompilationUnit *cUnit)
 {
 }
diff --git a/vm/compiler/codegen/x86/X86LIR.h b/vm/compiler/codegen/x86/X86LIR.h
index 62ac447..19f08e1 100644
--- a/vm/compiler/codegen/x86/X86LIR.h
+++ b/vm/compiler/codegen/x86/X86LIR.h
@@ -27,7 +27,7 @@
  *     esp is native SP
  *
  * For interpreter:
- *     edx is Dalvik PC (rPC)
+ *     edi is Dalvik PC (rPC)
  *     ebx is rINST
  *
  * For JIT:
@@ -80,10 +80,6 @@
     int numFPTemps;
     RegisterInfo *FPTemps;
     int nextFPTemp;
-    int numCoreRegs;
-    RegisterInfo *coreRegs;
-    int numFPRegs;
-    RegisterInfo *FPRegs;
 } RegisterPool;
 
 typedef enum OpSize {
@@ -99,7 +95,6 @@
 
 typedef enum OpKind {
     kOpMov,
-    kOpMvn,
     kOpCmp,
     kOpLsl,
     kOpLsr,
@@ -114,15 +109,11 @@
     kOpAdc,
     kOpSub,
     kOpSbc,
-    kOpRsub,
     kOpMul,
     kOpDiv,
     kOpRem,
-    kOpBic,
-    kOpCmn,
     kOpTst,
-    kOpBkpt,
-    kOpBlx,
+    kOpCall,
     kOpPush,
     kOpPop,
     kOp2Char,
@@ -132,6 +123,37 @@
     kOpUncondBr,
 } OpKind;
 
+#define FP_REG_OFFSET 8
+
+typedef enum NativeRegisterPool {
+    rEAX = 0,
+    rECX = 1,
+    rEDX = 2,
+    rEBX = 3,
+    rESP = 4,
+    rEBP = 5,
+    rESI = 6,
+    rEDI = 7,
+    rXMM0 = 0 + FP_REG_OFFSET,
+    rXMM1 = 1 + FP_REG_OFFSET,
+    rXMM2 = 2 + FP_REG_OFFSET,
+    rXMM3 = 3 + FP_REG_OFFSET,
+    rXMM4 = 4 + FP_REG_OFFSET,
+    rXMM5 = 5 + FP_REG_OFFSET,
+    rXMM6 = 6 + FP_REG_OFFSET,
+    rXMM7 = 7 + FP_REG_OFFSET,
+} NativeRegisterPool;
+
+#define rPC rEDI
+#define rFP rESI
+#define rINST rEBX
+
+#define OUT_ARG0 0
+#define OUT_ARG1 4
+#define OUT_ARG2 8
+#define OUT_ARG3 12
+#define OUT_ARG4 16
+
 typedef struct X86LIR {
     LIR generic;
     //X86Opcode opcode;
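A short usage note on the OUT_ARGn slots defined above, based on the genInterpSingleStep() sketch earlier in this change: outgoing call arguments in the x86 backend are staged on the native stack rather than in registers, roughly as follows.

    storeWordDisp(cUnit, rESP, OUT_ARG0, rECX);   /* arg0 goes to [esp + 0] */
    opReg(cUnit, kOpCall, rEAX);                  /* then call through rEAX */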
diff --git a/vm/compiler/codegen/x86/ia32/ArchVariant.c b/vm/compiler/codegen/x86/ia32/ArchVariant.c
index 931189f..4ccd56f 100644
--- a/vm/compiler/codegen/x86/ia32/ArchVariant.c
+++ b/vm/compiler/codegen/x86/ia32/ArchVariant.c
@@ -74,7 +74,7 @@
      * EA is calculated by doing "Rn + imm5 << 2", make sure that the last
      * offset from the struct is less than 128.
      */
-    assert((offsetof(InterpState, jitToInterpEntries) +
+    assert((offsetof(Thread, jitToInterpEntries) +
             sizeof(struct JitToInterpEntries)) <= 128);
     return true;
 }
diff --git a/vm/compiler/template/armv5te-vfp/TEMPLATE_MEM_OP_DECODE.S b/vm/compiler/template/armv5te-vfp/TEMPLATE_MEM_OP_DECODE.S
index 21e23a9..8bee853 100644
--- a/vm/compiler/template/armv5te-vfp/TEMPLATE_MEM_OP_DECODE.S
+++ b/vm/compiler/template/armv5te-vfp/TEMPLATE_MEM_OP_DECODE.S
@@ -9,9 +9,9 @@
      */
     vpush   {d0-d15}                    @ save out all fp registers
     push    {r0-r12,lr}                 @ save out all registers
+    ldr     r2, .LdvmSelfVerificationMemOpDecode @ defined in footer.S
     mov     r0, lr                      @ arg0 <- link register
     mov     r1, sp                      @ arg1 <- stack pointer
-    ldr     r2, .LdvmSelfVerificationMemOpDecode @ defined in footer.S
     blx     r2                          @ decode and handle the mem op
     pop     {r0-r12,lr}                 @ restore all registers
     vpop    {d0-d15}                    @ restore all fp registers
diff --git a/vm/compiler/template/armv5te-vfp/TEMPLATE_RESTORE_STATE.S b/vm/compiler/template/armv5te-vfp/TEMPLATE_RESTORE_STATE.S
index ec80139..196d082 100644
--- a/vm/compiler/template/armv5te-vfp/TEMPLATE_RESTORE_STATE.S
+++ b/vm/compiler/template/armv5te-vfp/TEMPLATE_RESTORE_STATE.S
@@ -1,9 +1,9 @@
     /*
      * This handler restores state following a selfVerification memory access.
      * On entry:
-     *    r0 - offset from rGLUE to the 1st element of the coreRegs save array.
+     *    r0 - offset from rSELF to the 1st element of the coreRegs save array.
      */
-    add     r0, r0, rGLUE               @ pointer to heapArgSpace.coreRegs[0]
+    add     r0, r0, rSELF               @ pointer to heapArgSpace.coreRegs[0]
     add     r0, #64                     @ pointer to heapArgSpace.fpRegs[0]
     vldmia  r0, {d0-d15}
     sub     r0, #64                     @ pointer to heapArgSpace.coreRegs[0]
diff --git a/vm/compiler/template/armv5te-vfp/TEMPLATE_SAVE_STATE.S b/vm/compiler/template/armv5te-vfp/TEMPLATE_SAVE_STATE.S
index 1bd02c8..11f62b7 100644
--- a/vm/compiler/template/armv5te-vfp/TEMPLATE_SAVE_STATE.S
+++ b/vm/compiler/template/armv5te-vfp/TEMPLATE_SAVE_STATE.S
@@ -3,7 +3,7 @@
      * On entry:
      *    Top of stack + 4: r7 value to save
      *    Top of stack + 0: r0 value to save
-     *    r0 - offset from rGLUE to the beginning of the heapArgSpace record
+     *    r0 - offset from rSELF to the beginning of the heapArgSpace record
      *    r7 - the value of regMap
      *
      * The handler must save regMap, r0-r12 and then return with r0-r12
@@ -11,7 +11,7 @@
      * the values on the stack - not the ones in those registers on entry.
      * Finally, the two registers previously pushed must be popped.
      */
-    add     r0, r0, rGLUE               @ pointer to heapArgSpace
+    add     r0, r0, rSELF               @ pointer to heapArgSpace
     stmia   r0!, {r7}                   @ save regMap
     ldr     r7, [r13, #0]               @ recover r0 value
     stmia   r0!, {r7}                   @ save r0
diff --git a/vm/compiler/template/armv5te-vfp/TemplateOpList.h b/vm/compiler/template/armv5te-vfp/TemplateOpList.h
index d991bed..0365ba4 100644
--- a/vm/compiler/template/armv5te-vfp/TemplateOpList.h
+++ b/vm/compiler/template/armv5te-vfp/TemplateOpList.h
@@ -57,3 +57,9 @@
 JIT_TEMPLATE(INTERPRET)
 JIT_TEMPLATE(MONITOR_ENTER)
 JIT_TEMPLATE(MONITOR_ENTER_DEBUG)
+JIT_TEMPLATE(PERIODIC_PROFILING)
+JIT_TEMPLATE(RETURN_PROF)
+JIT_TEMPLATE(INVOKE_METHOD_NO_OPT_PROF)
+JIT_TEMPLATE(INVOKE_METHOD_CHAIN_PROF)
+JIT_TEMPLATE(INVOKE_METHOD_PREDICTED_CHAIN_PROF)
+JIT_TEMPLATE(INVOKE_METHOD_NATIVE_PROF)
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INTERPRET.S b/vm/compiler/template/armv5te/TEMPLATE_INTERPRET.S
index 0163ce0..9f24887 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_INTERPRET.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_INTERPRET.S
@@ -9,7 +9,7 @@
      *        r1 - the Dalvik PC to begin interpretation.
      *    else
      *        [lr, #3] contains Dalvik PC to begin interpretation
-     *    rGLUE - pointer to interpState
+     *    rSELF - pointer to thread
      *    rFP - Dalvik frame pointer
      */
     cmp     lr, #0
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S
index f1650d9..503d190 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S
@@ -1,3 +1,4 @@
+%default { "chaintgt" : ".LinvokeChain" }
     /*
      * For monomorphic callsite, setup the Dalvik frame and return to the
      * Thumb code through the link register to transfer control to the callee
@@ -6,16 +7,15 @@
     @ r0 = methodToCall, r1 = returnCell, r2 = methodToCall->outsSize
     @ rPC = dalvikCallsite, r7 = methodToCall->registersSize
     @ methodToCall is guaranteed to be non-native
-.LinvokeChain:
-    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
-    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
+$chaintgt:
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r8, [rSELF, #offThread_suspendCount]      @ r8<- suspendCount
     add     r3, r1, #1  @ Thumb addr is odd
     SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
     sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
     add     r12, lr, #2                 @ setup the punt-to-interp address
     sub     r10, r10, r2, lsl #2        @ r10<- bottom (newsave - outsSize)
-    ldr     r8, [r8]                    @ r8<- suspendCount (int)
     cmp     r10, r9                     @ bottom < interpStackEnd?
     bxlo    r12                         @ return to raise stack overflow excep.
     @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
@@ -31,17 +31,16 @@
     bxne    r12                         @ bail to the interpreter
 
     ldr     r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
-    ldr     r2, [rGLUE, #offGlue_self]      @ r2<- glue->self
 
-    @ Update "glue" values for the new method
-    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
-    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
+    @ Update "thread" values for the new method
+    str     r0, [rSELF, #offThread_method]    @ self->method = methodToCall
+    str     r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
     mov     rFP, r1                         @ fp = newFp
-    str     rFP, [r2, #offThread_curFrame]  @ self->curFrame = newFp
-#if defined(WITH_INLINE_PROFILING)
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
     stmfd   sp!, {r0-r2,lr}             @ preserve clobbered live registers
     mov     r1, r6
-    @ r0=methodToCall, r1=rGlue
+    @ r0=methodToCall, r1=rSELF
     mov     lr, pc
     ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r2,lr}             @ restore registers
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN_PROF.S b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN_PROF.S
new file mode 100644
index 0000000..d1be4fd
--- /dev/null
+++ b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN_PROF.S
@@ -0,0 +1,3 @@
+#define TEMPLATE_INLINE_PROFILING
+%include "armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S" { "chaintgt" : ".LinvokeChainProf" }
+#undef TEMPLATE_INLINE_PROFILING
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S
index 2a22a22..8681532 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S
@@ -1,12 +1,11 @@
     @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
     @ r7 = methodToCall->registersSize
-    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
-    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r8, [rSELF, #offThread_suspendCount]      @ r8<- suspendCount
     add     r3, r1, #1  @ Thumb addr is odd
     SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
     sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
-    ldr     r8, [r8]                    @ r3<- suspendCount (int)
     cmp     r10, r9                     @ bottom < interpStackEnd?
     bxlo    lr                          @ return to raise stack overflow excep.
     @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
@@ -16,7 +15,6 @@
     @ set up newSaveArea
     str     rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
     str     r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
-    ldr     r3, [rGLUE, #offGlue_self]      @ r3<- glue->self
     str     r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
     cmp     r8, #0                      @ suspendCount != 0
     ldr     r8, [r0, #offMethod_nativeFunc] @ r8<- method->nativeFunc
@@ -27,25 +25,25 @@
 #endif
 
     @ go ahead and transfer control to the native code
-    ldr     r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
+    ldr     r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
     mov     r2, #0
-    str     r1, [r3, #offThread_curFrame]   @ self->curFrame = newFp
-    str     r2, [r3, #offThread_inJitCodeCache] @ not in the jit code cache
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ not in the jit code cache
     str     r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
                                         @ newFp->localRefCookie=top
-    mov     r9, r3                      @ r9<- glue->self (preserve)
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- new stack save area
 
-    mov     r2, r0                      @ r2<- methodToCall
-    mov     r0, r1                      @ r0<- newFP
-    add     r1, rGLUE, #offGlue_retval  @ r1<- &retval
-#if defined(WITH_INLINE_PROFILING)
-    @ r2=methodToCall, r6=rGLUE
+    mov     r2, r0                        @ arg2<- methodToCall
+    mov     r0, r1                        @ arg0<- newFP
+    add     r1, rSELF, #offThread_retval  @ arg1<- &retval
+    mov     r3, rSELF                     @ arg3<- self
+#if defined(TEMPLATE_INLINE_PROFILING)
+    @ r2=methodToCall, r6=rSELF
     stmfd   sp!, {r2,r6}                @ to be consumed after JNI return
     stmfd   sp!, {r0-r3}                @ preserve r0-r3
     mov     r0, r2
     mov     r1, r6
-    @ r0=JNIMethod, r1=rGlue
+    @ r0=JNIMethod, r1=rSELF
     mov     lr, pc
     ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
@@ -53,26 +51,26 @@
 
     blx     r8                          @ off to the native code
 
-#if defined(WITH_INLINE_PROFILING)
+#if defined(TEMPLATE_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}                @ restore r2 and r6
-    @ r0=JNIMethod, r1=rGlue
+    @ r0=JNIMethod, r1=rSELF
     mov     lr, pc
     ldr     pc, .LdvmFastNativeMethodTraceExit
 #endif
-    @ native return; r9=self, r10=newSaveArea
+    @ native return; r10=newSaveArea
     @ equivalent to dvmPopJniLocals
     ldr     r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
     ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top
-    ldr     r1, [r9, #offThread_exception] @ check for exception
-    str     rFP, [r9, #offThread_curFrame]  @ self->curFrame = fp
+    ldr     r1, [rSELF, #offThread_exception] @ check for exception
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = fp
     cmp     r1, #0                      @ null?
-    str     r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top
+    str     r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
     ldr     r0, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
 
     @ r0 = dalvikCallsitePC
     bne     .LhandleException           @ no, handle exception
 
-    str     r2, [r9, #offThread_inJitCodeCache] @ set the mode properly
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ set the mode properly
     cmp     r2, #0                      @ return chaining cell still exists?
     bxne    r2                          @ yes - go ahead
 
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE_PROF.S b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE_PROF.S
new file mode 100644
index 0000000..816277a
--- /dev/null
+++ b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE_PROF.S
@@ -0,0 +1,3 @@
+#define TEMPLATE_INLINE_PROFILING
+%include "armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S"
+#undef TEMPLATE_INLINE_PROFILING
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S
index 405065f..12b5e61 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S
@@ -6,14 +6,13 @@
     @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
     ldrh    r7, [r0, #offMethod_registersSize]  @ r7<- methodToCall->regsSize
     ldrh    r2, [r0, #offMethod_outsSize]  @ r2<- methodToCall->outsSize
-    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
-    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r8, [rSELF, #offThread_suspendCount] @ r8<- suspendCount
     add     r3, r1, #1  @ Thumb addr is odd
     SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
     sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
     sub     r10, r10, r2, lsl #2        @ r10<- bottom (newsave - outsSize)
-    ldr     r8, [r8]                    @ r8<- suspendCount (int)
     cmp     r10, r9                     @ bottom < interpStackEnd?
     bxlo    lr                          @ return to raise stack overflow excep.
     @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
@@ -39,17 +38,16 @@
 
     ldr     r10, .LdvmJitToInterpTraceSelectNoChain
     ldr     r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
-    ldr     r2, [rGLUE, #offGlue_self]      @ r2<- glue->self
 
-    @ Update "glue" values for the new method
-    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
-    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
+    @ Update "thread" values for the new method
+    str     r0, [rSELF, #offThread_method]    @ self->method = methodToCall
+    str     r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
     mov     rFP, r1                         @ fp = newFp
-    str     rFP, [r2, #offThread_curFrame]  @ self->curFrame = newFp
-#if defined(WITH_INLINE_PROFILING)
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
     stmfd   sp!, {r0-r3}                    @ preserve r0-r3
     mov     r1, r6
-    @ r0=methodToCall, r1=rGlue
+    @ r0=methodToCall, r1=rSELF
     mov     lr, pc
     ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}                    @ restore r0-r3
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT_PROF.S b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT_PROF.S
new file mode 100644
index 0000000..bfea7d9
--- /dev/null
+++ b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT_PROF.S
@@ -0,0 +1,3 @@
+#define TEMPLATE_INLINE_PROFILING
+%include "armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S"
+#undef TEMPLATE_INLINE_PROFILING
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S
index 65b2cc3..9dd4ff8 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S
@@ -1,3 +1,4 @@
+%default { "chaintgt" : ".LinvokeChain" }
     /*
      * For polymorphic callsite, check whether the cached class pointer matches
      * the current one. If so setup the Dalvik frame and return to the
@@ -25,7 +26,7 @@
     ldr     r3, [r0, #offObject_clazz]  @ r3 <- this->class
     ldr     r8, [r2, #4]    @ r8 <- predictedChainCell->clazz
     ldr     r0, [r2, #8]    @ r0 <- predictedChainCell->method
-    ldr     r9, [rGLUE, #offGlue_icRechainCount]   @ r1 <- shared rechainCount
+    ldr     r9, [rSELF, #offThread_icRechainCount] @ r9 <- shared rechainCount
     cmp     r3, r8          @ predicted class == actual class?
 #if defined(WITH_JIT_TUNING)
     ldr     r7, .LdvmICHitCount
@@ -42,12 +43,12 @@
 #endif
     ldreqh  r7, [r0, #offMethod_registersSize]  @ r7<- methodToCall->regsSize
     ldreqh  r2, [r0, #offMethod_outsSize]  @ r2<- methodToCall->outsSize
-    beq     .LinvokeChain   @ predicted chain is valid
+    beq     $chaintgt   @ predicted chain is valid
     ldr     r7, [r3, #offClassObject_vtable] @ r7 <- this->class->vtable
     cmp     r8, #0          @ initialized class or not
     moveq   r1, #0
     subne   r1, r9, #1      @ count--
-    strne   r1, [rGLUE, #offGlue_icRechainCount]   @ write back to InterpState
+    strne   r1, [rSELF, #offThread_icRechainCount]  @ write back to thread
     add     lr, lr, #4      @ return to fully-resolve landing pad
     /*
      * r1 <- count
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF.S b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF.S
new file mode 100644
index 0000000..6ca5bdd
--- /dev/null
+++ b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF.S
@@ -0,0 +1,3 @@
+#define TEMPLATE_INLINE_PROFILING
+%include "armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S" { "chaintgt" : ".LinvokeChainProf" }
+#undef TEMPLATE_INLINE_PROFILING
diff --git a/vm/compiler/template/armv5te/TEMPLATE_MEM_OP_DECODE.S b/vm/compiler/template/armv5te/TEMPLATE_MEM_OP_DECODE.S
index ecd4eaa..03926b6 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_MEM_OP_DECODE.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_MEM_OP_DECODE.S
@@ -8,9 +8,9 @@
      * skip the memory op so it never gets executed.
      */
     push    {r0-r12,lr}                 @ save out all registers
+    ldr     r2, .LdvmSelfVerificationMemOpDecode @ defined in footer.S
     mov     r0, lr                      @ arg0 <- link register
     mov     r1, sp                      @ arg1 <- stack pointer
-    ldr     r2, .LdvmSelfVerificationMemOpDecode @ defined in footer.S
     blx     r2                          @ decode and handle the mem op
     pop     {r0-r12,lr}                 @ restore all registers
     bx      lr                          @ return to compiled code
diff --git a/vm/compiler/template/armv5te/TEMPLATE_MONITOR_ENTER.S b/vm/compiler/template/armv5te/TEMPLATE_MONITOR_ENTER.S
index 8e7f728..344a0da 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_MONITOR_ENTER.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_MONITOR_ENTER.S
@@ -14,10 +14,10 @@
     str     r3, [r0, #offThread_inJitCodeCache]
     blx     r2                           @ dvmLockObject(self, obj)
     @ refresh Jit's on/off status
-    ldr     r0, [rGLUE, #offGlue_ppJitProfTable]
-    ldr     r0, [r0]
+    ldr     r3, [rSELF, #offThread_ppJitProfTable]
+    ldr     r3, [r3]
     ldr     r2, .LdvmJitToInterpNoChain
-    str     r0, [rGLUE, #offGlue_pJitProfTable]
+    str     r3, [rSELF, #offThread_pJitProfTable]
     @ Bail to interpreter - no chain [note - r4 still contains rPC]
 #if defined(WITH_JIT_TUNING)
     mov     r0, #kHeavyweightMonitor
diff --git a/vm/compiler/template/armv5te/TEMPLATE_MONITOR_ENTER_DEBUG.S b/vm/compiler/template/armv5te/TEMPLATE_MONITOR_ENTER_DEBUG.S
index 5cf26e7..cc57e2b 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_MONITOR_ENTER_DEBUG.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_MONITOR_ENTER_DEBUG.S
@@ -14,11 +14,10 @@
     str     r3, [r0, #offThread_inJitCodeCache]
     blx     r2             @ dvmLockObject(self, obj)
     @ refresh Jit's on/off status & test for exception
-    ldr     r0, [rGLUE, #offGlue_ppJitProfTable]
-    ldr     r1, [rGLUE, #offGlue_self]
-    ldr     r0, [r0]
-    ldr     r1, [r1, #offThread_exception]
-    str     r0, [rGLUE, #offGlue_pJitProfTable]
+    ldr     r3, [rSELF, #offThread_ppJitProfTable]
+    ldr     r3, [r3]
+    ldr     r1, [rSELF, #offThread_exception]
+    str     r3, [rSELF, #offThread_pJitProfTable]
     cmp     r1, #0
     beq     1f
     ldr     r2, .LhandleException
diff --git a/vm/compiler/template/armv5te/TEMPLATE_PERIODIC_PROFILING.S b/vm/compiler/template/armv5te/TEMPLATE_PERIODIC_PROFILING.S
new file mode 100644
index 0000000..c0f7d6e
--- /dev/null
+++ b/vm/compiler/template/armv5te/TEMPLATE_PERIODIC_PROFILING.S
@@ -0,0 +1,26 @@
+    /*
+     * Increment profile counter for this trace, and decrement
+     * sample counter.  If sample counter goes below zero, turn
+     * off profiling.
+     *
+     * On entry
+     * (lr-11) is the address of a pointer to the counter.  Note: the counter
+     *    actually exists 10 bytes before the return target, but because
+     *    we are arriving from thumb mode, lr will have its low bit set.
+     */
+     ldr    r0, [lr,#-11]
+     ldr    r1, [rSELF, #offThread_pProfileCountdown]
+     ldr    r2, [r0]                    @ get counter
+     ldr    r3, [r1]                    @ get countdown timer
+     add    r2, #1
+     subs   r3, #1                      @ decrement countdown timer
+     blt    .L${opcode}_disable_profiling
+     str    r2, [r0]
+     str    r3, [r1]
+     bx     lr
+
+.L${opcode}_disable_profiling:
+     mov    r4, lr                     @ preserve lr
+     ldr    r0, .LdvmJitTraceProfilingOff
+     blx    r0
+     bx     r4
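
For readability, here is a minimal C sketch of the behavior the comment above describes; the free-standing counter pointers are illustrative assumptions, while dvmJitTraceProfilingOff is the real entry point the template reaches through .LdvmJitTraceProfilingOff in footer.S (its signature is assumed here).

    /* Hedged sketch of the documented intent of TEMPLATE_PERIODIC_PROFILING.
     * "counter" models the per-trace profile count stored just before the
     * return target; "countdown" models what offThread_pProfileCountdown
     * points at.  Both parameter names are illustrative.                    */
    extern void dvmJitTraceProfilingOff(void);   /* real symbol; signature assumed */

    static void periodicProfilingSketch(int *counter, int *countdown)
    {
        *counter += 1;                  /* bump this trace's profile counter      */
        *countdown -= 1;                /* consume one unit of the sample window  */
        if (*countdown < 0) {
            dvmJitTraceProfilingOff();  /* sample budget exhausted: stop profiling */
        }
        /* otherwise both updated values are written back and control returns     */
    }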
diff --git a/vm/compiler/template/armv5te/TEMPLATE_RESTORE_STATE.S b/vm/compiler/template/armv5te/TEMPLATE_RESTORE_STATE.S
index e3719db..25b4ffa 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_RESTORE_STATE.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_RESTORE_STATE.S
@@ -1,8 +1,8 @@
     /*
      * This handler restores state following a selfVerification memory access.
      * On entry:
-     *    r0 - offset from rGLUE to the 1st element of the coreRegs save array.
+     *    r0 - offset from rSELF to the 1st element of the coreRegs save array.
      */
-    add     r0, r0, rGLUE               @ pointer to heapArgSpace.coreRegs[0]
+    add     r0, r0, rSELF               @ pointer to heapArgSpace.coreRegs[0]
     ldmia   r0, {r0-r12}
     bx      lr
diff --git a/vm/compiler/template/armv5te/TEMPLATE_RETURN.S b/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
index 564b844..c2926a3 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
@@ -5,17 +5,17 @@
      * address in the code cache following the invoke instruction. Otherwise
      * return to the special dvmJitToInterpNoChain entry point.
      */
-#if defined(WITH_INLINE_PROFILING)
+#if defined(TEMPLATE_INLINE_PROFILING)
     stmfd   sp!, {r0-r2,lr}             @ preserve live registers
     mov     r0, r6
-    @ r0=rGlue
+    @ r0=rSELF
     mov     lr, pc
     ldr     pc, .LdvmFastJavaMethodTraceExit
     ldmfd   sp!, {r0-r2,lr}             @ restore live registers
 #endif
     SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
     ldr     r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
-    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
+    ldr     r8, [rSELF, #offThread_suspendCount] @ r8<- suspendCount
     ldr     rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
 #if !defined(WITH_SELF_VERIFICATION)
     ldr     r9,  [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
@@ -24,7 +24,6 @@
 #endif
     ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                         @ r2<- method we're returning to
-    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
     cmp     r2, #0                      @ break frame?
 #if !defined(WITH_SELF_VERIFICATION)
     beq     1f                          @ bail to interpreter
@@ -34,16 +33,15 @@
     ldr     r1, .LdvmJitToInterpNoChainNoProfile @ defined in footer.S
     mov     rFP, r10                    @ publish new FP
     ldr     r10, [r2, #offMethod_clazz] @ r10<- method->clazz
-    ldr     r8, [r8]                    @ r8<- suspendCount
 
-    str     r2, [rGLUE, #offGlue_method]@ glue->method = newSave->method
+    str     r2, [rSELF, #offThread_method]@ self->method = newSave->method
     ldr     r0, [r10, #offClassObject_pDvmDex] @ r0<- method->clazz->pDvmDex
-    str     rFP, [r3, #offThread_curFrame] @ self->curFrame = fp
+    str     rFP, [rSELF, #offThread_curFrame] @ self->curFrame = fp
     add     rPC, rPC, #6                @ publish new rPC (advance 6 bytes)
-    str     r0, [rGLUE, #offGlue_methodClassDex]
+    str     r0, [rSELF, #offThread_methodClassDex]
     cmp     r8, #0                      @ check the suspendCount
     movne   r9, #0                      @ clear the chaining cell address
-    str     r9, [r3, #offThread_inJitCodeCache] @ in code cache or not
+    str     r9, [rSELF, #offThread_inJitCodeCache] @ in code cache or not
     cmp     r9, #0                      @ chaining cell exists?
     blxne   r9                          @ jump to the chaining cell
 #if defined(WITH_JIT_TUNING)
@@ -51,8 +49,8 @@
 #endif
     mov     pc, r1                      @ callsite is interpreted
 1:
-    stmia   rGLUE, {rPC, rFP}           @ SAVE_PC_FP_TO_GLUE()
+    stmia   rSELF, {rPC, rFP}           @ SAVE_PC_FP_TO_SELF()
     ldr     r2, .LdvmMterpStdBail       @ defined in footer.S
     mov     r1, #0                      @ changeInterp = false
-    mov     r0, rGLUE                   @ Expecting rGLUE in r0
+    mov     r0, rSELF                   @ Expecting rSELF in r0
     blx     r2                          @ exit the interpreter
diff --git a/vm/compiler/template/armv5te/TEMPLATE_RETURN_PROF.S b/vm/compiler/template/armv5te/TEMPLATE_RETURN_PROF.S
new file mode 100644
index 0000000..d7af0bd
--- /dev/null
+++ b/vm/compiler/template/armv5te/TEMPLATE_RETURN_PROF.S
@@ -0,0 +1,3 @@
+#define TEMPLATE_INLINE_PROFILING
+%include "armv5te/TEMPLATE_RETURN.S"
+#undef TEMPLATE_INLINE_PROFILING
diff --git a/vm/compiler/template/armv5te/TEMPLATE_SAVE_STATE.S b/vm/compiler/template/armv5te/TEMPLATE_SAVE_STATE.S
index df2d1e6..1c3aa4d 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_SAVE_STATE.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_SAVE_STATE.S
@@ -3,7 +3,7 @@
      * On entry:
      *    Top of stack + 4: r7 value to save
      *    Top of stack + 0: r0 value to save
-     *    r0 - offset from rGLUE to the beginning of the heapArgSpace record
+     *    r0 - offset from rSELF to the beginning of the heapArgSpace record
      *    r7 - the value of regMap
      *
      * The handler must save regMap, r0-r12 and then return with r0-r12
@@ -11,7 +11,7 @@
      * the values on the stack - not the ones in those registers on entry.
      * Finally, the two registers previously pushed must be popped.
      */
-    add     r0, r0, rGLUE               @ pointer to heapArgSpace
+    add     r0, r0, rSELF               @ pointer to heapArgSpace
     stmia   r0!, {r7}                   @ save regMap
     ldr     r7, [r13, #0]               @ recover r0 value
     stmia   r0!, {r7}                   @ save r0
diff --git a/vm/compiler/template/armv5te/TemplateOpList.h b/vm/compiler/template/armv5te/TemplateOpList.h
index e81383c..abfec4b 100644
--- a/vm/compiler/template/armv5te/TemplateOpList.h
+++ b/vm/compiler/template/armv5te/TemplateOpList.h
@@ -42,3 +42,9 @@
 JIT_TEMPLATE(INTERPRET)
 JIT_TEMPLATE(MONITOR_ENTER)
 JIT_TEMPLATE(MONITOR_ENTER_DEBUG)
+JIT_TEMPLATE(PERIODIC_PROFILING)
+JIT_TEMPLATE(RETURN_PROF)
+JIT_TEMPLATE(INVOKE_METHOD_NO_OPT_PROF)
+JIT_TEMPLATE(INVOKE_METHOD_CHAIN_PROF)
+JIT_TEMPLATE(INVOKE_METHOD_PREDICTED_CHAIN_PROF)
+JIT_TEMPLATE(INVOKE_METHOD_NATIVE_PROF)
diff --git a/vm/compiler/template/armv5te/footer.S b/vm/compiler/template/armv5te/footer.S
index ba0335b..4164d4e 100644
--- a/vm/compiler/template/armv5te/footer.S
+++ b/vm/compiler/template/armv5te/footer.S
@@ -9,21 +9,23 @@
 .LinvokeNative:
     @ Prep for the native call
     @ r1 = newFP, r0 = methodToCall
-    ldr     r3, [rGLUE, #offGlue_self]      @ r3<- glue->self
     mov     r2, #0
-    ldr     r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
-    str     r2, [r3, #offThread_inJitCodeCache] @ not in jit code cache
-    str     r1, [r3, #offThread_curFrame]   @ self->curFrame = newFp
+    ldr     r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ not in jit code cache
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
     str     r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
                                         @ newFp->localRefCookie=top
-    mov     r9, r3                      @ r9<- glue->self (preserve)
+    ldr     lr, [rSELF, #offThread_pInterpBreak]
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- new stack save area
 
     mov     r2, r0                      @ r2<- methodToCall
+    ldr     lr, [lr]                    @ lr<- set of active profilers
     mov     r0, r1                      @ r0<- newFP
-    add     r1, rGLUE, #offGlue_retval  @ r1<- &retval
-#if defined(WITH_INLINE_PROFILING)
-    @ r2: methodToCall, r6: rGLUE
+    add     r1, rSELF, #offThread_retval  @ r1<- &retval
+    mov     r3, rSELF                   @ arg3<- self
+    ands    lr, #kSubModeMethodTrace
+    beq     121f                        @ hop if not profiling
+    @ r2: methodToCall, r6: rSELF
     stmfd   sp!, {r2,r6}
     stmfd   sp!, {r0-r3}
     mov     r0, r2
@@ -31,35 +33,37 @@
     mov     lr, pc
     ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}
-#endif
 
     mov     lr, pc
     ldr     pc, [r2, #offMethod_nativeFunc]
 
-#if defined(WITH_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}
     mov     lr, pc
     ldr     pc, .LdvmFastNativeMethodTraceExit
-#endif
+    b       212f
+121:
+    mov     lr, pc
+    ldr     pc, [r2, #offMethod_nativeFunc]
+212:
     @ Refresh Jit's on/off status
-    ldr     r3, [rGLUE, #offGlue_ppJitProfTable]
+    ldr     r3, [rSELF, #offThread_ppJitProfTable]
 
-    @ native return; r9=self, r10=newSaveArea
+    @ native return; r10=newSaveArea
     @ equivalent to dvmPopJniLocals
     ldr     r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
     ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top
-    ldr     r1, [r9, #offThread_exception] @ check for exception
+    ldr     r1, [rSELF, #offThread_exception] @ check for exception
     ldr     r3, [r3]    @ r1 <- pointer to Jit profile table
-    str     rFP, [r9, #offThread_curFrame]  @ self->curFrame = fp
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = fp
     cmp     r1, #0                      @ null?
-    str     r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top
+    str     r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
+    str     r3, [rSELF, #offThread_pJitProfTable]  @ cache current JitProfTable
     ldr     r0, [r10, #offStackSaveArea_savedPc] @ reload rPC
-    str     r3, [rGLUE, #offGlue_pJitProfTable]  @ cache current JitProfTable
 
     @ r0 = dalvikCallsitePC
     bne     .LhandleException           @ no, handle exception
 
-    str     r2, [r9, #offThread_inJitCodeCache] @ set the new mode
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ set the new mode
     cmp     r2, #0                      @ return chaining cell still exists?
     bxne    r2                          @ yes - go ahead
 
@@ -81,9 +85,8 @@
 .LdeadFood:
     .word   0xdeadf00d
 #endif
-    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
     mov     r2, #0
-    str     r2, [r3, #offThread_inJitCodeCache] @ in interpreter land
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ in interpreter land
     ldr     r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func
     ldr     rIBASE, .LdvmAsmInstructionStart    @ same as above
     mov     rPC, r0                 @ reload the faulting Dalvik address
@@ -104,6 +107,8 @@
     .word   dvmMterpCommonExceptionThrown
 .LdvmLockObject:
     .word   dvmLockObject
+.LdvmJitTraceProfilingOff:
+    .word   dvmJitTraceProfilingOff
 #if defined(WITH_JIT_TUNING)
 .LdvmICHitCount:
     .word   gDvmICHitCount
@@ -112,14 +117,12 @@
 .LdvmSelfVerificationMemOpDecode:
     .word   dvmSelfVerificationMemOpDecode
 #endif
-#if defined(WITH_INLINE_PROFILING)
 .LdvmFastMethodTraceEnter:
     .word   dvmFastMethodTraceEnter
 .LdvmFastNativeMethodTraceExit:
     .word   dvmFastNativeMethodTraceExit
 .LdvmFastJavaMethodTraceExit:
     .word   dvmFastJavaMethodTraceExit
-#endif
 .L__aeabi_cdcmple:
     .word   __aeabi_cdcmple
 .L__aeabi_cfcmple:
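
In C terms, the reworked .LinvokeNative sequence above replaces the compile-time WITH_INLINE_PROFILING gate with a runtime test of the active-profilers word; the sketch below mirrors the register set-up in the assembly, but the field names and the trace helpers' argument lists are inferred from that set-up rather than taken from the VM headers.

    /* Hedged sketch of the runtime method-trace gate now used in .LinvokeNative.
     * self->pInterpBreak, the retval field, and the argument lists below are
     * assumptions read off the registers the assembly prepares (r0..r3).       */
    if ((*self->pInterpBreak) & kSubModeMethodTrace) {
        dvmFastMethodTraceEnter(methodToCall, self);             /* enter hook  */
        methodToCall->nativeFunc(newFP, &self->retval, methodToCall, self);
        dvmFastNativeMethodTraceExit(methodToCall, self);        /* exit hook   */
    } else {
        methodToCall->nativeFunc(newFP, &self->retval, methodToCall, self);
    }
    /* both paths then rejoin at label 212: to refresh the JIT on/off status    */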
diff --git a/vm/compiler/template/armv5te/header.S b/vm/compiler/template/armv5te/header.S
index e6b3362..6dcf5b9 100644
--- a/vm/compiler/template/armv5te/header.S
+++ b/vm/compiler/template/armv5te/header.S
@@ -55,7 +55,7 @@
 
   reg nick      purpose
   r5  rFP       interpreted frame pointer, used for accessing locals and args
-  r6  rGLUE     MterpGlue pointer
+  r6  rSELF     thread pointer
 
 The following registers have fixed assignments in mterp but are scratch
 registers in compiled code
@@ -73,7 +73,7 @@
 /* single-purpose registers, given names for clarity */
 #define rPC     r4
 #define rFP     r5
-#define rGLUE   r6
+#define rSELF   r6
 #define rINST   r7
 #define rIBASE  r8
 
diff --git a/vm/compiler/template/armv7-a-neon/TemplateOpList.h b/vm/compiler/template/armv7-a-neon/TemplateOpList.h
index d991bed..0365ba4 100644
--- a/vm/compiler/template/armv7-a-neon/TemplateOpList.h
+++ b/vm/compiler/template/armv7-a-neon/TemplateOpList.h
@@ -57,3 +57,9 @@
 JIT_TEMPLATE(INTERPRET)
 JIT_TEMPLATE(MONITOR_ENTER)
 JIT_TEMPLATE(MONITOR_ENTER_DEBUG)
+JIT_TEMPLATE(PERIODIC_PROFILING)
+JIT_TEMPLATE(RETURN_PROF)
+JIT_TEMPLATE(INVOKE_METHOD_NO_OPT_PROF)
+JIT_TEMPLATE(INVOKE_METHOD_CHAIN_PROF)
+JIT_TEMPLATE(INVOKE_METHOD_PREDICTED_CHAIN_PROF)
+JIT_TEMPLATE(INVOKE_METHOD_NATIVE_PROF)
diff --git a/vm/compiler/template/armv7-a/TemplateOpList.h b/vm/compiler/template/armv7-a/TemplateOpList.h
index d991bed..0365ba4 100644
--- a/vm/compiler/template/armv7-a/TemplateOpList.h
+++ b/vm/compiler/template/armv7-a/TemplateOpList.h
@@ -57,3 +57,9 @@
 JIT_TEMPLATE(INTERPRET)
 JIT_TEMPLATE(MONITOR_ENTER)
 JIT_TEMPLATE(MONITOR_ENTER_DEBUG)
+JIT_TEMPLATE(PERIODIC_PROFILING)
+JIT_TEMPLATE(RETURN_PROF)
+JIT_TEMPLATE(INVOKE_METHOD_NO_OPT_PROF)
+JIT_TEMPLATE(INVOKE_METHOD_CHAIN_PROF)
+JIT_TEMPLATE(INVOKE_METHOD_PREDICTED_CHAIN_PROF)
+JIT_TEMPLATE(INVOKE_METHOD_NATIVE_PROF)
diff --git a/vm/compiler/template/config-armv5te-vfp b/vm/compiler/template/config-armv5te-vfp
index 1b02261..774bd96 100644
--- a/vm/compiler/template/config-armv5te-vfp
+++ b/vm/compiler/template/config-armv5te-vfp
@@ -48,6 +48,12 @@
     op TEMPLATE_INTERPRET armv5te
     op TEMPLATE_MONITOR_ENTER armv5te
     op TEMPLATE_MONITOR_ENTER_DEBUG armv5te
+    op TEMPLATE_PERIODIC_PROFILING armv5te
+    op TEMPLATE_INVOKE_METHOD_CHAIN_PROF armv5te
+    op TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF armv5te
+    op TEMPLATE_INVOKE_METHOD_NATIVE_PROF armv5te
+    op TEMPLATE_INVOKE_METHOD_NO_OPT_PROF armv5te
+    op TEMPLATE_RETURN_PROF armv5te
 
 op-end
 
diff --git a/vm/compiler/template/config-armv7-a b/vm/compiler/template/config-armv7-a
index be7af31..9d66e55 100644
--- a/vm/compiler/template/config-armv7-a
+++ b/vm/compiler/template/config-armv7-a
@@ -48,6 +48,12 @@
     op TEMPLATE_INTERPRET armv5te
     op TEMPLATE_MONITOR_ENTER armv5te
     op TEMPLATE_MONITOR_ENTER_DEBUG armv5te
+    op TEMPLATE_PERIODIC_PROFILING armv5te
+    op TEMPLATE_INVOKE_METHOD_CHAIN_PROF armv5te
+    op TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF armv5te
+    op TEMPLATE_INVOKE_METHOD_NATIVE_PROF armv5te
+    op TEMPLATE_INVOKE_METHOD_NO_OPT_PROF armv5te
+    op TEMPLATE_RETURN_PROF armv5te
 op-end
 
 # "helper" code for C; include if you use any of the C stubs (this generates
diff --git a/vm/compiler/template/config-armv7-a-neon b/vm/compiler/template/config-armv7-a-neon
index be7af31..9d66e55 100644
--- a/vm/compiler/template/config-armv7-a-neon
+++ b/vm/compiler/template/config-armv7-a-neon
@@ -48,6 +48,12 @@
     op TEMPLATE_INTERPRET armv5te
     op TEMPLATE_MONITOR_ENTER armv5te
     op TEMPLATE_MONITOR_ENTER_DEBUG armv5te
+    op TEMPLATE_PERIODIC_PROFILING armv5te
+    op TEMPLATE_INVOKE_METHOD_CHAIN_PROF armv5te
+    op TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF armv5te
+    op TEMPLATE_INVOKE_METHOD_NATIVE_PROF armv5te
+    op TEMPLATE_INVOKE_METHOD_NO_OPT_PROF armv5te
+    op TEMPLATE_RETURN_PROF armv5te
 op-end
 
 # "helper" code for C; include if you use any of the C stubs (this generates
diff --git a/vm/compiler/template/ia32/TEMPLATE_INTERPRET.S b/vm/compiler/template/ia32/TEMPLATE_INTERPRET.S
index 4c98917..5c7bf7c 100644
--- a/vm/compiler/template/ia32/TEMPLATE_INTERPRET.S
+++ b/vm/compiler/template/ia32/TEMPLATE_INTERPRET.S
@@ -1,27 +1,38 @@
     /*
-     * TODO: figure out how best to do this on x86, as we don't have
-     * an lr equivalent and probably don't want to push.
+     * This handler is a bit odd - it may be called via chaining or
+     * from static code and is expected to cause control to flow
+     * to the interpreter.  The problem is where to find the Dalvik
+     * PC of the next instruction.  When called via chaining, the dPC
+     * will be located at *rp.  When called from static code, rPC is
+     * valid and rp is a real return pointer (that should be ignored).
+     * The Arm target deals with this by using the link register as
+     * a flag.  If it is zero, we know we were called from static code.
+     * If non-zero, it points to the chain cell containing dPC.
+     * For x86, we'll infer the source by looking where rp points.
+     * If it points to anywhere within the code cache, we'll assume
+     * we got here via chaining.  Otherwise, we'll assume rPC is valid.
      *
-     * This handler transfers control to the interpeter without performing
-     * any lookups.  It may be called either as part of a normal chaining
-     * operation, or from the transition code in header.S.  We distinquish
-     * the two cases by looking at the link register.  If called from a
-     * translation chain, it will point to the chaining Dalvik PC -3.
      * On entry:
-     *    lr - if NULL:
-     *        r1 - the Dalvik PC to begin interpretation.
-     *    else
-     *        [lr, #3] contains Dalvik PC to begin interpretation
-     *    rGLUE - pointer to interpState
-     *    rFP - Dalvik frame pointer
-     *
-     *cmp     lr, #0
-     *ldrne   r1,[lr, #3]
-     *ldr     r2, .LinterpPunt
-     *mov     r0, r1                       @ set Dalvik PC
-     *bx      r2
-     *@ doesn't return
+     *    (TOS)<- return pointer or pointer to dPC
      */
 
+/*
+ * FIXME - this won't work as-is.  The cache boundaries are not
+ * set up until later.  Perhaps rething this whole thing.  Do we
+ * really need an interpret teplate?
+ */
+
+
+     movl   rSELF,%ecx
+     movl   $$.LinterpPunt,%edx
+     pop    %eax
+     /*cmpl   %eax,offThread_jitCacheEnd(%ecx)*/
+     ja     1f
+     /*cmpl   %eax,offThread_jitCacheStart(%ecx)*/
+     jb     1f
+     movl   %eax,rPC
+1:
+     jmp    *(%edx)
+
 .LinterpPunt:
     .long   dvmJitToInterpPunt
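
The decision described in the new comment reduces to a range check against the JIT code cache; here is a hedged C rendering of it. jitCacheStart/jitCacheEnd stand in for the commented-out offThread_jitCache* operands (the FIXME notes those bounds are not set up yet, so the names are assumptions), popFromStack() is a hypothetical stand-in for the x86 pop, and dvmJitToInterpPunt is the real symbol behind .LinterpPunt, shown without its argument convention.

    /* Hedged sketch of the dispatch choice TEMPLATE_INTERPRET describes for x86. */
    char *rp = (char *) popFromStack();        /* (TOS): return ptr or ptr to dPC  */
    if (rp >= self->jitCacheStart && rp < self->jitCacheEnd) {
        rPC = *(const u2 **) rp;               /* arrived via chaining: dPC at *rp */
    }                                          /* otherwise rPC is already valid   */
    dvmJitToInterpPunt();                      /* punt to the interpreter at rPC   */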
diff --git a/vm/compiler/template/ia32/footer.S b/vm/compiler/template/ia32/footer.S
index 1b1a1ae..d350c77 100644
--- a/vm/compiler/template/ia32/footer.S
+++ b/vm/compiler/template/ia32/footer.S
@@ -6,14 +6,6 @@
 
     .text
     .align  4
-/*
- * FIXME - need a cacheflush for x86
- */
-    .global cacheflush
-cacheflush:
-    movl  $$0xdeadf0f0, %eax
-    call *%eax
-
 
     .global dmvCompilerTemplateEnd
 dmvCompilerTemplateEnd:
diff --git a/vm/compiler/template/ia32/header.S b/vm/compiler/template/ia32/header.S
index 57f5a5b..ea2cc0f 100644
--- a/vm/compiler/template/ia32/header.S
+++ b/vm/compiler/template/ia32/header.S
@@ -16,6 +16,12 @@
 
 #if defined(WITH_JIT)
 
+/* Subset of defines from mterp/x86/header.S */
+#define rSELF (%ebp)
+#define rPC   %esi
+#define rFP   %edi
+#define rINST %ebx
+
 /*
  * This is a #include, not a %include, because we want the C pre-processor
  * to expand the macros into assembler assignment statements.
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S b/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
index e1d0524..2e941e5 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
@@ -62,7 +62,7 @@
 
   reg nick      purpose
   r5  rFP       interpreted frame pointer, used for accessing locals and args
-  r6  rGLUE     MterpGlue pointer
+  r6  rSELF     thread pointer
 
 The following registers have fixed assignments in mterp but are scratch
 registers in compiled code
@@ -80,7 +80,7 @@
 /* single-purpose registers, given names for clarity */
 #define rPC     r4
 #define rFP     r5
-#define rGLUE   r6
+#define rSELF   r6
 #define rINST   r7
 #define rIBASE  r8
 
@@ -108,17 +108,6 @@
  * ===========================================================================
  */
 
-/*
- * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
- * Jump to subroutine.
- *
- * May modify IP and LR.
- */
-.macro  LDR_PC_LR source
-    mov     lr, pc
-    ldr     pc, \source
-.endm
-
 
     .global dvmCompilerTemplateStart
     .type   dvmCompilerTemplateStart, %function
@@ -177,16 +166,17 @@
      * address in the code cache following the invoke instruction. Otherwise
      * return to the special dvmJitToInterpNoChain entry point.
      */
-#if defined(WITH_INLINE_PROFILING)
+#if defined(TEMPLATE_INLINE_PROFILING)
     stmfd   sp!, {r0-r2,lr}             @ preserve live registers
     mov     r0, r6
-    @ r0=rGlue
-    LDR_PC_LR ".LdvmFastJavaMethodTraceExit"
+    @ r0=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastJavaMethodTraceExit
     ldmfd   sp!, {r0-r2,lr}             @ restore live registers
 #endif
     SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
     ldr     r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
-    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
+    ldr     r8, [rSELF, #offThread_suspendCount] @ r8<- suspendCount
     ldr     rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
 #if !defined(WITH_SELF_VERIFICATION)
     ldr     r9,  [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
@@ -195,7 +185,6 @@
 #endif
     ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                         @ r2<- method we're returning to
-    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
     cmp     r2, #0                      @ break frame?
 #if !defined(WITH_SELF_VERIFICATION)
     beq     1f                          @ bail to interpreter
@@ -205,16 +194,15 @@
     ldr     r1, .LdvmJitToInterpNoChainNoProfile @ defined in footer.S
     mov     rFP, r10                    @ publish new FP
     ldr     r10, [r2, #offMethod_clazz] @ r10<- method->clazz
-    ldr     r8, [r8]                    @ r8<- suspendCount
 
-    str     r2, [rGLUE, #offGlue_method]@ glue->method = newSave->method
+    str     r2, [rSELF, #offThread_method]@ self->method = newSave->method
     ldr     r0, [r10, #offClassObject_pDvmDex] @ r0<- method->clazz->pDvmDex
-    str     rFP, [r3, #offThread_curFrame] @ self->curFrame = fp
+    str     rFP, [rSELF, #offThread_curFrame] @ self->curFrame = fp
     add     rPC, rPC, #6                @ publish new rPC (advance 6 bytes)
-    str     r0, [rGLUE, #offGlue_methodClassDex]
+    str     r0, [rSELF, #offThread_methodClassDex]
     cmp     r8, #0                      @ check the suspendCount
     movne   r9, #0                      @ clear the chaining cell address
-    str     r9, [r3, #offThread_inJitCodeCache] @ in code cache or not
+    str     r9, [rSELF, #offThread_inJitCodeCache] @ in code cache or not
     cmp     r9, #0                      @ chaining cell exists?
     blxne   r9                          @ jump to the chaining cell
 #if defined(WITH_JIT_TUNING)
@@ -222,10 +210,10 @@
 #endif
     mov     pc, r1                      @ callsite is interpreted
 1:
-    stmia   rGLUE, {rPC, rFP}           @ SAVE_PC_FP_TO_GLUE()
+    stmia   rSELF, {rPC, rFP}           @ SAVE_PC_FP_TO_SELF()
     ldr     r2, .LdvmMterpStdBail       @ defined in footer.S
     mov     r1, #0                      @ changeInterp = false
-    mov     r0, rGLUE                   @ Expecting rGLUE in r0
+    mov     r0, rSELF                   @ Expecting rSELF in r0
     blx     r2                          @ exit the interpreter
 
 /* ------------------------------ */
@@ -241,14 +229,13 @@
     @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
     ldrh    r7, [r0, #offMethod_registersSize]  @ r7<- methodToCall->regsSize
     ldrh    r2, [r0, #offMethod_outsSize]  @ r2<- methodToCall->outsSize
-    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
-    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r8, [rSELF, #offThread_suspendCount] @ r8<- suspendCount
     add     r3, r1, #1  @ Thumb addr is odd
     SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
     sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
     sub     r10, r10, r2, lsl #2        @ r10<- bottom (newsave - outsSize)
-    ldr     r8, [r8]                    @ r8<- suspendCount (int)
     cmp     r10, r9                     @ bottom < interpStackEnd?
     bxlo    lr                          @ return to raise stack overflow excep.
     @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
@@ -274,18 +261,18 @@
 
     ldr     r10, .LdvmJitToInterpTraceSelectNoChain
     ldr     r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
-    ldr     r2, [rGLUE, #offGlue_self]      @ r2<- glue->self
 
-    @ Update "glue" values for the new method
-    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
-    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
+    @ Update "thread" values for the new method
+    str     r0, [rSELF, #offThread_method]    @ self->method = methodToCall
+    str     r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
     mov     rFP, r1                         @ fp = newFp
-    str     rFP, [r2, #offThread_curFrame]  @ self->curFrame = newFp
-#if defined(WITH_INLINE_PROFILING)
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
     stmfd   sp!, {r0-r3}                    @ preserve r0-r3
     mov     r1, r6
-    @ r0=methodToCall, r1=rGlue
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    @ r0=methodToCall, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}                    @ restore r0-r3
 #endif
 
@@ -309,15 +296,14 @@
     @ rPC = dalvikCallsite, r7 = methodToCall->registersSize
     @ methodToCall is guaranteed to be non-native
 .LinvokeChain:
-    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
-    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r8, [rSELF, #offThread_suspendCount]      @ r8<- suspendCount
     add     r3, r1, #1  @ Thumb addr is odd
     SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
     sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
     add     r12, lr, #2                 @ setup the punt-to-interp address
     sub     r10, r10, r2, lsl #2        @ r10<- bottom (newsave - outsSize)
-    ldr     r8, [r8]                    @ r8<- suspendCount (int)
     cmp     r10, r9                     @ bottom < interpStackEnd?
     bxlo    r12                         @ return to raise stack overflow excep.
     @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
@@ -333,18 +319,18 @@
     bxne    r12                         @ bail to the interpreter
 
     ldr     r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
-    ldr     r2, [rGLUE, #offGlue_self]      @ r2<- glue->self
 
-    @ Update "glue" values for the new method
-    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
-    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
+    @ Update "thread" values for the new method
+    str     r0, [rSELF, #offThread_method]    @ self->method = methodToCall
+    str     r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
     mov     rFP, r1                         @ fp = newFp
-    str     rFP, [r2, #offThread_curFrame]  @ self->curFrame = newFp
-#if defined(WITH_INLINE_PROFILING)
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
     stmfd   sp!, {r0-r2,lr}             @ preserve clobbered live registers
     mov     r1, r6
-    @ r0=methodToCall, r1=rGlue
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    @ r0=methodToCall, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r2,lr}             @ restore registers
 #endif
 
@@ -382,7 +368,7 @@
     ldr     r3, [r0, #offObject_clazz]  @ r3 <- this->class
     ldr     r8, [r2, #4]    @ r8 <- predictedChainCell->clazz
     ldr     r0, [r2, #8]    @ r0 <- predictedChainCell->method
-    ldr     r9, [rGLUE, #offGlue_icRechainCount]   @ r1 <- shared rechainCount
+    ldr     r9, [rSELF, #offThread_icRechainCount] @ r1 <- shared rechainCount
     cmp     r3, r8          @ predicted class == actual class?
 #if defined(WITH_JIT_TUNING)
     ldr     r7, .LdvmICHitCount
@@ -404,7 +390,7 @@
     cmp     r8, #0          @ initialized class or not
     moveq   r1, #0
     subne   r1, r9, #1      @ count--
-    strne   r1, [rGLUE, #offGlue_icRechainCount]   @ write back to InterpState
+    strne   r1, [rSELF, #offThread_icRechainCount]  @ write back to thread
     add     lr, lr, #4      @ return to fully-resolve landing pad
     /*
      * r1 <- count
@@ -422,13 +408,12 @@
 /* File: armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S */
     @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
     @ r7 = methodToCall->registersSize
-    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
-    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r8, [rSELF, #offThread_suspendCount]      @ r8<- suspendCount
     add     r3, r1, #1  @ Thumb addr is odd
     SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
     sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
-    ldr     r8, [r8]                    @ r3<- suspendCount (int)
     cmp     r10, r9                     @ bottom < interpStackEnd?
     bxlo    lr                          @ return to raise stack overflow excep.
     @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
@@ -438,7 +423,6 @@
     @ set up newSaveArea
     str     rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
     str     r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
-    ldr     r3, [rGLUE, #offGlue_self]      @ r3<- glue->self
     str     r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
     cmp     r8, #0                      @ suspendCount != 0
     ldr     r8, [r0, #offMethod_nativeFunc] @ r8<- method->nativeFunc
@@ -449,50 +433,52 @@
 #endif
 
     @ go ahead and transfer control to the native code
-    ldr     r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
+    ldr     r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
     mov     r2, #0
-    str     r1, [r3, #offThread_curFrame]   @ self->curFrame = newFp
-    str     r2, [r3, #offThread_inJitCodeCache] @ not in the jit code cache
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ not in the jit code cache
     str     r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
                                         @ newFp->localRefCookie=top
-    mov     r9, r3                      @ r9<- glue->self (preserve)
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- new stack save area
 
-    mov     r2, r0                      @ r2<- methodToCall
-    mov     r0, r1                      @ r0<- newFP
-    add     r1, rGLUE, #offGlue_retval  @ r1<- &retval
-#if defined(WITH_INLINE_PROFILING)
-    @ r2=methodToCall, r6=rGLUE
+    mov     r2, r0                        @ arg2<- methodToCall
+    mov     r0, r1                        @ arg0<- newFP
+    add     r1, rSELF, #offThread_retval  @ arg1<- &retval
+    mov     r3, rSELF                     @ arg3<- self
+#if defined(TEMPLATE_INLINE_PROFILING)
+    @ r2=methodToCall, r6=rSELF
     stmfd   sp!, {r2,r6}                @ to be consumed after JNI return
     stmfd   sp!, {r0-r3}                @ preserve r0-r3
     mov     r0, r2
     mov     r1, r6
-    @ r0=JNIMethod, r1=rGlue
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    @ r0=JNIMethod, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
 #endif
 
     blx     r8                          @ off to the native code
 
-#if defined(WITH_INLINE_PROFILING)
+#if defined(TEMPLATE_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}                @ restore r2 and r6
-    @ r0=JNIMethod, r1=rGlue
-    LDR_PC_LR ".LdvmFastNativeMethodTraceExit"
+    @ r0=JNIMethod, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastNativeMethodTraceExit
 #endif
-    @ native return; r9=self, r10=newSaveArea
+    @ native return; r10=newSaveArea
     @ equivalent to dvmPopJniLocals
     ldr     r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
     ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top
-    ldr     r1, [r9, #offThread_exception] @ check for exception
-    str     rFP, [r9, #offThread_curFrame]  @ self->curFrame = fp
+    ldr     r1, [rSELF, #offThread_exception] @ check for exception
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = fp
     cmp     r1, #0                      @ null?
-    str     r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top
+    str     r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
     ldr     r0, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
 
     @ r0 = dalvikCallsitePC
     bne     .LhandleException           @ no, handle exception
 
-    str     r2, [r9, #offThread_inJitCodeCache] @ set the mode properly
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ set the mode properly
     cmp     r2, #0                      @ return chaining cell still exists?
     bxne    r2                          @ yes - go ahead
 
@@ -1108,9 +1094,9 @@
      */
     vpush   {d0-d15}                    @ save out all fp registers
     push    {r0-r12,lr}                 @ save out all registers
+    ldr     r2, .LdvmSelfVerificationMemOpDecode @ defined in footer.S
     mov     r0, lr                      @ arg0 <- link register
     mov     r1, sp                      @ arg1 <- stack pointer
-    ldr     r2, .LdvmSelfVerificationMemOpDecode @ defined in footer.S
     blx     r2                          @ decode and handle the mem op
     pop     {r0-r12,lr}                 @ restore all registers
     vpop    {d0-d15}                    @ restore all fp registers
@@ -1390,7 +1376,7 @@
      *        r1 - the Dalvik PC to begin interpretation.
      *    else
      *        [lr, #3] contains Dalvik PC to begin interpretation
-     *    rGLUE - pointer to interpState
+     *    rSELF - pointer to thread
      *    rFP - Dalvik frame pointer
      */
     cmp     lr, #0
@@ -1431,10 +1417,10 @@
     str     r3, [r0, #offThread_inJitCodeCache]
     blx     r2                           @ dvmLockObject(self, obj)
     @ refresh Jit's on/off status
-    ldr     r0, [rGLUE, #offGlue_ppJitProfTable]
-    ldr     r0, [r0]
+    ldr     r3, [rSELF, #offThread_ppJitProfTable]
+    ldr     r3, [r3]
     ldr     r2, .LdvmJitToInterpNoChain
-    str     r0, [rGLUE, #offGlue_pJitProfTable]
+    str     r3, [rSELF, #offThread_pJitProfTable]
     @ Bail to interpreter - no chain [note - r4 still contains rPC]
 #if defined(WITH_JIT_TUNING)
     mov     r0, #kHeavyweightMonitor
@@ -1462,11 +1448,10 @@
     str     r3, [r0, #offThread_inJitCodeCache]
     blx     r2             @ dvmLockObject(self, obj)
     @ refresh Jit's on/off status & test for exception
-    ldr     r0, [rGLUE, #offGlue_ppJitProfTable]
-    ldr     r1, [rGLUE, #offGlue_self]
-    ldr     r0, [r0]
-    ldr     r1, [r1, #offThread_exception]
-    str     r0, [rGLUE, #offGlue_pJitProfTable]
+    ldr     r3, [rSELF, #offThread_ppJitProfTable]
+    ldr     r3, [r3]
+    ldr     r1, [rSELF, #offThread_exception]
+    str     r3, [rSELF, #offThread_pJitProfTable]
     cmp     r1, #0
     beq     1f
     ldr     r2, .LhandleException
@@ -1479,6 +1464,394 @@
 #endif
     ldr     pc, .LdvmJitToInterpNoChain
 
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_PERIODIC_PROFILING
+dvmCompiler_TEMPLATE_PERIODIC_PROFILING:
+/* File: armv5te/TEMPLATE_PERIODIC_PROFILING.S */
+    /*
+     * Increment profile counter for this trace, and decrement
+     * sample counter.  If sample counter goes below zero, turn
+     * off profiling.
+     *
+     * On entry
+     * (lr-11) is address of pointer to counter.  Note: the counter
+     *    actually exists 10 bytes before the return target, but because
+     *    we are arriving from thumb mode, lr will have its low bit set.
+     */
+     ldr    r0, [lr,#-11]
+     ldr    r1, [rSELF, #offThread_pProfileCountdown]
+     ldr    r2, [r0]                    @ get counter
+     ldr    r3, [r1]                    @ get countdown timer
+     add    r2, #1
+     subs   r2, #1
+     blt    .LTEMPLATE_PERIODIC_PROFILING_disable_profiling
+     str    r2, [r0]
+     str    r3, [r1]
+     bx     lr
+
+.LTEMPLATE_PERIODIC_PROFILING_disable_profiling:
+     mov    r4, lr                     @ preserve lr
+     ldr    r0, .LdvmJitTraceProfilingOff
+     blx    r0
+     bx     r4
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_RETURN_PROF
+dvmCompiler_TEMPLATE_RETURN_PROF:
+/* File: armv5te/TEMPLATE_RETURN_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_RETURN.S */
+    /*
+     * Unwind a frame from the Dalvik stack for compiled OP_RETURN_XXX.
+     * If the stored value in returnAddr
+     * is non-zero, the caller is compiled by the JIT thus return to the
+     * address in the code cache following the invoke instruction. Otherwise
+     * return to the special dvmJitToInterpNoChain entry point.
+     */
+#if defined(TEMPLATE_INLINE_PROFILING)
+    stmfd   sp!, {r0-r2,lr}             @ preserve live registers
+    mov     r0, r6
+    @ r0=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastJavaMethodTraceExit
+    ldmfd   sp!, {r0-r2,lr}             @ restore live registers
+#endif
+    SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
+    ldr     r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
+    ldr     r8, [rSELF, #offThread_suspendCount] @ r8<- suspendCount
+    ldr     rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+#if !defined(WITH_SELF_VERIFICATION)
+    ldr     r9,  [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
+#else
+    mov     r9, #0                      @ disable chaining
+#endif
+    ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
+                                        @ r2<- method we're returning to
+    cmp     r2, #0                      @ break frame?
+#if !defined(WITH_SELF_VERIFICATION)
+    beq     1f                          @ bail to interpreter
+#else
+    blxeq   lr                          @ punt to interpreter and compare state
+#endif
+    ldr     r1, .LdvmJitToInterpNoChainNoProfile @ defined in footer.S
+    mov     rFP, r10                    @ publish new FP
+    ldr     r10, [r2, #offMethod_clazz] @ r10<- method->clazz
+
+    str     r2, [rSELF, #offThread_method]@ self->method = newSave->method
+    ldr     r0, [r10, #offClassObject_pDvmDex] @ r0<- method->clazz->pDvmDex
+    str     rFP, [rSELF, #offThread_curFrame] @ self->curFrame = fp
+    add     rPC, rPC, #6                @ publish new rPC (advance 6 bytes)
+    str     r0, [rSELF, #offThread_methodClassDex]
+    cmp     r8, #0                      @ check the suspendCount
+    movne   r9, #0                      @ clear the chaining cell address
+    str     r9, [rSELF, #offThread_inJitCodeCache] @ in code cache or not
+    cmp     r9, #0                      @ chaining cell exists?
+    blxne   r9                          @ jump to the chaining cell
+#if defined(WITH_JIT_TUNING)
+    mov     r0, #kCallsiteInterpreted
+#endif
+    mov     pc, r1                      @ callsite is interpreted
+1:
+    stmia   rSELF, {rPC, rFP}           @ SAVE_PC_FP_TO_SELF()
+    ldr     r2, .LdvmMterpStdBail       @ defined in footer.S
+    mov     r1, #0                      @ changeInterp = false
+    mov     r0, rSELF                   @ Expecting rSELF in r0
+    blx     r2                          @ exit the interpreter
+
+#undef TEMPLATE_INLINE_PROFILING
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF
+dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S */
+    /*
+     * For polymorphic callsites - setup the Dalvik frame and load Dalvik PC
+     * into rPC then jump to dvmJitToInterpNoChain to dispatch the
+     * runtime-resolved callee.
+     */
+    @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
+    ldrh    r7, [r0, #offMethod_registersSize]  @ r7<- methodToCall->regsSize
+    ldrh    r2, [r0, #offMethod_outsSize]  @ r2<- methodToCall->outsSize
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r8, [rSELF, #offThread_suspendCount] @ r8<- suspendCount
+    add     r3, r1, #1  @ Thumb addr is odd
+    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
+    sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
+    SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
+    sub     r10, r10, r2, lsl #2        @ r10<- bottom (newsave - outsSize)
+    cmp     r10, r9                     @ bottom < interpStackEnd?
+    bxlo    lr                          @ return to raise stack overflow excep.
+    @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
+    ldr     r9, [r0, #offMethod_clazz]      @ r9<- method->clazz
+    ldr     r10, [r0, #offMethod_accessFlags] @ r10<- methodToCall->accessFlags
+    str     rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+    str     rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
+    ldr     rPC, [r0, #offMethod_insns]     @ rPC<- methodToCall->insns
+
+
+    @ set up newSaveArea
+    str     rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
+    str     r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
+    str     r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
+    cmp     r8, #0                      @ suspendCount != 0
+    bxne    lr                          @ bail to the interpreter
+    tst     r10, #ACC_NATIVE
+#if !defined(WITH_SELF_VERIFICATION)
+    bne     .LinvokeNative
+#else
+    bxne    lr                          @ bail to the interpreter
+#endif
+
+    ldr     r10, .LdvmJitToInterpTraceSelectNoChain
+    ldr     r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
+
+    @ Update "thread" values for the new method
+    str     r0, [rSELF, #offThread_method]    @ self->method = methodToCall
+    str     r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
+    mov     rFP, r1                         @ fp = newFp
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
+    stmfd   sp!, {r0-r3}                    @ preserve r0-r3
+    mov     r1, r6
+    @ r0=methodToCall, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
+    ldmfd   sp!, {r0-r3}                    @ restore r0-r3
+#endif
+
+    @ Start executing the callee
+#if defined(WITH_JIT_TUNING)
+    mov     r0, #kInlineCacheMiss
+#endif
+    mov     pc, r10                         @ dvmJitToInterpTraceSelectNoChain
+
+#undef TEMPLATE_INLINE_PROFILING
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN_PROF
+dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN_PROF:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_CHAIN_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S */
+    /*
+     * For monomorphic callsite, setup the Dalvik frame and return to the
+     * Thumb code through the link register to transfer control to the callee
+     * method through a dedicated chaining cell.
+     */
+    @ r0 = methodToCall, r1 = returnCell, r2 = methodToCall->outsSize
+    @ rPC = dalvikCallsite, r7 = methodToCall->registersSize
+    @ methodToCall is guaranteed to be non-native
+.LinvokeChainProf:
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r8, [rSELF, #offThread_suspendCount]      @ r8<- suspendCount
+    add     r3, r1, #1  @ Thumb addr is odd
+    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
+    sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
+    SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
+    add     r12, lr, #2                 @ setup the punt-to-interp address
+    sub     r10, r10, r2, lsl #2        @ r10<- bottom (newsave - outsSize)
+    cmp     r10, r9                     @ bottom < interpStackEnd?
+    bxlo    r12                         @ return to raise stack overflow excep.
+    @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
+    ldr     r9, [r0, #offMethod_clazz]      @ r9<- method->clazz
+    str     rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+    str     rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
+
+    @ set up newSaveArea
+    str     rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
+    str     r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
+    str     r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
+    cmp     r8, #0                      @ suspendCount != 0
+    bxne    r12                         @ bail to the interpreter
+
+    ldr     r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
+
+    @ Update "thread" values for the new method
+    str     r0, [rSELF, #offThread_method]    @ self->method = methodToCall
+    str     r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
+    mov     rFP, r1                         @ fp = newFp
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
+    stmfd   sp!, {r0-r2,lr}             @ preserve clobbered live registers
+    mov     r1, r6
+    @ r0=methodToCall, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
+    ldmfd   sp!, {r0-r2,lr}             @ restore registers
+#endif
+
+    bx      lr                              @ return to the callee-chaining cell
+
+#undef TEMPLATE_INLINE_PROFILING
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF
+dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S */
+    /*
+     * For polymorphic callsite, check whether the cached class pointer matches
+     * the current one. If so setup the Dalvik frame and return to the
+     * Thumb code through the link register to transfer control to the callee
+     * method through a dedicated chaining cell.
+     *
+     * The predicted chaining cell is declared in ArmLIR.h with the
+     * following layout:
+     *
+     *  typedef struct PredictedChainingCell {
+     *      u4 branch;
+     *      const ClassObject *clazz;
+     *      const Method *method;
+     *      u4 counter;
+     *  } PredictedChainingCell;
+     *
+     * Upon returning to the callsite:
+     *    - lr  : to branch to the chaining cell
+     *    - lr+2: to punt to the interpreter
+     *    - lr+4: to fully resolve the callee and may rechain.
+     *            r3 <- class
+     *            r9 <- counter
+     */
+    @ r0 = this, r1 = returnCell, r2 = predictedChainCell, rPC = dalvikCallsite
+    ldr     r3, [r0, #offObject_clazz]  @ r3 <- this->class
+    ldr     r8, [r2, #4]    @ r8 <- predictedChainCell->clazz
+    ldr     r0, [r2, #8]    @ r0 <- predictedChainCell->method
+    ldr     r9, [rSELF, #offThread_icRechainCount] @ r1 <- shared rechainCount
+    cmp     r3, r8          @ predicted class == actual class?
+#if defined(WITH_JIT_TUNING)
+    ldr     r7, .LdvmICHitCount
+#if defined(WORKAROUND_CORTEX_A9_745320)
+    /* Don't use conditional loads if the HW defect exists */
+    bne     101f
+    ldr     r10, [r7, #0]
+101:
+#else
+    ldreq   r10, [r7, #0]
+#endif
+    add     r10, r10, #1
+    streq   r10, [r7, #0]
+#endif
+    ldreqh  r7, [r0, #offMethod_registersSize]  @ r7<- methodToCall->regsSize
+    ldreqh  r2, [r0, #offMethod_outsSize]  @ r2<- methodToCall->outsSize
+    beq     .LinvokeChainProf   @ predicted chain is valid
+    ldr     r7, [r3, #offClassObject_vtable] @ r7 <- this->class->vtable
+    cmp     r8, #0          @ initialized class or not
+    moveq   r1, #0
+    subne   r1, r9, #1      @ count--
+    strne   r1, [rSELF, #offThread_icRechainCount]  @ write back to thread
+    add     lr, lr, #4      @ return to fully-resolve landing pad
+    /*
+     * r1 <- count
+     * r2 <- &predictedChainCell
+     * r3 <- this->class
+     * r4 <- dPC
+     * r7 <- this->class->vtable
+     */
+    bx      lr
+
+#undef TEMPLATE_INLINE_PROFILING
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF
+dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_NATIVE_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S */
+    @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
+    @ r7 = methodToCall->registersSize
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r8, [rSELF, #offThread_suspendCount]      @ r8<- suspendCount
+    add     r3, r1, #1  @ Thumb addr is odd
+    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
+    sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
+    SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
+    cmp     r10, r9                     @ bottom < interpStackEnd?
+    bxlo    lr                          @ return to raise stack overflow excep.
+    @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
+    str     rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+    str     rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
+
+    @ set up newSaveArea
+    str     rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
+    str     r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
+    str     r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
+    cmp     r8, #0                      @ suspendCount != 0
+    ldr     r8, [r0, #offMethod_nativeFunc] @ r8<- method->nativeFunc
+#if !defined(WITH_SELF_VERIFICATION)
+    bxne    lr                          @ bail to the interpreter
+#else
+    bx      lr                          @ bail to interpreter unconditionally
+#endif
+
+    @ go ahead and transfer control to the native code
+    ldr     r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
+    mov     r2, #0
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ not in the jit code cache
+    str     r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
+                                        @ newFp->localRefCookie=top
+    SAVEAREA_FROM_FP(r10, r1)           @ r10<- new stack save area
+
+    mov     r2, r0                        @ arg2<- methodToCall
+    mov     r0, r1                        @ arg0<- newFP
+    add     r1, rSELF, #offThread_retval  @ arg1<- &retval
+    mov     r3, rSELF                     @ arg3<- self
+#if defined(TEMPLATE_INLINE_PROFILING)
+    @ r2=methodToCall, r6=rSELF
+    stmfd   sp!, {r2,r6}                @ to be consumed after JNI return
+    stmfd   sp!, {r0-r3}                @ preserve r0-r3
+    mov     r0, r2
+    mov     r1, r6
+    @ r0=JNIMethod, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
+    ldmfd   sp!, {r0-r3}                @ restore r0-r3
+#endif
+
+    blx     r8                          @ off to the native code
+
+#if defined(TEMPLATE_INLINE_PROFILING)
+    ldmfd   sp!, {r0-r1}                @ restore r2 and r6
+    @ r0=JNIMethod, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastNativeMethodTraceExit
+#endif
+    @ native return; r10=newSaveArea
+    @ equivalent to dvmPopJniLocals
+    ldr     r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
+    ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top
+    ldr     r1, [rSELF, #offThread_exception] @ check for exception
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = fp
+    cmp     r1, #0                      @ null?
+    str     r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
+    ldr     r0, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+
+    @ r0 = dalvikCallsitePC
+    bne     .LhandleException           @ no, handle exception
+
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ set the mode properly
+    cmp     r2, #0                      @ return chaining cell still exists?
+    bxne    r2                          @ yes - go ahead
+
+    @ continue executing the next instruction through the interpreter
+    ldr     r1, .LdvmJitToInterpTraceSelectNoChain @ defined in footer.S
+    add     rPC, r0, #6                 @ reconstruct new rPC (advance 6 bytes)
+#if defined(WITH_JIT_TUNING)
+    mov     r0, #kCallsiteInterpreted
+#endif
+    mov     pc, r1
+
+#undef TEMPLATE_INLINE_PROFILING
+
     .size   dvmCompilerTemplateStart, .-dvmCompilerTemplateStart
 /* File: armv5te/footer.S */
 /*
@@ -1492,54 +1865,61 @@
 .LinvokeNative:
     @ Prep for the native call
     @ r1 = newFP, r0 = methodToCall
-    ldr     r3, [rGLUE, #offGlue_self]      @ r3<- glue->self
     mov     r2, #0
-    ldr     r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
-    str     r2, [r3, #offThread_inJitCodeCache] @ not in jit code cache
-    str     r1, [r3, #offThread_curFrame]   @ self->curFrame = newFp
+    ldr     r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ not in jit code cache
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
     str     r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
                                         @ newFp->localRefCookie=top
-    mov     r9, r3                      @ r9<- glue->self (preserve)
+    ldr     lr, [rSELF, #offThread_pInterpBreak]
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- new stack save area
 
     mov     r2, r0                      @ r2<- methodToCall
+    ldr     lr, [lr]                    @ lr<- set of active profilers
     mov     r0, r1                      @ r0<- newFP
-    add     r1, rGLUE, #offGlue_retval  @ r1<- &retval
-#if defined(WITH_INLINE_PROFILING)
-    @ r2: methodToCall, r6: rGLUE
+    add     r1, rSELF, #offThread_retval  @ r1<- &retval
+    mov     r3, rSELF                   @ arg3<- self
+    ands    lr, #kSubModeMethodTrace
+    beq     121f                        @ hop if not profiling
+    @ r2: methodToCall, r6: rSELF
     stmfd   sp!, {r2,r6}
     stmfd   sp!, {r0-r3}
     mov     r0, r2
     mov     r1, r6
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}
-#endif
 
-    LDR_PC_LR "[r2, #offMethod_nativeFunc]"
+    mov     lr, pc
+    ldr     pc, [r2, #offMethod_nativeFunc]
 
-#if defined(WITH_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}
-    LDR_PC_LR ".LdvmFastNativeMethodTraceExit"
-#endif
+    mov     lr, pc
+    ldr     pc, .LdvmFastNativeMethodTraceExit
+    b       212f
+121:
+    mov     lr, pc
+    ldr     pc, [r2, #offMethod_nativeFunc]
+212:
     @ Refresh Jit's on/off status
-    ldr     r3, [rGLUE, #offGlue_ppJitProfTable]
+    ldr     r3, [rSELF, #offThread_ppJitProfTable]
 
-    @ native return; r9=self, r10=newSaveArea
+    @ native return; r10=newSaveArea
     @ equivalent to dvmPopJniLocals
     ldr     r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
     ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top
-    ldr     r1, [r9, #offThread_exception] @ check for exception
+    ldr     r1, [rSELF, #offThread_exception] @ check for exception
     ldr     r3, [r3]    @ r1 <- pointer to Jit profile table
-    str     rFP, [r9, #offThread_curFrame]  @ self->curFrame = fp
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = fp
     cmp     r1, #0                      @ null?
-    str     r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top
+    str     r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
+    str     r3, [rSELF, #offThread_pJitProfTable]  @ cache current JitProfTable
     ldr     r0, [r10, #offStackSaveArea_savedPc] @ reload rPC
-    str     r3, [rGLUE, #offGlue_pJitProfTable]  @ cache current JitProfTable
 
     @ r0 = dalvikCallsitePC
     bne     .LhandleException           @ no, handle exception
 
-    str     r2, [r9, #offThread_inJitCodeCache] @ set the new mode
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ set the new mode
     cmp     r2, #0                      @ return chaining cell still exists?
     bxne    r2                          @ yes - go ahead
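    In C terms, the rewritten .LinvokeNative path above amounts to a run-time check of the
    thread's sub-mode word instead of a compile-time WITH_INLINE_PROFILING build. The sketch
    below is only an illustration: the Thread/Method layouts and the bridge-function signature
    are simplified stand-ins, and the kSubModeMethodTrace value is illustrative; only the names
    themselves (pInterpBreak, dvmFastMethodTraceEnter, dvmFastNativeMethodTraceExit) come from
    the assembly above.

        #include <stdint.h>

        /* Simplified stand-ins; the real definitions live in the VM headers. */
        typedef struct Method Method;
        typedef struct Thread Thread;
        typedef void NativeFunc(const uint32_t* args, void* pResult,
                                const Method* method, Thread* self);

        struct Method { NativeFunc* nativeFunc; };
        struct Thread { const int* pInterpBreak; };   /* active sub-mode flags */

        enum { kSubModeMethodTrace = 0x01 };          /* illustrative value only */

        /* Real implementations are provided by the VM. */
        void dvmFastMethodTraceEnter(const Method* m, Thread* self);
        void dvmFastNativeMethodTraceExit(const Method* m, Thread* self);

        /* Wrap the native call with trace enter/exit only when method
         * tracing is currently active on this thread. */
        static void invokeNative(Thread* self, const Method* method,
                                 const uint32_t* newFp, void* retval)
        {
            if (*self->pInterpBreak & kSubModeMethodTrace) {
                dvmFastMethodTraceEnter(method, self);
                method->nativeFunc(newFp, retval, method, self);
                dvmFastNativeMethodTraceExit(method, self);
            } else {
                method->nativeFunc(newFp, retval, method, self);
            }
        }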
 
@@ -1561,9 +1941,8 @@
 .LdeadFood:
     .word   0xdeadf00d
 #endif
-    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
     mov     r2, #0
-    str     r2, [r3, #offThread_inJitCodeCache] @ in interpreter land
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ in interpreter land
     ldr     r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func
     ldr     rIBASE, .LdvmAsmInstructionStart    @ same as above
     mov     rPC, r0                 @ reload the faulting Dalvik address
@@ -1584,6 +1963,8 @@
     .word   dvmMterpCommonExceptionThrown
 .LdvmLockObject:
     .word   dvmLockObject
+.LdvmJitTraceProfilingOff:
+    .word   dvmJitTraceProfilingOff
 #if defined(WITH_JIT_TUNING)
 .LdvmICHitCount:
     .word   gDvmICHitCount
@@ -1592,14 +1973,12 @@
 .LdvmSelfVerificationMemOpDecode:
     .word   dvmSelfVerificationMemOpDecode
 #endif
-#if defined(WITH_INLINE_PROFILING)
 .LdvmFastMethodTraceEnter:
     .word   dvmFastMethodTraceEnter
 .LdvmFastNativeMethodTraceExit:
     .word   dvmFastNativeMethodTraceExit
 .LdvmFastJavaMethodTraceExit:
     .word   dvmFastJavaMethodTraceExit
-#endif
 .L__aeabi_cdcmple:
     .word   __aeabi_cdcmple
 .L__aeabi_cfcmple:
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S b/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
index 5a47750..aebad92 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
@@ -62,7 +62,7 @@
 
   reg nick      purpose
   r5  rFP       interpreted frame pointer, used for accessing locals and args
-  r6  rGLUE     MterpGlue pointer
+  r6  rSELF     thread pointer
 
 The following registers have fixed assignments in mterp but are scratch
 registers in compiled code
@@ -80,7 +80,7 @@
 /* single-purpose registers, given names for clarity */
 #define rPC     r4
 #define rFP     r5
-#define rGLUE   r6
+#define rSELF   r6
 #define rINST   r7
 #define rIBASE  r8
 
@@ -108,17 +108,6 @@
  * ===========================================================================
  */
 
-/*
- * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
- * Jump to subroutine.
- *
- * May modify IP and LR.
- */
-.macro  LDR_PC_LR source
-    mov     lr, pc
-    ldr     pc, \source
-.endm
-
 
     .global dvmCompilerTemplateStart
     .type   dvmCompilerTemplateStart, %function
@@ -177,16 +166,17 @@
      * address in the code cache following the invoke instruction. Otherwise
      * return to the special dvmJitToInterpNoChain entry point.
      */
-#if defined(WITH_INLINE_PROFILING)
+#if defined(TEMPLATE_INLINE_PROFILING)
     stmfd   sp!, {r0-r2,lr}             @ preserve live registers
     mov     r0, r6
-    @ r0=rGlue
-    LDR_PC_LR ".LdvmFastJavaMethodTraceExit"
+    @ r0=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastJavaMethodTraceExit
     ldmfd   sp!, {r0-r2,lr}             @ restore live registers
 #endif
     SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
     ldr     r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
-    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
+    ldr     r8, [rSELF, #offThread_suspendCount] @ r8<- suspendCount
     ldr     rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
 #if !defined(WITH_SELF_VERIFICATION)
     ldr     r9,  [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
@@ -195,7 +185,6 @@
 #endif
     ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                         @ r2<- method we're returning to
-    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
     cmp     r2, #0                      @ break frame?
 #if !defined(WITH_SELF_VERIFICATION)
     beq     1f                          @ bail to interpreter
@@ -205,16 +194,15 @@
     ldr     r1, .LdvmJitToInterpNoChainNoProfile @ defined in footer.S
     mov     rFP, r10                    @ publish new FP
     ldr     r10, [r2, #offMethod_clazz] @ r10<- method->clazz
-    ldr     r8, [r8]                    @ r8<- suspendCount
 
-    str     r2, [rGLUE, #offGlue_method]@ glue->method = newSave->method
+    str     r2, [rSELF, #offThread_method]@ self->method = newSave->method
     ldr     r0, [r10, #offClassObject_pDvmDex] @ r0<- method->clazz->pDvmDex
-    str     rFP, [r3, #offThread_curFrame] @ self->curFrame = fp
+    str     rFP, [rSELF, #offThread_curFrame] @ self->curFrame = fp
     add     rPC, rPC, #6                @ publish new rPC (advance 6 bytes)
-    str     r0, [rGLUE, #offGlue_methodClassDex]
+    str     r0, [rSELF, #offThread_methodClassDex]
     cmp     r8, #0                      @ check the suspendCount
     movne   r9, #0                      @ clear the chaining cell address
-    str     r9, [r3, #offThread_inJitCodeCache] @ in code cache or not
+    str     r9, [rSELF, #offThread_inJitCodeCache] @ in code cache or not
     cmp     r9, #0                      @ chaining cell exists?
     blxne   r9                          @ jump to the chaining cell
 #if defined(WITH_JIT_TUNING)
@@ -222,10 +210,10 @@
 #endif
     mov     pc, r1                      @ callsite is interpreted
 1:
-    stmia   rGLUE, {rPC, rFP}           @ SAVE_PC_FP_TO_GLUE()
+    stmia   rSELF, {rPC, rFP}           @ SAVE_PC_FP_TO_SELF()
     ldr     r2, .LdvmMterpStdBail       @ defined in footer.S
     mov     r1, #0                      @ changeInterp = false
-    mov     r0, rGLUE                   @ Expecting rGLUE in r0
+    mov     r0, rSELF                   @ Expecting rSELF in r0
     blx     r2                          @ exit the interpreter
 
 /* ------------------------------ */
@@ -241,14 +229,13 @@
     @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
     ldrh    r7, [r0, #offMethod_registersSize]  @ r7<- methodToCall->regsSize
     ldrh    r2, [r0, #offMethod_outsSize]  @ r2<- methodToCall->outsSize
-    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
-    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r8, [rSELF, #offThread_suspendCount] @ r8<- suspendCount
     add     r3, r1, #1  @ Thumb addr is odd
     SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
     sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
     sub     r10, r10, r2, lsl #2        @ r10<- bottom (newsave - outsSize)
-    ldr     r8, [r8]                    @ r8<- suspendCount (int)
     cmp     r10, r9                     @ bottom < interpStackEnd?
     bxlo    lr                          @ return to raise stack overflow excep.
     @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
@@ -274,18 +261,18 @@
 
     ldr     r10, .LdvmJitToInterpTraceSelectNoChain
     ldr     r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
-    ldr     r2, [rGLUE, #offGlue_self]      @ r2<- glue->self
 
-    @ Update "glue" values for the new method
-    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
-    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
+    @ Update "thread" values for the new method
+    str     r0, [rSELF, #offThread_method]    @ self->method = methodToCall
+    str     r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
     mov     rFP, r1                         @ fp = newFp
-    str     rFP, [r2, #offThread_curFrame]  @ self->curFrame = newFp
-#if defined(WITH_INLINE_PROFILING)
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
     stmfd   sp!, {r0-r3}                    @ preserve r0-r3
     mov     r1, r6
-    @ r0=methodToCall, r1=rGlue
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    @ r0=methodToCall, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}                    @ restore r0-r3
 #endif
 
@@ -309,15 +296,14 @@
     @ rPC = dalvikCallsite, r7 = methodToCall->registersSize
     @ methodToCall is guaranteed to be non-native
 .LinvokeChain:
-    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
-    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r8, [rSELF, #offThread_suspendCount]      @ r8<- suspendCount
     add     r3, r1, #1  @ Thumb addr is odd
     SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
     sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
     add     r12, lr, #2                 @ setup the punt-to-interp address
     sub     r10, r10, r2, lsl #2        @ r10<- bottom (newsave - outsSize)
-    ldr     r8, [r8]                    @ r8<- suspendCount (int)
     cmp     r10, r9                     @ bottom < interpStackEnd?
     bxlo    r12                         @ return to raise stack overflow excep.
     @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
@@ -333,18 +319,18 @@
     bxne    r12                         @ bail to the interpreter
 
     ldr     r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
-    ldr     r2, [rGLUE, #offGlue_self]      @ r2<- glue->self
 
-    @ Update "glue" values for the new method
-    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
-    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
+    @ Update "thread" values for the new method
+    str     r0, [rSELF, #offThread_method]    @ self->method = methodToCall
+    str     r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
     mov     rFP, r1                         @ fp = newFp
-    str     rFP, [r2, #offThread_curFrame]  @ self->curFrame = newFp
-#if defined(WITH_INLINE_PROFILING)
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
     stmfd   sp!, {r0-r2,lr}             @ preserve clobbered live registers
     mov     r1, r6
-    @ r0=methodToCall, r1=rGlue
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    @ r0=methodToCall, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r2,lr}             @ restore registers
 #endif
 
@@ -382,7 +368,7 @@
     ldr     r3, [r0, #offObject_clazz]  @ r3 <- this->class
     ldr     r8, [r2, #4]    @ r8 <- predictedChainCell->clazz
     ldr     r0, [r2, #8]    @ r0 <- predictedChainCell->method
-    ldr     r9, [rGLUE, #offGlue_icRechainCount]   @ r1 <- shared rechainCount
+    ldr     r9, [rSELF, #offThread_icRechainCount] @ r9 <- shared rechainCount
     cmp     r3, r8          @ predicted class == actual class?
 #if defined(WITH_JIT_TUNING)
     ldr     r7, .LdvmICHitCount
@@ -404,7 +390,7 @@
     cmp     r8, #0          @ initialized class or not
     moveq   r1, #0
     subne   r1, r9, #1      @ count--
-    strne   r1, [rGLUE, #offGlue_icRechainCount]   @ write back to InterpState
+    strne   r1, [rSELF, #offThread_icRechainCount]  @ write back to thread
     add     lr, lr, #4      @ return to fully-resolve landing pad
     /*
      * r1 <- count
@@ -422,13 +408,12 @@
 /* File: armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S */
     @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
     @ r7 = methodToCall->registersSize
-    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
-    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r8, [rSELF, #offThread_suspendCount]      @ r8<- suspendCount
     add     r3, r1, #1  @ Thumb addr is odd
     SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
     sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
-    ldr     r8, [r8]                    @ r3<- suspendCount (int)
     cmp     r10, r9                     @ bottom < interpStackEnd?
     bxlo    lr                          @ return to raise stack overflow excep.
     @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
@@ -438,7 +423,6 @@
     @ set up newSaveArea
     str     rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
     str     r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
-    ldr     r3, [rGLUE, #offGlue_self]      @ r3<- glue->self
     str     r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
     cmp     r8, #0                      @ suspendCount != 0
     ldr     r8, [r0, #offMethod_nativeFunc] @ r8<- method->nativeFunc
@@ -449,50 +433,52 @@
 #endif
 
     @ go ahead and transfer control to the native code
-    ldr     r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
+    ldr     r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
     mov     r2, #0
-    str     r1, [r3, #offThread_curFrame]   @ self->curFrame = newFp
-    str     r2, [r3, #offThread_inJitCodeCache] @ not in the jit code cache
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ not in the jit code cache
     str     r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
                                         @ newFp->localRefCookie=top
-    mov     r9, r3                      @ r9<- glue->self (preserve)
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- new stack save area
 
-    mov     r2, r0                      @ r2<- methodToCall
-    mov     r0, r1                      @ r0<- newFP
-    add     r1, rGLUE, #offGlue_retval  @ r1<- &retval
-#if defined(WITH_INLINE_PROFILING)
-    @ r2=methodToCall, r6=rGLUE
+    mov     r2, r0                        @ arg2<- methodToCall
+    mov     r0, r1                        @ arg0<- newFP
+    add     r1, rSELF, #offThread_retval  @ arg1<- &retval
+    mov     r3, rSELF                     @ arg3<- self
+#if defined(TEMPLATE_INLINE_PROFILING)
+    @ r2=methodToCall, r6=rSELF
     stmfd   sp!, {r2,r6}                @ to be consumed after JNI return
     stmfd   sp!, {r0-r3}                @ preserve r0-r3
     mov     r0, r2
     mov     r1, r6
-    @ r0=JNIMethod, r1=rGlue
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    @ r0=JNIMethod, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
 #endif
 
     blx     r8                          @ off to the native code
 
-#if defined(WITH_INLINE_PROFILING)
+#if defined(TEMPLATE_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}                @ restore r2 and r6
-    @ r0=JNIMethod, r1=rGlue
-    LDR_PC_LR ".LdvmFastNativeMethodTraceExit"
+    @ r0=JNIMethod, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastNativeMethodTraceExit
 #endif
-    @ native return; r9=self, r10=newSaveArea
+    @ native return; r10=newSaveArea
     @ equivalent to dvmPopJniLocals
     ldr     r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
     ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top
-    ldr     r1, [r9, #offThread_exception] @ check for exception
-    str     rFP, [r9, #offThread_curFrame]  @ self->curFrame = fp
+    ldr     r1, [rSELF, #offThread_exception] @ check for exception
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = fp
     cmp     r1, #0                      @ null?
-    str     r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top
+    str     r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
     ldr     r0, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
 
     @ r0 = dalvikCallsitePC
     bne     .LhandleException           @ no, handle exception
 
-    str     r2, [r9, #offThread_inJitCodeCache] @ set the mode properly
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ set the mode properly
     cmp     r2, #0                      @ return chaining cell still exists?
     bxne    r2                          @ yes - go ahead
 
@@ -527,7 +513,8 @@
     /* op vAA, vBB, vCC */
     push    {r0-r3}                     @ save operands
     mov     r11, lr                     @ save return address
-    LDR_PC_LR ".L__aeabi_cdcmple"       @ PIC way of "bl __aeabi_cdcmple"
+    mov     lr, pc
+    ldr     pc, .L__aeabi_cdcmple       @ PIC way of "bl __aeabi_cdcmple"
     bhi     .LTEMPLATE_CMPG_DOUBLE_gt_or_nan       @ C set and Z clear, disambiguate
     mvncc   r0, #0                      @ (less than) r1<- -1
     moveq   r0, #0                      @ (equal) r1<- 0, trumps less than
@@ -540,7 +527,8 @@
 .LTEMPLATE_CMPG_DOUBLE_gt_or_nan:
     pop     {r2-r3}                     @ restore operands in reverse order
     pop     {r0-r1}                     @ restore operands in reverse order
-    LDR_PC_LR ".L__aeabi_cdcmple"       @ r0<- Z set if eq, C clear if <
+    mov     lr, pc
+    ldr     pc, .L__aeabi_cdcmple       @ r0<- Z set if eq, C clear if <
     movcc   r0, #1                      @ (greater than) r1<- 1
     bxcc    r11
     mov     r0, #1                            @ r1<- 1 or -1 for NaN
@@ -569,7 +557,8 @@
     /* op vAA, vBB, vCC */
     push    {r0-r3}                     @ save operands
     mov     r11, lr                     @ save return address
-    LDR_PC_LR ".L__aeabi_cdcmple"       @ PIC way of "bl __aeabi_cdcmple"
+    mov     lr, pc
+    ldr     pc, .L__aeabi_cdcmple       @ PIC way of "bl __aeabi_cdcmple"
     bhi     .LTEMPLATE_CMPL_DOUBLE_gt_or_nan       @ C set and Z clear, disambiguate
     mvncc   r0, #0                      @ (less than) r1<- -1
     moveq   r0, #0                      @ (equal) r1<- 0, trumps less than
@@ -582,7 +571,8 @@
 .LTEMPLATE_CMPL_DOUBLE_gt_or_nan:
     pop     {r2-r3}                     @ restore operands in reverse order
     pop     {r0-r1}                     @ restore operands in reverse order
-    LDR_PC_LR ".L__aeabi_cdcmple"       @ r0<- Z set if eq, C clear if <
+    mov     lr, pc
+    ldr     pc, .L__aeabi_cdcmple       @ r0<- Z set if eq, C clear if <
     movcc   r0, #1                      @ (greater than) r1<- 1
     bxcc    r11
     mvn     r0, #0                            @ r1<- 1 or -1 for NaN
@@ -631,7 +621,8 @@
     mov     r9, r0                      @ Save copies - we may need to redo
     mov     r10, r1
     mov     r11, lr                     @ save return address
-    LDR_PC_LR ".L__aeabi_cfcmple"       @ cmp <=: C clear if <, Z set if eq
+    mov     lr, pc
+    ldr     pc, .L__aeabi_cfcmple       @ cmp <=: C clear if <, Z set if eq
     bhi     .LTEMPLATE_CMPG_FLOAT_gt_or_nan       @ C set and Z clear, disambiguate
     mvncc   r0, #0                      @ (less than) r0<- -1
     moveq   r0, #0                      @ (equal) r0<- 0, trumps less than
@@ -642,7 +633,8 @@
 .LTEMPLATE_CMPG_FLOAT_gt_or_nan:
     mov     r0, r10                     @ restore in reverse order
     mov     r1, r9
-    LDR_PC_LR ".L__aeabi_cfcmple"       @ r0<- Z set if eq, C clear if <
+    mov     lr, pc
+    ldr     pc, .L__aeabi_cfcmple       @ r0<- Z set if eq, C clear if <
     movcc   r0, #1                      @ (greater than) r1<- 1
     bxcc    r11
     mov     r0, #1                            @ r1<- 1 or -1 for NaN
@@ -691,7 +683,8 @@
     mov     r9, r0                      @ Save copies - we may need to redo
     mov     r10, r1
     mov     r11, lr                     @ save return address
-    LDR_PC_LR ".L__aeabi_cfcmple"       @ cmp <=: C clear if <, Z set if eq
+    mov     lr, pc
+    ldr     pc, .L__aeabi_cfcmple       @ cmp <=: C clear if <, Z set if eq
     bhi     .LTEMPLATE_CMPL_FLOAT_gt_or_nan       @ C set and Z clear, disambiguate
     mvncc   r0, #0                      @ (less than) r0<- -1
     moveq   r0, #0                      @ (equal) r0<- 0, trumps less than
@@ -702,7 +695,8 @@
 .LTEMPLATE_CMPL_FLOAT_gt_or_nan:
     mov     r0, r10                     @ restore in reverse order
     mov     r1, r9
-    LDR_PC_LR ".L__aeabi_cfcmple"       @ r0<- Z set if eq, C clear if <
+    mov     lr, pc
+    ldr     pc, .L__aeabi_cfcmple       @ r0<- Z set if eq, C clear if <
     movcc   r0, #1                      @ (greater than) r1<- 1
     bxcc    r11
     mvn     r0, #0                            @ r1<- 1 or -1 for NaN
@@ -832,9 +826,9 @@
      * skip the memory op so it never gets executed.
      */
     push    {r0-r12,lr}                 @ save out all registers
+    ldr     r2, .LdvmSelfVerificationMemOpDecode @ defined in footer.S
     mov     r0, lr                      @ arg0 <- link register
     mov     r1, sp                      @ arg1 <- stack pointer
-    ldr     r2, .LdvmSelfVerificationMemOpDecode @ defined in footer.S
     blx     r2                          @ decode and handle the mem op
     pop     {r0-r12,lr}                 @ restore all registers
     bx      lr                          @ return to compiled code
@@ -1113,7 +1107,7 @@
      *        r1 - the Dalvik PC to begin interpretation.
      *    else
      *        [lr, #3] contains Dalvik PC to begin interpretation
-     *    rGLUE - pointer to interpState
+     *    rSELF - pointer to thread
      *    rFP - Dalvik frame pointer
      */
     cmp     lr, #0
@@ -1154,10 +1148,10 @@
     str     r3, [r0, #offThread_inJitCodeCache]
     blx     r2                           @ dvmLockObject(self, obj)
     @ refresh Jit's on/off status
-    ldr     r0, [rGLUE, #offGlue_ppJitProfTable]
-    ldr     r0, [r0]
+    ldr     r3, [rSELF, #offThread_ppJitProfTable]
+    ldr     r3, [r3]
     ldr     r2, .LdvmJitToInterpNoChain
-    str     r0, [rGLUE, #offGlue_pJitProfTable]
+    str     r3, [rSELF, #offThread_pJitProfTable]
     @ Bail to interpreter - no chain [note - r4 still contains rPC]
 #if defined(WITH_JIT_TUNING)
     mov     r0, #kHeavyweightMonitor
@@ -1185,11 +1179,10 @@
     str     r3, [r0, #offThread_inJitCodeCache]
     blx     r2             @ dvmLockObject(self, obj)
     @ refresh Jit's on/off status & test for exception
-    ldr     r0, [rGLUE, #offGlue_ppJitProfTable]
-    ldr     r1, [rGLUE, #offGlue_self]
-    ldr     r0, [r0]
-    ldr     r1, [r1, #offThread_exception]
-    str     r0, [rGLUE, #offGlue_pJitProfTable]
+    ldr     r3, [rSELF, #offThread_ppJitProfTable]
+    ldr     r3, [r3]
+    ldr     r1, [rSELF, #offThread_exception]
+    str     r3, [rSELF, #offThread_pJitProfTable]
     cmp     r1, #0
     beq     1f
     ldr     r2, .LhandleException
@@ -1202,6 +1195,394 @@
 #endif
     ldr     pc, .LdvmJitToInterpNoChain
 
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_PERIODIC_PROFILING
+dvmCompiler_TEMPLATE_PERIODIC_PROFILING:
+/* File: armv5te/TEMPLATE_PERIODIC_PROFILING.S */
+    /*
+     * Increment profile counter for this trace, and decrement
+     * sample counter.  If sample counter goes below zero, turn
+     * off profiling.
+     *
+     * On entry
+     * (lr-11) is address of pointer to counter.  Note: the counter
+     *    actually exists 10 bytes before the return target, but because
+     *    we are arriving from thumb mode, lr will have its low bit set.
+     */
+     ldr    r0, [lr,#-11]
+     ldr    r1, [rSELF, #offThread_pProfileCountdown]
+     ldr    r2, [r0]                    @ get counter
+     ldr    r3, [r1]                    @ get countdown timer
+     add    r2, #1                     @ increment profile counter
+     subs   r3, #1                     @ decrement countdown timer
+     blt    .LTEMPLATE_PERIODIC_PROFILING_disable_profiling
+     str    r2, [r0]
+     str    r3, [r1]
+     bx     lr
+
+.LTEMPLATE_PERIODIC_PROFILING_disable_profiling:
+     mov    r4, lr                     @ preserve lr
+     ldr    r0, .LdvmJitTraceProfilingOff
+     blx    r0
+     bx     r4
+
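    The counter handling in TEMPLATE_PERIODIC_PROFILING above corresponds roughly to the C
    below. This is a sketch only: the counter pointer is really recovered through lr-11 and the
    countdown through offThread_pProfileCountdown, and dvmJitTraceProfilingOff is assumed to
    take no arguments, as the blx call sequence suggests.

        /* Provided by the JIT; named in the literal pool (.LdvmJitTraceProfilingOff). */
        void dvmJitTraceProfilingOff(void);

        /* Bump this trace's execution counter and decrement the global sample
         * countdown; when the countdown underflows, turn trace profiling back
         * off instead of writing the counters back. */
        static void periodicProfile(int* traceCounter, int* profileCountdown)
        {
            int count = *traceCounter + 1;      /* increment profile counter  */
            int left  = *profileCountdown - 1;  /* decrement sample countdown */

            if (left < 0) {
                dvmJitTraceProfilingOff();      /* sample window exhausted    */
                return;
            }
            *traceCounter     = count;
            *profileCountdown = left;
        }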
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_RETURN_PROF
+dvmCompiler_TEMPLATE_RETURN_PROF:
+/* File: armv5te/TEMPLATE_RETURN_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_RETURN.S */
+    /*
+     * Unwind a frame from the Dalvik stack for compiled OP_RETURN_XXX.
+     * If the stored value in returnAddr
+     * is non-zero, the caller is compiled by the JIT thus return to the
+     * address in the code cache following the invoke instruction. Otherwise
+     * return to the special dvmJitToInterpNoChain entry point.
+     */
+#if defined(TEMPLATE_INLINE_PROFILING)
+    stmfd   sp!, {r0-r2,lr}             @ preserve live registers
+    mov     r0, r6
+    @ r0=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastJavaMethodTraceExit
+    ldmfd   sp!, {r0-r2,lr}             @ restore live registers
+#endif
+    SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
+    ldr     r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
+    ldr     r8, [rSELF, #offThread_suspendCount] @ r8<- suspendCount
+    ldr     rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+#if !defined(WITH_SELF_VERIFICATION)
+    ldr     r9,  [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
+#else
+    mov     r9, #0                      @ disable chaining
+#endif
+    ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
+                                        @ r2<- method we're returning to
+    cmp     r2, #0                      @ break frame?
+#if !defined(WITH_SELF_VERIFICATION)
+    beq     1f                          @ bail to interpreter
+#else
+    blxeq   lr                          @ punt to interpreter and compare state
+#endif
+    ldr     r1, .LdvmJitToInterpNoChainNoProfile @ defined in footer.S
+    mov     rFP, r10                    @ publish new FP
+    ldr     r10, [r2, #offMethod_clazz] @ r10<- method->clazz
+
+    str     r2, [rSELF, #offThread_method]@ self->method = newSave->method
+    ldr     r0, [r10, #offClassObject_pDvmDex] @ r0<- method->clazz->pDvmDex
+    str     rFP, [rSELF, #offThread_curFrame] @ self->curFrame = fp
+    add     rPC, rPC, #6                @ publish new rPC (advance 6 bytes)
+    str     r0, [rSELF, #offThread_methodClassDex]
+    cmp     r8, #0                      @ check the suspendCount
+    movne   r9, #0                      @ clear the chaining cell address
+    str     r9, [rSELF, #offThread_inJitCodeCache] @ in code cache or not
+    cmp     r9, #0                      @ chaining cell exists?
+    blxne   r9                          @ jump to the chaining cell
+#if defined(WITH_JIT_TUNING)
+    mov     r0, #kCallsiteInterpreted
+#endif
+    mov     pc, r1                      @ callsite is interpreted
+1:
+    stmia   rSELF, {rPC, rFP}           @ SAVE_PC_FP_TO_SELF()
+    ldr     r2, .LdvmMterpStdBail       @ defined in footer.S
+    mov     r1, #0                      @ changeInterp = false
+    mov     r0, rSELF                   @ Expecting rSELF in r0
+    blx     r2                          @ exit the interpreter
+
+#undef TEMPLATE_INLINE_PROFILING
+
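    The return-path decision made by TEMPLATE_RETURN_PROF above (and by the plain
    TEMPLATE_RETURN earlier) can be read as the following C outline; field names mirror the
    offsets used in the assembly, but the types are simplified stand-ins and the break-frame
    bail-out to dvmMterpStdBail is omitted.

        /* Only the fields the template touches. */
        typedef struct StackSaveArea {
            void* prevFrame;
            void* savedPc;
            void* returnAddr;     /* non-NULL => caller was JIT-compiled */
            void* method;
        } StackSaveArea;

        typedef struct Thread {
            int   suspendCount;
            void* inJitCodeCache;
        } Thread;

        /* Decide where execution continues after a compiled return. */
        static void* returnTarget(Thread* self, StackSaveArea* saveArea,
                                  void* dvmJitToInterpNoChainNoProfile)
        {
            void* chainingCellRet = saveArea->returnAddr;

            if (self->suspendCount != 0)    /* pending suspend request?     */
                chainingCellRet = NULL;     /* don't re-enter compiled code */

            self->inJitCodeCache = chainingCellRet;
            return chainingCellRet != NULL ? chainingCellRet
                                           : dvmJitToInterpNoChainNoProfile;
        }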
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF
+dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S */
+    /*
+     * For polymorphic callsites - setup the Dalvik frame and load Dalvik PC
+     * into rPC then jump to dvmJitToInterpNoChain to dispatch the
+     * runtime-resolved callee.
+     */
+    @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
+    ldrh    r7, [r0, #offMethod_registersSize]  @ r7<- methodToCall->regsSize
+    ldrh    r2, [r0, #offMethod_outsSize]  @ r2<- methodToCall->outsSize
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r8, [rSELF, #offThread_suspendCount] @ r8<- suspendCount
+    add     r3, r1, #1  @ Thumb addr is odd
+    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
+    sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
+    SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
+    sub     r10, r10, r2, lsl #2        @ r10<- bottom (newsave - outsSize)
+    cmp     r10, r9                     @ bottom < interpStackEnd?
+    bxlo    lr                          @ return to raise stack overflow excep.
+    @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
+    ldr     r9, [r0, #offMethod_clazz]      @ r9<- method->clazz
+    ldr     r10, [r0, #offMethod_accessFlags] @ r10<- methodToCall->accessFlags
+    str     rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+    str     rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
+    ldr     rPC, [r0, #offMethod_insns]     @ rPC<- methodToCall->insns
+
+
+    @ set up newSaveArea
+    str     rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
+    str     r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
+    str     r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
+    cmp     r8, #0                      @ suspendCount != 0
+    bxne    lr                          @ bail to the interpreter
+    tst     r10, #ACC_NATIVE
+#if !defined(WITH_SELF_VERIFICATION)
+    bne     .LinvokeNative
+#else
+    bxne    lr                          @ bail to the interpreter
+#endif
+
+    ldr     r10, .LdvmJitToInterpTraceSelectNoChain
+    ldr     r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
+
+    @ Update "thread" values for the new method
+    str     r0, [rSELF, #offThread_method]    @ self->method = methodToCall
+    str     r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
+    mov     rFP, r1                         @ fp = newFp
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
+    stmfd   sp!, {r0-r3}                    @ preserve r0-r3
+    mov     r1, r6
+    @ r0=methodToCall, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
+    ldmfd   sp!, {r0-r3}                    @ restore r0-r3
+#endif
+
+    @ Start executing the callee
+#if defined(WITH_JIT_TUNING)
+    mov     r0, #kInlineCacheMiss
+#endif
+    mov     pc, r10                         @ dvmJitToInterpTraceSelectNoChain
+
+#undef TEMPLATE_INLINE_PROFILING
+
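    The frame arithmetic at the top of this invoke template (and of the _CHAIN and _NATIVE
    variants) is the stack-overflow check sketched below, with u4-sized register slots. The
    helper name and parameters are hypothetical; the layout (newFp below the old save area,
    outs below the new save area, compared against interpStackEnd) is taken from the assembly.

        #include <stdbool.h>
        #include <stddef.h>
        #include <stdint.h>

        typedef uint32_t u4;

        /* The callee frame sits below the caller's save area: registersSize
         * slots of locals/args, then a save area, then outsSize outgoing args. */
        static bool frameFits(const u4* oldSaveArea, u4 registersSize, u4 outsSize,
                              const u4* interpStackEnd, size_t saveAreaSlots)
        {
            const u4* newFp  = oldSaveArea - registersSize;         /* newFp        */
            const u4* bottom = (newFp - saveAreaSlots) - outsSize;  /* stack bottom */

            /* false => bail so the interpreter can raise StackOverflowError */
            return bottom >= interpStackEnd;
        }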
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN_PROF
+dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN_PROF:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_CHAIN_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S */
+    /*
+     * For monomorphic callsite, setup the Dalvik frame and return to the
+     * Thumb code through the link register to transfer control to the callee
+     * method through a dedicated chaining cell.
+     */
+    @ r0 = methodToCall, r1 = returnCell, r2 = methodToCall->outsSize
+    @ rPC = dalvikCallsite, r7 = methodToCall->registersSize
+    @ methodToCall is guaranteed to be non-native
+.LinvokeChainProf:
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r8, [rSELF, #offThread_suspendCount]      @ r8<- suspendCount
+    add     r3, r1, #1  @ Thumb addr is odd
+    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
+    sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
+    SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
+    add     r12, lr, #2                 @ setup the punt-to-interp address
+    sub     r10, r10, r2, lsl #2        @ r10<- bottom (newsave - outsSize)
+    cmp     r10, r9                     @ bottom < interpStackEnd?
+    bxlo    r12                         @ return to raise stack overflow excep.
+    @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
+    ldr     r9, [r0, #offMethod_clazz]      @ r9<- method->clazz
+    str     rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+    str     rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
+
+    @ set up newSaveArea
+    str     rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
+    str     r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
+    str     r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
+    cmp     r8, #0                      @ suspendCount != 0
+    bxne    r12                         @ bail to the interpreter
+
+    ldr     r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
+
+    @ Update "thread" values for the new method
+    str     r0, [rSELF, #offThread_method]    @ self->method = methodToCall
+    str     r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
+    mov     rFP, r1                         @ fp = newFp
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
+    stmfd   sp!, {r0-r2,lr}             @ preserve clobbered live registers
+    mov     r1, r6
+    @ r0=methodToCall, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
+    ldmfd   sp!, {r0-r2,lr}             @ restore registers
+#endif
+
+    bx      lr                              @ return to the callee-chaining cell
+
+#undef TEMPLATE_INLINE_PROFILING
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF
+dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S */
+    /*
+     * For polymorphic callsite, check whether the cached class pointer matches
+     * the current one. If so setup the Dalvik frame and return to the
+     * Thumb code through the link register to transfer control to the callee
+     * method through a dedicated chaining cell.
+     *
+     * The predicted chaining cell is declared in ArmLIR.h with the
+     * following layout:
+     *
+     *  typedef struct PredictedChainingCell {
+     *      u4 branch;
+     *      const ClassObject *clazz;
+     *      const Method *method;
+     *      u4 counter;
+     *  } PredictedChainingCell;
+     *
+     * Upon returning to the callsite:
+     *    - lr  : to branch to the chaining cell
+     *    - lr+2: to punt to the interpreter
+     *    - lr+4: to fully resolve the callee and may rechain.
+     *            r3 <- class
+     *            r9 <- counter
+     */
+    @ r0 = this, r1 = returnCell, r2 = predictedChainCell, rPC = dalvikCallsite
+    ldr     r3, [r0, #offObject_clazz]  @ r3 <- this->class
+    ldr     r8, [r2, #4]    @ r8 <- predictedChainCell->clazz
+    ldr     r0, [r2, #8]    @ r0 <- predictedChainCell->method
+    ldr     r9, [rSELF, #offThread_icRechainCount] @ r9 <- shared rechainCount
+    cmp     r3, r8          @ predicted class == actual class?
+#if defined(WITH_JIT_TUNING)
+    ldr     r7, .LdvmICHitCount
+#if defined(WORKAROUND_CORTEX_A9_745320)
+    /* Don't use conditional loads if the HW defect exists */
+    bne     101f
+    ldr     r10, [r7, #0]
+101:
+#else
+    ldreq   r10, [r7, #0]
+#endif
+    add     r10, r10, #1
+    streq   r10, [r7, #0]
+#endif
+    ldreqh  r7, [r0, #offMethod_registersSize]  @ r7<- methodToCall->regsSize
+    ldreqh  r2, [r0, #offMethod_outsSize]  @ r2<- methodToCall->outsSize
+    beq     .LinvokeChainProf   @ predicted chain is valid
+    ldr     r7, [r3, #offClassObject_vtable] @ r7 <- this->class->vtable
+    cmp     r8, #0          @ initialized class or not
+    moveq   r1, #0
+    subne   r1, r9, #1      @ count--
+    strne   r1, [rSELF, #offThread_icRechainCount]  @ write back to thread
+    add     lr, lr, #4      @ return to fully-resolve landing pad
+    /*
+     * r1 <- count
+     * r2 <- &predictedChainCell
+     * r3 <- this->class
+     * r4 <- dPC
+     * r7 <- this->class->vtable
+     */
+    bx      lr
+
+#undef TEMPLATE_INLINE_PROFILING
+
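    Since the comment above already gives the PredictedChainingCell layout, a compact C view of
    the class check and rechain-count bookkeeping may help. The struct is copied from the
    comment; the dispatch helper and its handling of icRechainCount are a simplified sketch,
    not the VM's actual code.

        typedef struct ClassObject ClassObject;
        typedef struct Method Method;

        typedef struct PredictedChainingCell {
            unsigned int       branch;
            const ClassObject* clazz;
            const Method*      method;
            unsigned int       counter;
        } PredictedChainingCell;

        /* If the receiver's class matches the cached prediction, dispatch
         * straight through the chaining cell; otherwise fall back to full
         * resolution and count down toward a possible rechain. */
        static const Method* predictedDispatch(const ClassObject* actualClazz,
                                               PredictedChainingCell* cell,
                                               int* icRechainCount)
        {
            if (actualClazz == cell->clazz)
                return cell->method;        /* predicted chain is valid   */

            if (cell->clazz != NULL)        /* initialized prediction?    */
                (*icRechainCount)--;        /* decrement shared counter   */
            return NULL;                    /* fully resolve the callee   */
        }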
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF
+dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_NATIVE_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S */
+    @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
+    @ r7 = methodToCall->registersSize
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r8, [rSELF, #offThread_suspendCount]      @ r8<- suspendCount
+    add     r3, r1, #1  @ Thumb addr is odd
+    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
+    sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
+    SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
+    cmp     r10, r9                     @ bottom < interpStackEnd?
+    bxlo    lr                          @ return to raise stack overflow excep.
+    @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
+    str     rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+    str     rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
+
+    @ set up newSaveArea
+    str     rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
+    str     r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
+    str     r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
+    cmp     r8, #0                      @ suspendCount != 0
+    ldr     r8, [r0, #offMethod_nativeFunc] @ r8<- method->nativeFunc
+#if !defined(WITH_SELF_VERIFICATION)
+    bxne    lr                          @ bail to the interpreter
+#else
+    bx      lr                          @ bail to interpreter unconditionally
+#endif
+
+    @ go ahead and transfer control to the native code
+    ldr     r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
+    mov     r2, #0
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ not in the jit code cache
+    str     r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
+                                        @ newFp->localRefCookie=top
+    SAVEAREA_FROM_FP(r10, r1)           @ r10<- new stack save area
+
+    mov     r2, r0                        @ arg2<- methodToCall
+    mov     r0, r1                        @ arg0<- newFP
+    add     r1, rSELF, #offThread_retval  @ arg1<- &retval
+    mov     r3, rSELF                     @ arg3<- self
+#if defined(TEMPLATE_INLINE_PROFILING)
+    @ r2=methodToCall, r6=rSELF
+    stmfd   sp!, {r2,r6}                @ to be consumed after JNI return
+    stmfd   sp!, {r0-r3}                @ preserve r0-r3
+    mov     r0, r2
+    mov     r1, r6
+    @ r0=JNIMethod, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
+    ldmfd   sp!, {r0-r3}                @ restore r0-r3
+#endif
+
+    blx     r8                          @ off to the native code
+
+#if defined(TEMPLATE_INLINE_PROFILING)
+    ldmfd   sp!, {r0-r1}                @ restore r2 and r6
+    @ r0=JNIMethod, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastNativeMethodTraceExit
+#endif
+    @ native return; r10=newSaveArea
+    @ equivalent to dvmPopJniLocals
+    ldr     r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
+    ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top
+    ldr     r1, [rSELF, #offThread_exception] @ check for exception
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = fp
+    cmp     r1, #0                      @ null?
+    str     r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
+    ldr     r0, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+
+    @ r0 = dalvikCallsitePC
+    bne     .LhandleException           @ no, handle exception
+
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ set the mode properly
+    cmp     r2, #0                      @ return chaining cell still exists?
+    bxne    r2                          @ yes - go ahead
+
+    @ continue executing the next instruction through the interpreter
+    ldr     r1, .LdvmJitToInterpTraceSelectNoChain @ defined in footer.S
+    add     rPC, r0, #6                 @ reconstruct new rPC (advance 6 bytes)
+#if defined(WITH_JIT_TUNING)
+    mov     r0, #kCallsiteInterpreted
+#endif
+    mov     pc, r1
+
+#undef TEMPLATE_INLINE_PROFILING
+
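    After the native call returns, the template performs the equivalent of dvmPopJniLocals plus
    an exception check before deciding whether to resume in the code cache. A rough C outline,
    with simplified field names and a hypothetical helper:

        typedef struct Thread {
            void* exception;             /* pending exception, if any        */
            void* curFrame;
            void* jniLocal_topCookie;
            void* inJitCodeCache;
        } Thread;

        typedef struct StackSaveArea {
            void* localRefCookie;        /* saved top of JNI local-ref table */
            void* returnAddr;            /* chaining cell return address     */
        } StackSaveArea;

        /* Returns the address to continue at, or NULL when an exception must
         * be handled (mirrors the bne .LhandleException path above). */
        static void* nativeReturn(Thread* self, StackSaveArea* newSaveArea,
                                  void* callerFp)
        {
            /* equivalent to dvmPopJniLocals */
            self->jniLocal_topCookie = newSaveArea->localRefCookie;
            self->curFrame = callerFp;

            if (self->exception != NULL)
                return NULL;                    /* handle the exception        */

            self->inJitCodeCache = newSaveArea->returnAddr;
            return newSaveArea->returnAddr;     /* NULL => back to interpreter */
        }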
     .size   dvmCompilerTemplateStart, .-dvmCompilerTemplateStart
 /* File: armv5te/footer.S */
 /*
@@ -1215,54 +1596,61 @@
 .LinvokeNative:
     @ Prep for the native call
     @ r1 = newFP, r0 = methodToCall
-    ldr     r3, [rGLUE, #offGlue_self]      @ r3<- glue->self
     mov     r2, #0
-    ldr     r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
-    str     r2, [r3, #offThread_inJitCodeCache] @ not in jit code cache
-    str     r1, [r3, #offThread_curFrame]   @ self->curFrame = newFp
+    ldr     r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ not in jit code cache
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
     str     r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
                                         @ newFp->localRefCookie=top
-    mov     r9, r3                      @ r9<- glue->self (preserve)
+    ldr     lr, [rSELF, #offThread_pInterpBreak]
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- new stack save area
 
     mov     r2, r0                      @ r2<- methodToCall
+    ldr     lr, [lr]                    @ lr<- set of active profilers
     mov     r0, r1                      @ r0<- newFP
-    add     r1, rGLUE, #offGlue_retval  @ r1<- &retval
-#if defined(WITH_INLINE_PROFILING)
-    @ r2: methodToCall, r6: rGLUE
+    add     r1, rSELF, #offThread_retval  @ r1<- &retval
+    mov     r3, rSELF                   @ arg3<- self
+    ands    lr, #kSubModeMethodTrace
+    beq     121f                        @ hop if not profiling
+    @ r2: methodToCall, r6: rSELF
     stmfd   sp!, {r2,r6}
     stmfd   sp!, {r0-r3}
     mov     r0, r2
     mov     r1, r6
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}
-#endif
 
-    LDR_PC_LR "[r2, #offMethod_nativeFunc]"
+    mov     lr, pc
+    ldr     pc, [r2, #offMethod_nativeFunc]
 
-#if defined(WITH_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}
-    LDR_PC_LR ".LdvmFastNativeMethodTraceExit"
-#endif
+    mov     lr, pc
+    ldr     pc, .LdvmFastNativeMethodTraceExit
+    b       212f
+121:
+    mov     lr, pc
+    ldr     pc, [r2, #offMethod_nativeFunc]
+212:
     @ Refresh Jit's on/off status
-    ldr     r3, [rGLUE, #offGlue_ppJitProfTable]
+    ldr     r3, [rSELF, #offThread_ppJitProfTable]
 
-    @ native return; r9=self, r10=newSaveArea
+    @ native return; r10=newSaveArea
     @ equivalent to dvmPopJniLocals
     ldr     r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
     ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top
-    ldr     r1, [r9, #offThread_exception] @ check for exception
+    ldr     r1, [rSELF, #offThread_exception] @ check for exception
     ldr     r3, [r3]    @ r1 <- pointer to Jit profile table
-    str     rFP, [r9, #offThread_curFrame]  @ self->curFrame = fp
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = fp
     cmp     r1, #0                      @ null?
-    str     r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top
+    str     r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
+    str     r3, [rSELF, #offThread_pJitProfTable]  @ cache current JitProfTable
     ldr     r0, [r10, #offStackSaveArea_savedPc] @ reload rPC
-    str     r3, [rGLUE, #offGlue_pJitProfTable]  @ cache current JitProfTable
 
     @ r0 = dalvikCallsitePC
     bne     .LhandleException           @ no, handle exception
 
-    str     r2, [r9, #offThread_inJitCodeCache] @ set the new mode
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ set the new mode
     cmp     r2, #0                      @ return chaining cell still exists?
     bxne    r2                          @ yes - go ahead
 
@@ -1284,9 +1672,8 @@
 .LdeadFood:
     .word   0xdeadf00d
 #endif
-    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
     mov     r2, #0
-    str     r2, [r3, #offThread_inJitCodeCache] @ in interpreter land
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ in interpreter land
     ldr     r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func
     ldr     rIBASE, .LdvmAsmInstructionStart    @ same as above
     mov     rPC, r0                 @ reload the faulting Dalvik address
@@ -1307,6 +1694,8 @@
     .word   dvmMterpCommonExceptionThrown
 .LdvmLockObject:
     .word   dvmLockObject
+.LdvmJitTraceProfilingOff:
+    .word   dvmJitTraceProfilingOff
 #if defined(WITH_JIT_TUNING)
 .LdvmICHitCount:
     .word   gDvmICHitCount
@@ -1315,14 +1704,12 @@
 .LdvmSelfVerificationMemOpDecode:
     .word   dvmSelfVerificationMemOpDecode
 #endif
-#if defined(WITH_INLINE_PROFILING)
 .LdvmFastMethodTraceEnter:
     .word   dvmFastMethodTraceEnter
 .LdvmFastNativeMethodTraceExit:
     .word   dvmFastNativeMethodTraceExit
 .LdvmFastJavaMethodTraceExit:
     .word   dvmFastJavaMethodTraceExit
-#endif
 .L__aeabi_cdcmple:
     .word   __aeabi_cdcmple
 .L__aeabi_cfcmple:
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
index 9fb8892..fb1e048 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
@@ -62,7 +62,7 @@
 
   reg nick      purpose
   r5  rFP       interpreted frame pointer, used for accessing locals and args
-  r6  rGLUE     MterpGlue pointer
+  r6  rSELF     thread pointer
 
 The following registers have fixed assignments in mterp but are scratch
 registers in compiled code
@@ -80,7 +80,7 @@
 /* single-purpose registers, given names for clarity */
 #define rPC     r4
 #define rFP     r5
-#define rGLUE   r6
+#define rSELF   r6
 #define rINST   r7
 #define rIBASE  r8
 
@@ -108,17 +108,6 @@
  * ===========================================================================
  */
 
-/*
- * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
- * Jump to subroutine.
- *
- * May modify IP and LR.
- */
-.macro  LDR_PC_LR source
-    mov     lr, pc
-    ldr     pc, \source
-.endm
-
 
     .global dvmCompilerTemplateStart
     .type   dvmCompilerTemplateStart, %function
@@ -177,16 +166,17 @@
      * address in the code cache following the invoke instruction. Otherwise
      * return to the special dvmJitToInterpNoChain entry point.
      */
-#if defined(WITH_INLINE_PROFILING)
+#if defined(TEMPLATE_INLINE_PROFILING)
     stmfd   sp!, {r0-r2,lr}             @ preserve live registers
     mov     r0, r6
-    @ r0=rGlue
-    LDR_PC_LR ".LdvmFastJavaMethodTraceExit"
+    @ r0=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastJavaMethodTraceExit
     ldmfd   sp!, {r0-r2,lr}             @ restore live registers
 #endif
     SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
     ldr     r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
-    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
+    ldr     r8, [rSELF, #offThread_suspendCount] @ r8<- suspendCount
     ldr     rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
 #if !defined(WITH_SELF_VERIFICATION)
     ldr     r9,  [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
@@ -195,7 +185,6 @@
 #endif
     ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                         @ r2<- method we're returning to
-    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
     cmp     r2, #0                      @ break frame?
 #if !defined(WITH_SELF_VERIFICATION)
     beq     1f                          @ bail to interpreter
@@ -205,16 +194,15 @@
     ldr     r1, .LdvmJitToInterpNoChainNoProfile @ defined in footer.S
     mov     rFP, r10                    @ publish new FP
     ldr     r10, [r2, #offMethod_clazz] @ r10<- method->clazz
-    ldr     r8, [r8]                    @ r8<- suspendCount
 
-    str     r2, [rGLUE, #offGlue_method]@ glue->method = newSave->method
+    str     r2, [rSELF, #offThread_method]@ self->method = newSave->method
     ldr     r0, [r10, #offClassObject_pDvmDex] @ r0<- method->clazz->pDvmDex
-    str     rFP, [r3, #offThread_curFrame] @ self->curFrame = fp
+    str     rFP, [rSELF, #offThread_curFrame] @ self->curFrame = fp
     add     rPC, rPC, #6                @ publish new rPC (advance 6 bytes)
-    str     r0, [rGLUE, #offGlue_methodClassDex]
+    str     r0, [rSELF, #offThread_methodClassDex]
     cmp     r8, #0                      @ check the suspendCount
     movne   r9, #0                      @ clear the chaining cell address
-    str     r9, [r3, #offThread_inJitCodeCache] @ in code cache or not
+    str     r9, [rSELF, #offThread_inJitCodeCache] @ in code cache or not
     cmp     r9, #0                      @ chaining cell exists?
     blxne   r9                          @ jump to the chaining cell
 #if defined(WITH_JIT_TUNING)
@@ -222,10 +210,10 @@
 #endif
     mov     pc, r1                      @ callsite is interpreted
 1:
-    stmia   rGLUE, {rPC, rFP}           @ SAVE_PC_FP_TO_GLUE()
+    stmia   rSELF, {rPC, rFP}           @ SAVE_PC_FP_TO_SELF()
     ldr     r2, .LdvmMterpStdBail       @ defined in footer.S
     mov     r1, #0                      @ changeInterp = false
-    mov     r0, rGLUE                   @ Expecting rGLUE in r0
+    mov     r0, rSELF                   @ Expecting rSELF in r0
     blx     r2                          @ exit the interpreter
 
 /* ------------------------------ */
@@ -241,14 +229,13 @@
     @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
     ldrh    r7, [r0, #offMethod_registersSize]  @ r7<- methodToCall->regsSize
     ldrh    r2, [r0, #offMethod_outsSize]  @ r2<- methodToCall->outsSize
-    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
-    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r8, [rSELF, #offThread_suspendCount] @ r8<- suspendCount
     add     r3, r1, #1  @ Thumb addr is odd
     SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
     sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
     sub     r10, r10, r2, lsl #2        @ r10<- bottom (newsave - outsSize)
-    ldr     r8, [r8]                    @ r8<- suspendCount (int)
     cmp     r10, r9                     @ bottom < interpStackEnd?
     bxlo    lr                          @ return to raise stack overflow excep.
     @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
@@ -274,18 +261,18 @@
 
     ldr     r10, .LdvmJitToInterpTraceSelectNoChain
     ldr     r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
-    ldr     r2, [rGLUE, #offGlue_self]      @ r2<- glue->self
 
-    @ Update "glue" values for the new method
-    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
-    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
+    @ Update "thread" values for the new method
+    str     r0, [rSELF, #offThread_method]    @ self->method = methodToCall
+    str     r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
     mov     rFP, r1                         @ fp = newFp
-    str     rFP, [r2, #offThread_curFrame]  @ self->curFrame = newFp
-#if defined(WITH_INLINE_PROFILING)
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
     stmfd   sp!, {r0-r3}                    @ preserve r0-r3
     mov     r1, r6
-    @ r0=methodToCall, r1=rGlue
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    @ r0=methodToCall, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}                    @ restore r0-r3
 #endif
 
@@ -309,15 +296,14 @@
     @ rPC = dalvikCallsite, r7 = methodToCall->registersSize
     @ methodToCall is guaranteed to be non-native
 .LinvokeChain:
-    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
-    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r8, [rSELF, #offThread_suspendCount]      @ r8<- suspendCount
     add     r3, r1, #1  @ Thumb addr is odd
     SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
     sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
     add     r12, lr, #2                 @ setup the punt-to-interp address
     sub     r10, r10, r2, lsl #2        @ r10<- bottom (newsave - outsSize)
-    ldr     r8, [r8]                    @ r8<- suspendCount (int)
     cmp     r10, r9                     @ bottom < interpStackEnd?
     bxlo    r12                         @ return to raise stack overflow excep.
     @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
@@ -333,18 +319,18 @@
     bxne    r12                         @ bail to the interpreter
 
     ldr     r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
-    ldr     r2, [rGLUE, #offGlue_self]      @ r2<- glue->self
 
-    @ Update "glue" values for the new method
-    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
-    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
+    @ Update "thread" values for the new method
+    str     r0, [rSELF, #offThread_method]    @ self->method = methodToCall
+    str     r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
     mov     rFP, r1                         @ fp = newFp
-    str     rFP, [r2, #offThread_curFrame]  @ self->curFrame = newFp
-#if defined(WITH_INLINE_PROFILING)
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
     stmfd   sp!, {r0-r2,lr}             @ preserve clobbered live registers
     mov     r1, r6
-    @ r0=methodToCall, r1=rGlue
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    @ r0=methodToCall, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r2,lr}             @ restore registers
 #endif
 
@@ -382,7 +368,7 @@
     ldr     r3, [r0, #offObject_clazz]  @ r3 <- this->class
     ldr     r8, [r2, #4]    @ r8 <- predictedChainCell->clazz
     ldr     r0, [r2, #8]    @ r0 <- predictedChainCell->method
-    ldr     r9, [rGLUE, #offGlue_icRechainCount]   @ r1 <- shared rechainCount
+    ldr     r9, [rSELF, #offThread_icRechainCount] @ r9 <- shared rechainCount
     cmp     r3, r8          @ predicted class == actual class?
 #if defined(WITH_JIT_TUNING)
     ldr     r7, .LdvmICHitCount
@@ -404,7 +390,7 @@
     cmp     r8, #0          @ initialized class or not
     moveq   r1, #0
     subne   r1, r9, #1      @ count--
-    strne   r1, [rGLUE, #offGlue_icRechainCount]   @ write back to InterpState
+    strne   r1, [rSELF, #offThread_icRechainCount]  @ write back to thread
     add     lr, lr, #4      @ return to fully-resolve landing pad
     /*
      * r1 <- count
@@ -422,13 +408,12 @@
 /* File: armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S */
     @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
     @ r7 = methodToCall->registersSize
-    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
-    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r8, [rSELF, #offThread_suspendCount]      @ r8<- suspendCount
     add     r3, r1, #1  @ Thumb addr is odd
     SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
     sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
-    ldr     r8, [r8]                    @ r3<- suspendCount (int)
     cmp     r10, r9                     @ bottom < interpStackEnd?
     bxlo    lr                          @ return to raise stack overflow excep.
     @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
@@ -438,7 +423,6 @@
     @ set up newSaveArea
     str     rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
     str     r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
-    ldr     r3, [rGLUE, #offGlue_self]      @ r3<- glue->self
     str     r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
     cmp     r8, #0                      @ suspendCount != 0
     ldr     r8, [r0, #offMethod_nativeFunc] @ r8<- method->nativeFunc
@@ -449,50 +433,52 @@
 #endif
 
     @ go ahead and transfer control to the native code
-    ldr     r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
+    ldr     r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
     mov     r2, #0
-    str     r1, [r3, #offThread_curFrame]   @ self->curFrame = newFp
-    str     r2, [r3, #offThread_inJitCodeCache] @ not in the jit code cache
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ not in the jit code cache
     str     r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
                                         @ newFp->localRefCookie=top
-    mov     r9, r3                      @ r9<- glue->self (preserve)
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- new stack save area
 
-    mov     r2, r0                      @ r2<- methodToCall
-    mov     r0, r1                      @ r0<- newFP
-    add     r1, rGLUE, #offGlue_retval  @ r1<- &retval
-#if defined(WITH_INLINE_PROFILING)
-    @ r2=methodToCall, r6=rGLUE
+    mov     r2, r0                        @ arg2<- methodToCall
+    mov     r0, r1                        @ arg0<- newFP
+    add     r1, rSELF, #offThread_retval  @ arg1<- &retval
+    mov     r3, rSELF                     @ arg3<- self
+#if defined(TEMPLATE_INLINE_PROFILING)
+    @ r2=methodToCall, r6=rSELF
     stmfd   sp!, {r2,r6}                @ to be consumed after JNI return
     stmfd   sp!, {r0-r3}                @ preserve r0-r3
     mov     r0, r2
     mov     r1, r6
-    @ r0=JNIMethod, r1=rGlue
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    @ r0=JNIMethod, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
 #endif
 
     blx     r8                          @ off to the native code
 
-#if defined(WITH_INLINE_PROFILING)
+#if defined(TEMPLATE_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}                @ restore r2 and r6
-    @ r0=JNIMethod, r1=rGlue
-    LDR_PC_LR ".LdvmFastNativeMethodTraceExit"
+    @ r0=JNIMethod, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastNativeMethodTraceExit
 #endif
-    @ native return; r9=self, r10=newSaveArea
+    @ native return; r10=newSaveArea
     @ equivalent to dvmPopJniLocals
     ldr     r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
     ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top
-    ldr     r1, [r9, #offThread_exception] @ check for exception
-    str     rFP, [r9, #offThread_curFrame]  @ self->curFrame = fp
+    ldr     r1, [rSELF, #offThread_exception] @ check for exception
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = fp
     cmp     r1, #0                      @ null?
-    str     r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top
+    str     r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
     ldr     r0, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
 
     @ r0 = dalvikCallsitePC
     bne     .LhandleException           @ no, handle exception
 
-    str     r2, [r9, #offThread_inJitCodeCache] @ set the mode properly
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ set the mode properly
     cmp     r2, #0                      @ return chaining cell still exists?
     bxne    r2                          @ yes - go ahead
 
@@ -1108,9 +1094,9 @@
      */
     vpush   {d0-d15}                    @ save out all fp registers
     push    {r0-r12,lr}                 @ save out all registers
+    ldr     r2, .LdvmSelfVerificationMemOpDecode @ defined in footer.S
     mov     r0, lr                      @ arg0 <- link register
     mov     r1, sp                      @ arg1 <- stack pointer
-    ldr     r2, .LdvmSelfVerificationMemOpDecode @ defined in footer.S
     blx     r2                          @ decode and handle the mem op
     pop     {r0-r12,lr}                 @ restore all registers
     vpop    {d0-d15}                    @ restore all fp registers
@@ -1390,7 +1376,7 @@
      *        r1 - the Dalvik PC to begin interpretation.
      *    else
      *        [lr, #3] contains Dalvik PC to begin interpretation
-     *    rGLUE - pointer to interpState
+     *    rSELF - pointer to thread
      *    rFP - Dalvik frame pointer
      */
     cmp     lr, #0
@@ -1431,10 +1417,10 @@
     str     r3, [r0, #offThread_inJitCodeCache]
     blx     r2                           @ dvmLockObject(self, obj)
     @ refresh Jit's on/off status
-    ldr     r0, [rGLUE, #offGlue_ppJitProfTable]
-    ldr     r0, [r0]
+    ldr     r3, [rSELF, #offThread_ppJitProfTable]
+    ldr     r3, [r3]
     ldr     r2, .LdvmJitToInterpNoChain
-    str     r0, [rGLUE, #offGlue_pJitProfTable]
+    str     r3, [rSELF, #offThread_pJitProfTable]
     @ Bail to interpreter - no chain [note - r4 still contains rPC]
 #if defined(WITH_JIT_TUNING)
     mov     r0, #kHeavyweightMonitor
@@ -1462,11 +1448,10 @@
     str     r3, [r0, #offThread_inJitCodeCache]
     blx     r2             @ dvmLockObject(self, obj)
     @ refresh Jit's on/off status & test for exception
-    ldr     r0, [rGLUE, #offGlue_ppJitProfTable]
-    ldr     r1, [rGLUE, #offGlue_self]
-    ldr     r0, [r0]
-    ldr     r1, [r1, #offThread_exception]
-    str     r0, [rGLUE, #offGlue_pJitProfTable]
+    ldr     r3, [rSELF, #offThread_ppJitProfTable]
+    ldr     r3, [r3]
+    ldr     r1, [rSELF, #offThread_exception]
+    str     r3, [rSELF, #offThread_pJitProfTable]
     cmp     r1, #0
     beq     1f
     ldr     r2, .LhandleException
@@ -1479,6 +1464,394 @@
 #endif
     ldr     pc, .LdvmJitToInterpNoChain
 
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_PERIODIC_PROFILING
+dvmCompiler_TEMPLATE_PERIODIC_PROFILING:
+/* File: armv5te/TEMPLATE_PERIODIC_PROFILING.S */
+    /*
+     * Increment profile counter for this trace, and decrement
+     * sample counter.  If sample counter goes below zero, turn
+     * off profiling.
+     *
+     * On entry
+     * (lr-11) is address of pointer to counter.  Note: the counter
+     *    actually exists 10 bytes before the return target, but because
+     *    we are arriving from thumb mode, lr will have its low bit set.
+     */
+     ldr    r0, [lr,#-11]
+     ldr    r1, [rSELF, #offThread_pProfileCountdown]
+     ldr    r2, [r0]                    @ get counter
+     ldr    r3, [r1]                    @ get countdown timer
+     add    r2, #1                      @ increment trace counter
+     subs   r3, #1                      @ decrement countdown timer
+     blt    .LTEMPLATE_PERIODIC_PROFILING_disable_profiling
+     str    r2, [r0]
+     str    r3, [r1]
+     bx     lr
+
+.LTEMPLATE_PERIODIC_PROFILING_disable_profiling:
+     mov    r4, lr                     @ preserve lr
+     ldr    r0, .LdvmJitTraceProfilingOff
+     blx    r0
+     bx     r4
+
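
For reference, the handler above reduces to the following C (a sketch only,
assuming the VM's internal headers; the two pointer parameters stand for the
counter word stashed 10 bytes before the return target (lr carries the Thumb
bit, hence the -11 offset) and for self->pProfileCountdown):

    static void periodicProfile(int* traceCounter, int* countdown)
    {
        int count = *traceCounter + 1;      /* bump this trace's hit count */
        int ticks = *countdown - 1;         /* burn one sampling tick */
        if (ticks < 0) {
            dvmJitTraceProfilingOff();      /* window exhausted: stop profiling */
        } else {
            *traceCounter = count;
            *countdown    = ticks;
        }
    }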
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_RETURN_PROF
+dvmCompiler_TEMPLATE_RETURN_PROF:
+/* File: armv5te/TEMPLATE_RETURN_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_RETURN.S */
+    /*
+     * Unwind a frame from the Dalvik stack for compiled OP_RETURN_XXX.
+     * If the stored value in returnAddr
+     * is non-zero, the caller is compiled by the JIT thus return to the
+     * address in the code cache following the invoke instruction. Otherwise
+     * return to the special dvmJitToInterpNoChain entry point.
+     */
+#if defined(TEMPLATE_INLINE_PROFILING)
+    stmfd   sp!, {r0-r2,lr}             @ preserve live registers
+    mov     r0, r6
+    @ r0=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastJavaMethodTraceExit
+    ldmfd   sp!, {r0-r2,lr}             @ restore live registers
+#endif
+    SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
+    ldr     r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
+    ldr     r8, [rSELF, #offThread_suspendCount] @ r8<- suspendCount
+    ldr     rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+#if !defined(WITH_SELF_VERIFICATION)
+    ldr     r9,  [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
+#else
+    mov     r9, #0                      @ disable chaining
+#endif
+    ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
+                                        @ r2<- method we're returning to
+    cmp     r2, #0                      @ break frame?
+#if !defined(WITH_SELF_VERIFICATION)
+    beq     1f                          @ bail to interpreter
+#else
+    blxeq   lr                          @ punt to interpreter and compare state
+#endif
+    ldr     r1, .LdvmJitToInterpNoChainNoProfile @ defined in footer.S
+    mov     rFP, r10                    @ publish new FP
+    ldr     r10, [r2, #offMethod_clazz] @ r10<- method->clazz
+
+    str     r2, [rSELF, #offThread_method]@ self->method = newSave->method
+    ldr     r0, [r10, #offClassObject_pDvmDex] @ r0<- method->clazz->pDvmDex
+    str     rFP, [rSELF, #offThread_curFrame] @ self->curFrame = fp
+    add     rPC, rPC, #6                @ publish new rPC (advance 6 bytes)
+    str     r0, [rSELF, #offThread_methodClassDex]
+    cmp     r8, #0                      @ check the suspendCount
+    movne   r9, #0                      @ clear the chaining cell address
+    str     r9, [rSELF, #offThread_inJitCodeCache] @ in code cache or not
+    cmp     r9, #0                      @ chaining cell exists?
+    blxne   r9                          @ jump to the chaining cell
+#if defined(WITH_JIT_TUNING)
+    mov     r0, #kCallsiteInterpreted
+#endif
+    mov     pc, r1                      @ callsite is interpreted
+1:
+    stmia   rSELF, {rPC, rFP}           @ SAVE_PC_FP_TO_SELF()
+    ldr     r2, .LdvmMterpStdBail       @ defined in footer.S
+    mov     r1, #0                      @ changeInterp = false
+    mov     r0, rSELF                   @ Expecting rSELF in r0
+    blx     r2                          @ exit the interpreter
+
+#undef TEMPLATE_INLINE_PROFILING
+
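
The unwind performed by TEMPLATE_RETURN/TEMPLATE_RETURN_PROF above corresponds
roughly to the C below; field and helper names follow the assembly comments
and may not match the VM headers exactly:

    StackSaveArea* curSave  = SAVEAREA_FROM_FP(fp);
    StackSaveArea* prevSave = SAVEAREA_FROM_FP(curSave->prevFrame);
    const Method*  caller   = prevSave->method;

    if (caller == NULL) {                        /* break frame: leave compiled code */
        dvmMterpStdBail(self, false);
    }
    self->method         = caller;
    self->methodClassDex = caller->clazz->pDvmDex;
    self->curFrame       = curSave->prevFrame;
    const u2* resumePC   = curSave->savedPc + 3; /* new rPC: skip the 3-unit invoke (6 bytes) */

    void* chainCell = (self->suspendCount != 0) ? NULL : curSave->returnAddr;
    self->inJitCodeCache = chainCell;
    /* jump to chainCell if non-NULL, else to dvmJitToInterpNoChainNoProfile */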
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF
+dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S */
+    /*
+     * For polymorphic callsites - setup the Dalvik frame and load Dalvik PC
+     * into rPC then jump to dvmJitToInterpNoChain to dispatch the
+     * runtime-resolved callee.
+     */
+    @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
+    ldrh    r7, [r0, #offMethod_registersSize]  @ r7<- methodToCall->regsSize
+    ldrh    r2, [r0, #offMethod_outsSize]  @ r2<- methodToCall->outsSize
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r8, [rSELF, #offThread_suspendCount] @ r8<- suspendCount
+    add     r3, r1, #1  @ Thumb addr is odd
+    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
+    sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
+    SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
+    sub     r10, r10, r2, lsl #2        @ r10<- bottom (newsave - outsSize)
+    cmp     r10, r9                     @ bottom < interpStackEnd?
+    bxlo    lr                          @ return to raise stack overflow excep.
+    @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
+    ldr     r9, [r0, #offMethod_clazz]      @ r9<- method->clazz
+    ldr     r10, [r0, #offMethod_accessFlags] @ r10<- methodToCall->accessFlags
+    str     rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+    str     rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
+    ldr     rPC, [r0, #offMethod_insns]     @ rPC<- methodToCall->insns
+
+
+    @ set up newSaveArea
+    str     rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
+    str     r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
+    str     r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
+    cmp     r8, #0                      @ suspendCount != 0
+    bxne    lr                          @ bail to the interpreter
+    tst     r10, #ACC_NATIVE
+#if !defined(WITH_SELF_VERIFICATION)
+    bne     .LinvokeNative
+#else
+    bxne    lr                          @ bail to the interpreter
+#endif
+
+    ldr     r10, .LdvmJitToInterpTraceSelectNoChain
+    ldr     r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
+
+    @ Update "thread" values for the new method
+    str     r0, [rSELF, #offThread_method]    @ self->method = methodToCall
+    str     r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
+    mov     rFP, r1                         @ fp = newFp
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
+    stmfd   sp!, {r0-r3}                    @ preserve r0-r3
+    mov     r1, r6
+    @ r0=methodToCall, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
+    ldmfd   sp!, {r0-r3}                    @ restore r0-r3
+#endif
+
+    @ Start executing the callee
+#if defined(WITH_JIT_TUNING)
+    mov     r0, #kInlineCacheMiss
+#endif
+    mov     pc, r10                         @ dvmJitToInterpTraceSelectNoChain
+
+#undef TEMPLATE_INLINE_PROFILING
+
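
The stack-overflow check and newSaveArea setup shared by the invoke templates
is, in rough C (SAVEAREA_FROM_FP is the VM's frame macro, the rest of the
names are illustrative; returnCell arrives with the Thumb bit already set):

    u4* newFp  = (u4*) SAVEAREA_FROM_FP(fp)    - methodToCall->registersSize;
    u4* bottom = (u4*) SAVEAREA_FROM_FP(newFp) - methodToCall->outsSize;
    if ((u1*) bottom < self->interpStackEnd)
        return;                                 /* punt: raise stack overflow */

    StackSaveArea* newSave = SAVEAREA_FROM_FP(newFp);
    newSave->prevFrame  = fp;
    newSave->savedPc    = pc;                   /* caller's resume point */
    newSave->returnAddr = returnCell;           /* chaining-cell return address */
    newSave->method     = methodToCall;
    if (self->suspendCount != 0)
        return;                                 /* pending suspend: bail to interpreter */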
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN_PROF
+dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN_PROF:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_CHAIN_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S */
+    /*
+     * For monomorphic callsite, setup the Dalvik frame and return to the
+     * Thumb code through the link register to transfer control to the callee
+     * method through a dedicated chaining cell.
+     */
+    @ r0 = methodToCall, r1 = returnCell, r2 = methodToCall->outsSize
+    @ rPC = dalvikCallsite, r7 = methodToCall->registersSize
+    @ methodToCall is guaranteed to be non-native
+.LinvokeChainProf:
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r8, [rSELF, #offThread_suspendCount]      @ r8<- suspendCount
+    add     r3, r1, #1  @ Thumb addr is odd
+    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
+    sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
+    SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
+    add     r12, lr, #2                 @ setup the punt-to-interp address
+    sub     r10, r10, r2, lsl #2        @ r10<- bottom (newsave - outsSize)
+    cmp     r10, r9                     @ bottom < interpStackEnd?
+    bxlo    r12                         @ return to raise stack overflow excep.
+    @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
+    ldr     r9, [r0, #offMethod_clazz]      @ r9<- method->clazz
+    str     rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+    str     rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
+
+    @ set up newSaveArea
+    str     rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
+    str     r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
+    str     r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
+    cmp     r8, #0                      @ suspendCount != 0
+    bxne    r12                         @ bail to the interpreter
+
+    ldr     r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
+
+    @ Update "thread" values for the new method
+    str     r0, [rSELF, #offThread_method]    @ self->method = methodToCall
+    str     r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
+    mov     rFP, r1                         @ fp = newFp
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
+    stmfd   sp!, {r0-r2,lr}             @ preserve clobbered live registers
+    mov     r1, r6
+    @ r0=methodToCall, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
+    ldmfd   sp!, {r0-r2,lr}             @ restore registers
+#endif
+
+    bx      lr                              @ return to the callee-chaining cell
+
+#undef TEMPLATE_INLINE_PROFILING
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF
+dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S */
+    /*
+     * For polymorphic callsite, check whether the cached class pointer matches
+     * the current one. If so setup the Dalvik frame and return to the
+     * Thumb code through the link register to transfer control to the callee
+     * method through a dedicated chaining cell.
+     *
+     * The predicted chaining cell is declared in ArmLIR.h with the
+     * following layout:
+     *
+     *  typedef struct PredictedChainingCell {
+     *      u4 branch;
+     *      const ClassObject *clazz;
+     *      const Method *method;
+     *      u4 counter;
+     *  } PredictedChainingCell;
+     *
+     * Upon returning to the callsite:
+     *    - lr  : to branch to the chaining cell
+     *    - lr+2: to punt to the interpreter
+     *    - lr+4: to fully resolve the callee and may rechain.
+     *            r3 <- class
+     *            r9 <- counter
+     */
+    @ r0 = this, r1 = returnCell, r2 = predictedChainCell, rPC = dalvikCallsite
+    ldr     r3, [r0, #offObject_clazz]  @ r3 <- this->class
+    ldr     r8, [r2, #4]    @ r8 <- predictedChainCell->clazz
+    ldr     r0, [r2, #8]    @ r0 <- predictedChainCell->method
+    ldr     r9, [rSELF, #offThread_icRechainCount] @ r9 <- shared rechainCount
+    cmp     r3, r8          @ predicted class == actual class?
+#if defined(WITH_JIT_TUNING)
+    ldr     r7, .LdvmICHitCount
+#if defined(WORKAROUND_CORTEX_A9_745320)
+    /* Don't use conditional loads if the HW defect exists */
+    bne     101f
+    ldr     r10, [r7, #0]
+101:
+#else
+    ldreq   r10, [r7, #0]
+#endif
+    add     r10, r10, #1
+    streq   r10, [r7, #0]
+#endif
+    ldreqh  r7, [r0, #offMethod_registersSize]  @ r7<- methodToCall->regsSize
+    ldreqh  r2, [r0, #offMethod_outsSize]  @ r2<- methodToCall->outsSize
+    beq     .LinvokeChainProf   @ predicted chain is valid
+    ldr     r7, [r3, #offClassObject_vtable] @ r7 <- this->class->vtable
+    cmp     r8, #0          @ initialized class or not
+    moveq   r1, #0
+    subne   r1, r9, #1      @ count--
+    strne   r1, [rSELF, #offThread_icRechainCount]  @ write back to thread
+    add     lr, lr, #4      @ return to fully-resolve landing pad
+    /*
+     * r1 <- count
+     * r2 <- &predictedChainCell
+     * r3 <- this->class
+     * r4 <- dPC
+     * r7 <- this->class->vtable
+     */
+    bx      lr
+
+#undef TEMPLATE_INLINE_PROFILING
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF
+dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_NATIVE_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S */
+    @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
+    @ r7 = methodToCall->registersSize
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r8, [rSELF, #offThread_suspendCount]      @ r8<- suspendCount
+    add     r3, r1, #1  @ Thumb addr is odd
+    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
+    sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
+    SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
+    cmp     r10, r9                     @ bottom < interpStackEnd?
+    bxlo    lr                          @ return to raise stack overflow excep.
+    @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
+    str     rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+    str     rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
+
+    @ set up newSaveArea
+    str     rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
+    str     r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
+    str     r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
+    cmp     r8, #0                      @ suspendCount != 0
+    ldr     r8, [r0, #offMethod_nativeFunc] @ r8<- method->nativeFunc
+#if !defined(WITH_SELF_VERIFICATION)
+    bxne    lr                          @ bail to the interpreter
+#else
+    bx      lr                          @ bail to interpreter unconditionally
+#endif
+
+    @ go ahead and transfer control to the native code
+    ldr     r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
+    mov     r2, #0
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ not in the jit code cache
+    str     r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
+                                        @ newFp->localRefCookie=top
+    SAVEAREA_FROM_FP(r10, r1)           @ r10<- new stack save area
+
+    mov     r2, r0                        @ arg2<- methodToCall
+    mov     r0, r1                        @ arg0<- newFP
+    add     r1, rSELF, #offThread_retval  @ arg1<- &retval
+    mov     r3, rSELF                     @ arg3<- self
+#if defined(TEMPLATE_INLINE_PROFILING)
+    @ r2=methodToCall, r6=rSELF
+    stmfd   sp!, {r2,r6}                @ to be consumed after JNI return
+    stmfd   sp!, {r0-r3}                @ preserve r0-r3
+    mov     r0, r2
+    mov     r1, r6
+    @ r0=JNIMethod, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
+    ldmfd   sp!, {r0-r3}                @ restore r0-r3
+#endif
+
+    blx     r8                          @ off to the native code
+
+#if defined(TEMPLATE_INLINE_PROFILING)
+    ldmfd   sp!, {r0-r1}                @ restore r2 and r6
+    @ r0=JNIMethod, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastNativeMethodTraceExit
+#endif
+    @ native return; r10=newSaveArea
+    @ equivalent to dvmPopJniLocals
+    ldr     r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
+    ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top
+    ldr     r1, [rSELF, #offThread_exception] @ check for exception
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = fp
+    cmp     r1, #0                      @ null?
+    str     r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
+    ldr     r0, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+
+    @ r0 = dalvikCallsitePC
+    bne     .LhandleException           @ no, handle exception
+
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ set the mode properly
+    cmp     r2, #0                      @ return chaining cell still exists?
+    bxne    r2                          @ yes - go ahead
+
+    @ continue executing the next instruction through the interpreter
+    ldr     r1, .LdvmJitToInterpTraceSelectNoChain @ defined in footer.S
+    add     rPC, r0, #6                 @ reconstruct new rPC (advance 6 bytes)
+#if defined(WITH_JIT_TUNING)
+    mov     r0, #kCallsiteInterpreted
+#endif
+    mov     pc, r1
+
+#undef TEMPLATE_INLINE_PROFILING
+
     .size   dvmCompilerTemplateStart, .-dvmCompilerTemplateStart
 /* File: armv5te/footer.S */
 /*
@@ -1492,54 +1865,61 @@
 .LinvokeNative:
     @ Prep for the native call
     @ r1 = newFP, r0 = methodToCall
-    ldr     r3, [rGLUE, #offGlue_self]      @ r3<- glue->self
     mov     r2, #0
-    ldr     r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
-    str     r2, [r3, #offThread_inJitCodeCache] @ not in jit code cache
-    str     r1, [r3, #offThread_curFrame]   @ self->curFrame = newFp
+    ldr     r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ not in jit code cache
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
     str     r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
                                         @ newFp->localRefCookie=top
-    mov     r9, r3                      @ r9<- glue->self (preserve)
+    ldr     lr, [rSELF, #offThread_pInterpBreak]
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- new stack save area
 
     mov     r2, r0                      @ r2<- methodToCall
+    ldr     lr, [lr]                    @ lr<- set of active profilers
     mov     r0, r1                      @ r0<- newFP
-    add     r1, rGLUE, #offGlue_retval  @ r1<- &retval
-#if defined(WITH_INLINE_PROFILING)
-    @ r2: methodToCall, r6: rGLUE
+    add     r1, rSELF, #offThread_retval  @ r1<- &retval
+    mov     r3, rSELF                   @ arg3<- self
+    ands    lr, #kSubModeMethodTrace
+    beq     121f                        @ hop if not profiling
+    @ r2: methodToCall, r6: rSELF
     stmfd   sp!, {r2,r6}
     stmfd   sp!, {r0-r3}
     mov     r0, r2
     mov     r1, r6
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}
-#endif
 
-    LDR_PC_LR "[r2, #offMethod_nativeFunc]"
+    mov     lr, pc
+    ldr     pc, [r2, #offMethod_nativeFunc]
 
-#if defined(WITH_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}
-    LDR_PC_LR ".LdvmFastNativeMethodTraceExit"
-#endif
+    mov     lr, pc
+    ldr     pc, .LdvmFastNativeMethodTraceExit
+    b       212f
+121:
+    mov     lr, pc
+    ldr     pc, [r2, #offMethod_nativeFunc]
+212:
     @ Refresh Jit's on/off status
-    ldr     r3, [rGLUE, #offGlue_ppJitProfTable]
+    ldr     r3, [rSELF, #offThread_ppJitProfTable]
 
-    @ native return; r9=self, r10=newSaveArea
+    @ native return; r10=newSaveArea
     @ equivalent to dvmPopJniLocals
     ldr     r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
     ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top
-    ldr     r1, [r9, #offThread_exception] @ check for exception
+    ldr     r1, [rSELF, #offThread_exception] @ check for exception
     ldr     r3, [r3]    @ r1 <- pointer to Jit profile table
-    str     rFP, [r9, #offThread_curFrame]  @ self->curFrame = fp
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = fp
     cmp     r1, #0                      @ null?
-    str     r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top
+    str     r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
+    str     r3, [rSELF, #offThread_pJitProfTable]  @ cache current JitProfTable
     ldr     r0, [r10, #offStackSaveArea_savedPc] @ reload rPC
-    str     r3, [rGLUE, #offGlue_pJitProfTable]  @ cache current JitProfTable
 
     @ r0 = dalvikCallsitePC
     bne     .LhandleException           @ no, handle exception
 
-    str     r2, [r9, #offThread_inJitCodeCache] @ set the new mode
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ set the new mode
     cmp     r2, #0                      @ return chaining cell still exists?
     bxne    r2                          @ yes - go ahead
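
The profiling change in .LinvokeNative above replaces the old compile-time
WITH_INLINE_PROFILING guard with a run-time test of the active profiler set;
in rough C (the trace enter/exit signatures are assumptions based on the
register setup):

    if ((*self->pInterpBreak) & kSubModeMethodTrace) {
        dvmFastMethodTraceEnter(methodToCall, self);
        (*methodToCall->nativeFunc)(newFp, &self->retval, methodToCall, self);
        dvmFastNativeMethodTraceExit(methodToCall, self);
    } else {
        (*methodToCall->nativeFunc)(newFp, &self->retval, methodToCall, self);
    }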
 
@@ -1561,9 +1941,8 @@
 .LdeadFood:
     .word   0xdeadf00d
 #endif
-    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
     mov     r2, #0
-    str     r2, [r3, #offThread_inJitCodeCache] @ in interpreter land
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ in interpreter land
     ldr     r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func
     ldr     rIBASE, .LdvmAsmInstructionStart    @ same as above
     mov     rPC, r0                 @ reload the faulting Dalvik address
@@ -1584,6 +1963,8 @@
     .word   dvmMterpCommonExceptionThrown
 .LdvmLockObject:
     .word   dvmLockObject
+.LdvmJitTraceProfilingOff:
+    .word   dvmJitTraceProfilingOff
 #if defined(WITH_JIT_TUNING)
 .LdvmICHitCount:
     .word   gDvmICHitCount
@@ -1592,14 +1973,12 @@
 .LdvmSelfVerificationMemOpDecode:
     .word   dvmSelfVerificationMemOpDecode
 #endif
-#if defined(WITH_INLINE_PROFILING)
 .LdvmFastMethodTraceEnter:
     .word   dvmFastMethodTraceEnter
 .LdvmFastNativeMethodTraceExit:
     .word   dvmFastNativeMethodTraceExit
 .LdvmFastJavaMethodTraceExit:
     .word   dvmFastJavaMethodTraceExit
-#endif
 .L__aeabi_cdcmple:
     .word   __aeabi_cdcmple
 .L__aeabi_cfcmple:
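
Both files now pass the thread pointer as the fourth native-call argument; the
arg0..arg3 comments in the invoke-native templates map onto a bridge call of
roughly this shape (the typedef name and exact parameter types are
assumptions):

    typedef void (*NativeBridge)(const u4* args, JValue* pResult,
                                 const Method* method, Thread* self);

    NativeBridge bridge = (NativeBridge) methodToCall->nativeFunc;
    bridge(newFp,            /* r0: argument words of the callee frame  */
           &self->retval,    /* r1: where the return value lands        */
           methodToCall,     /* r2: the Method* being invoked           */
           self);            /* r3: calling Thread*, new in this change */
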
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
index 6d40d60..7ea4647 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
@@ -62,7 +62,7 @@
 
   reg nick      purpose
   r5  rFP       interpreted frame pointer, used for accessing locals and args
-  r6  rGLUE     MterpGlue pointer
+  r6  rSELF     thread pointer
 
 The following registers have fixed assignments in mterp but are scratch
 registers in compiled code
@@ -80,7 +80,7 @@
 /* single-purpose registers, given names for clarity */
 #define rPC     r4
 #define rFP     r5
-#define rGLUE   r6
+#define rSELF   r6
 #define rINST   r7
 #define rIBASE  r8
 
@@ -108,17 +108,6 @@
  * ===========================================================================
  */
 
-/*
- * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
- * Jump to subroutine.
- *
- * May modify IP and LR.
- */
-.macro  LDR_PC_LR source
-    mov     lr, pc
-    ldr     pc, \source
-.endm
-
 
     .global dvmCompilerTemplateStart
     .type   dvmCompilerTemplateStart, %function
@@ -177,16 +166,17 @@
      * address in the code cache following the invoke instruction. Otherwise
      * return to the special dvmJitToInterpNoChain entry point.
      */
-#if defined(WITH_INLINE_PROFILING)
+#if defined(TEMPLATE_INLINE_PROFILING)
     stmfd   sp!, {r0-r2,lr}             @ preserve live registers
     mov     r0, r6
-    @ r0=rGlue
-    LDR_PC_LR ".LdvmFastJavaMethodTraceExit"
+    @ r0=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastJavaMethodTraceExit
     ldmfd   sp!, {r0-r2,lr}             @ restore live registers
 #endif
     SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
     ldr     r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
-    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
+    ldr     r8, [rSELF, #offThread_suspendCount] @ r8<- suspendCount
     ldr     rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
 #if !defined(WITH_SELF_VERIFICATION)
     ldr     r9,  [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
@@ -195,7 +185,6 @@
 #endif
     ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                         @ r2<- method we're returning to
-    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
     cmp     r2, #0                      @ break frame?
 #if !defined(WITH_SELF_VERIFICATION)
     beq     1f                          @ bail to interpreter
@@ -205,16 +194,15 @@
     ldr     r1, .LdvmJitToInterpNoChainNoProfile @ defined in footer.S
     mov     rFP, r10                    @ publish new FP
     ldr     r10, [r2, #offMethod_clazz] @ r10<- method->clazz
-    ldr     r8, [r8]                    @ r8<- suspendCount
 
-    str     r2, [rGLUE, #offGlue_method]@ glue->method = newSave->method
+    str     r2, [rSELF, #offThread_method]@ self->method = newSave->method
     ldr     r0, [r10, #offClassObject_pDvmDex] @ r0<- method->clazz->pDvmDex
-    str     rFP, [r3, #offThread_curFrame] @ self->curFrame = fp
+    str     rFP, [rSELF, #offThread_curFrame] @ self->curFrame = fp
     add     rPC, rPC, #6                @ publish new rPC (advance 6 bytes)
-    str     r0, [rGLUE, #offGlue_methodClassDex]
+    str     r0, [rSELF, #offThread_methodClassDex]
     cmp     r8, #0                      @ check the suspendCount
     movne   r9, #0                      @ clear the chaining cell address
-    str     r9, [r3, #offThread_inJitCodeCache] @ in code cache or not
+    str     r9, [rSELF, #offThread_inJitCodeCache] @ in code cache or not
     cmp     r9, #0                      @ chaining cell exists?
     blxne   r9                          @ jump to the chaining cell
 #if defined(WITH_JIT_TUNING)
@@ -222,10 +210,10 @@
 #endif
     mov     pc, r1                      @ callsite is interpreted
 1:
-    stmia   rGLUE, {rPC, rFP}           @ SAVE_PC_FP_TO_GLUE()
+    stmia   rSELF, {rPC, rFP}           @ SAVE_PC_FP_TO_SELF()
     ldr     r2, .LdvmMterpStdBail       @ defined in footer.S
     mov     r1, #0                      @ changeInterp = false
-    mov     r0, rGLUE                   @ Expecting rGLUE in r0
+    mov     r0, rSELF                   @ Expecting rSELF in r0
     blx     r2                          @ exit the interpreter
 
 /* ------------------------------ */
@@ -241,14 +229,13 @@
     @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
     ldrh    r7, [r0, #offMethod_registersSize]  @ r7<- methodToCall->regsSize
     ldrh    r2, [r0, #offMethod_outsSize]  @ r2<- methodToCall->outsSize
-    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
-    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r8, [rSELF, #offThread_suspendCount] @ r8<- suspendCount
     add     r3, r1, #1  @ Thumb addr is odd
     SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
     sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
     sub     r10, r10, r2, lsl #2        @ r10<- bottom (newsave - outsSize)
-    ldr     r8, [r8]                    @ r8<- suspendCount (int)
     cmp     r10, r9                     @ bottom < interpStackEnd?
     bxlo    lr                          @ return to raise stack overflow excep.
     @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
@@ -274,18 +261,18 @@
 
     ldr     r10, .LdvmJitToInterpTraceSelectNoChain
     ldr     r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
-    ldr     r2, [rGLUE, #offGlue_self]      @ r2<- glue->self
 
-    @ Update "glue" values for the new method
-    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
-    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
+    @ Update "thread" values for the new method
+    str     r0, [rSELF, #offThread_method]    @ self->method = methodToCall
+    str     r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
     mov     rFP, r1                         @ fp = newFp
-    str     rFP, [r2, #offThread_curFrame]  @ self->curFrame = newFp
-#if defined(WITH_INLINE_PROFILING)
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
     stmfd   sp!, {r0-r3}                    @ preserve r0-r3
     mov     r1, r6
-    @ r0=methodToCall, r1=rGlue
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    @ r0=methodToCall, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}                    @ restore r0-r3
 #endif
 
@@ -309,15 +296,14 @@
     @ rPC = dalvikCallsite, r7 = methodToCall->registersSize
     @ methodToCall is guaranteed to be non-native
 .LinvokeChain:
-    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
-    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r8, [rSELF, #offThread_suspendCount]      @ r8<- suspendCount
     add     r3, r1, #1  @ Thumb addr is odd
     SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
     sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
     add     r12, lr, #2                 @ setup the punt-to-interp address
     sub     r10, r10, r2, lsl #2        @ r10<- bottom (newsave - outsSize)
-    ldr     r8, [r8]                    @ r8<- suspendCount (int)
     cmp     r10, r9                     @ bottom < interpStackEnd?
     bxlo    r12                         @ return to raise stack overflow excep.
     @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
@@ -333,18 +319,18 @@
     bxne    r12                         @ bail to the interpreter
 
     ldr     r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
-    ldr     r2, [rGLUE, #offGlue_self]      @ r2<- glue->self
 
-    @ Update "glue" values for the new method
-    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
-    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
+    @ Update "thread" values for the new method
+    str     r0, [rSELF, #offThread_method]    @ self->method = methodToCall
+    str     r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
     mov     rFP, r1                         @ fp = newFp
-    str     rFP, [r2, #offThread_curFrame]  @ self->curFrame = newFp
-#if defined(WITH_INLINE_PROFILING)
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
     stmfd   sp!, {r0-r2,lr}             @ preserve clobbered live registers
     mov     r1, r6
-    @ r0=methodToCall, r1=rGlue
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    @ r0=methodToCall, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r2,lr}             @ restore registers
 #endif
 
@@ -382,7 +368,7 @@
     ldr     r3, [r0, #offObject_clazz]  @ r3 <- this->class
     ldr     r8, [r2, #4]    @ r8 <- predictedChainCell->clazz
     ldr     r0, [r2, #8]    @ r0 <- predictedChainCell->method
-    ldr     r9, [rGLUE, #offGlue_icRechainCount]   @ r1 <- shared rechainCount
+    ldr     r9, [rSELF, #offThread_icRechainCount] @ r9 <- shared rechainCount
     cmp     r3, r8          @ predicted class == actual class?
 #if defined(WITH_JIT_TUNING)
     ldr     r7, .LdvmICHitCount
@@ -404,7 +390,7 @@
     cmp     r8, #0          @ initialized class or not
     moveq   r1, #0
     subne   r1, r9, #1      @ count--
-    strne   r1, [rGLUE, #offGlue_icRechainCount]   @ write back to InterpState
+    strne   r1, [rSELF, #offThread_icRechainCount]  @ write back to thread
     add     lr, lr, #4      @ return to fully-resolve landing pad
     /*
      * r1 <- count
@@ -422,13 +408,12 @@
 /* File: armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S */
     @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
     @ r7 = methodToCall->registersSize
-    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
-    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r8, [rSELF, #offThread_suspendCount]      @ r8<- suspendCount
     add     r3, r1, #1  @ Thumb addr is odd
     SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
     sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
-    ldr     r8, [r8]                    @ r3<- suspendCount (int)
     cmp     r10, r9                     @ bottom < interpStackEnd?
     bxlo    lr                          @ return to raise stack overflow excep.
     @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
@@ -438,7 +423,6 @@
     @ set up newSaveArea
     str     rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
     str     r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
-    ldr     r3, [rGLUE, #offGlue_self]      @ r3<- glue->self
     str     r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
     cmp     r8, #0                      @ suspendCount != 0
     ldr     r8, [r0, #offMethod_nativeFunc] @ r8<- method->nativeFunc
@@ -449,50 +433,52 @@
 #endif
 
     @ go ahead and transfer control to the native code
-    ldr     r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
+    ldr     r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
     mov     r2, #0
-    str     r1, [r3, #offThread_curFrame]   @ self->curFrame = newFp
-    str     r2, [r3, #offThread_inJitCodeCache] @ not in the jit code cache
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ not in the jit code cache
     str     r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
                                         @ newFp->localRefCookie=top
-    mov     r9, r3                      @ r9<- glue->self (preserve)
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- new stack save area
 
-    mov     r2, r0                      @ r2<- methodToCall
-    mov     r0, r1                      @ r0<- newFP
-    add     r1, rGLUE, #offGlue_retval  @ r1<- &retval
-#if defined(WITH_INLINE_PROFILING)
-    @ r2=methodToCall, r6=rGLUE
+    mov     r2, r0                        @ arg2<- methodToCall
+    mov     r0, r1                        @ arg0<- newFP
+    add     r1, rSELF, #offThread_retval  @ arg1<- &retval
+    mov     r3, rSELF                     @ arg3<- self
+#if defined(TEMPLATE_INLINE_PROFILING)
+    @ r2=methodToCall, r6=rSELF
     stmfd   sp!, {r2,r6}                @ to be consumed after JNI return
     stmfd   sp!, {r0-r3}                @ preserve r0-r3
     mov     r0, r2
     mov     r1, r6
-    @ r0=JNIMethod, r1=rGlue
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    @ r0=JNIMethod, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
 #endif
 
     blx     r8                          @ off to the native code
 
-#if defined(WITH_INLINE_PROFILING)
+#if defined(TEMPLATE_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}                @ restore r2 and r6
-    @ r0=JNIMethod, r1=rGlue
-    LDR_PC_LR ".LdvmFastNativeMethodTraceExit"
+    @ r0=JNIMethod, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastNativeMethodTraceExit
 #endif
-    @ native return; r9=self, r10=newSaveArea
+    @ native return; r10=newSaveArea
     @ equivalent to dvmPopJniLocals
     ldr     r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
     ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top
-    ldr     r1, [r9, #offThread_exception] @ check for exception
-    str     rFP, [r9, #offThread_curFrame]  @ self->curFrame = fp
+    ldr     r1, [rSELF, #offThread_exception] @ check for exception
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = fp
     cmp     r1, #0                      @ null?
-    str     r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top
+    str     r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
     ldr     r0, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
 
     @ r0 = dalvikCallsitePC
     bne     .LhandleException           @ no, handle exception
 
-    str     r2, [r9, #offThread_inJitCodeCache] @ set the mode properly
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ set the mode properly
     cmp     r2, #0                      @ return chaining cell still exists?
     bxne    r2                          @ yes - go ahead
 
@@ -1108,9 +1094,9 @@
      */
     vpush   {d0-d15}                    @ save out all fp registers
     push    {r0-r12,lr}                 @ save out all registers
+    ldr     r2, .LdvmSelfVerificationMemOpDecode @ defined in footer.S
     mov     r0, lr                      @ arg0 <- link register
     mov     r1, sp                      @ arg1 <- stack pointer
-    ldr     r2, .LdvmSelfVerificationMemOpDecode @ defined in footer.S
     blx     r2                          @ decode and handle the mem op
     pop     {r0-r12,lr}                 @ restore all registers
     vpop    {d0-d15}                    @ restore all fp registers
@@ -1390,7 +1376,7 @@
      *        r1 - the Dalvik PC to begin interpretation.
      *    else
      *        [lr, #3] contains Dalvik PC to begin interpretation
-     *    rGLUE - pointer to interpState
+     *    rSELF - pointer to thread
      *    rFP - Dalvik frame pointer
      */
     cmp     lr, #0
@@ -1431,10 +1417,10 @@
     str     r3, [r0, #offThread_inJitCodeCache]
     blx     r2                           @ dvmLockObject(self, obj)
     @ refresh Jit's on/off status
-    ldr     r0, [rGLUE, #offGlue_ppJitProfTable]
-    ldr     r0, [r0]
+    ldr     r3, [rSELF, #offThread_ppJitProfTable]
+    ldr     r3, [r3]
     ldr     r2, .LdvmJitToInterpNoChain
-    str     r0, [rGLUE, #offGlue_pJitProfTable]
+    str     r3, [rSELF, #offThread_pJitProfTable]
     @ Bail to interpreter - no chain [note - r4 still contains rPC]
 #if defined(WITH_JIT_TUNING)
     mov     r0, #kHeavyweightMonitor
@@ -1462,11 +1448,10 @@
     str     r3, [r0, #offThread_inJitCodeCache]
     blx     r2             @ dvmLockObject(self, obj)
     @ refresh Jit's on/off status & test for exception
-    ldr     r0, [rGLUE, #offGlue_ppJitProfTable]
-    ldr     r1, [rGLUE, #offGlue_self]
-    ldr     r0, [r0]
-    ldr     r1, [r1, #offThread_exception]
-    str     r0, [rGLUE, #offGlue_pJitProfTable]
+    ldr     r3, [rSELF, #offThread_ppJitProfTable]
+    ldr     r3, [r3]
+    ldr     r1, [rSELF, #offThread_exception]
+    str     r3, [rSELF, #offThread_pJitProfTable]
     cmp     r1, #0
     beq     1f
     ldr     r2, .LhandleException
@@ -1479,6 +1464,394 @@
 #endif
     ldr     pc, .LdvmJitToInterpNoChain
 
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_PERIODIC_PROFILING
+dvmCompiler_TEMPLATE_PERIODIC_PROFILING:
+/* File: armv5te/TEMPLATE_PERIODIC_PROFILING.S */
+    /*
+     * Increment profile counter for this trace, and decrement
+     * sample counter.  If sample counter goes below zero, turn
+     * off profiling.
+     *
+     * On entry
+     * (lr-11) is address of pointer to counter.  Note: the counter
+     *    actually exists 10 bytes before the return target, but because
+     *    we are arriving from thumb mode, lr will have its low bit set.
+     */
+     ldr    r0, [lr,#-11]
+     ldr    r1, [rSELF, #offThread_pProfileCountdown]
+     ldr    r2, [r0]                    @ get counter
+     ldr    r3, [r1]                    @ get countdown timer
+     add    r2, #1                      @ increment trace counter
+     subs   r3, #1                      @ decrement countdown timer
+     blt    .LTEMPLATE_PERIODIC_PROFILING_disable_profiling
+     str    r2, [r0]
+     str    r3, [r1]
+     bx     lr
+
+.LTEMPLATE_PERIODIC_PROFILING_disable_profiling:
+     mov    r4, lr                     @ preserve lr
+     ldr    r0, .LdvmJitTraceProfilingOff
+     blx    r0
+     bx     r4
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_RETURN_PROF
+dvmCompiler_TEMPLATE_RETURN_PROF:
+/* File: armv5te/TEMPLATE_RETURN_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_RETURN.S */
+    /*
+     * Unwind a frame from the Dalvik stack for compiled OP_RETURN_XXX.
+     * If the stored value in returnAddr
+     * is non-zero, the caller is compiled by the JIT thus return to the
+     * address in the code cache following the invoke instruction. Otherwise
+     * return to the special dvmJitToInterpNoChain entry point.
+     */
+#if defined(TEMPLATE_INLINE_PROFILING)
+    stmfd   sp!, {r0-r2,lr}             @ preserve live registers
+    mov     r0, r6
+    @ r0=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastJavaMethodTraceExit
+    ldmfd   sp!, {r0-r2,lr}             @ restore live registers
+#endif
+    SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
+    ldr     r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
+    ldr     r8, [rSELF, #offThread_suspendCount] @ r8<- suspendCount
+    ldr     rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+#if !defined(WITH_SELF_VERIFICATION)
+    ldr     r9,  [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
+#else
+    mov     r9, #0                      @ disable chaining
+#endif
+    ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
+                                        @ r2<- method we're returning to
+    cmp     r2, #0                      @ break frame?
+#if !defined(WITH_SELF_VERIFICATION)
+    beq     1f                          @ bail to interpreter
+#else
+    blxeq   lr                          @ punt to interpreter and compare state
+#endif
+    ldr     r1, .LdvmJitToInterpNoChainNoProfile @ defined in footer.S
+    mov     rFP, r10                    @ publish new FP
+    ldr     r10, [r2, #offMethod_clazz] @ r10<- method->clazz
+
+    str     r2, [rSELF, #offThread_method]@ self->method = newSave->method
+    ldr     r0, [r10, #offClassObject_pDvmDex] @ r0<- method->clazz->pDvmDex
+    str     rFP, [rSELF, #offThread_curFrame] @ self->curFrame = fp
+    add     rPC, rPC, #6                @ publish new rPC (advance 6 bytes)
+    str     r0, [rSELF, #offThread_methodClassDex]
+    cmp     r8, #0                      @ check the suspendCount
+    movne   r9, #0                      @ clear the chaining cell address
+    str     r9, [rSELF, #offThread_inJitCodeCache] @ in code cache or not
+    cmp     r9, #0                      @ chaining cell exists?
+    blxne   r9                          @ jump to the chaining cell
+#if defined(WITH_JIT_TUNING)
+    mov     r0, #kCallsiteInterpreted
+#endif
+    mov     pc, r1                      @ callsite is interpreted
+1:
+    stmia   rSELF, {rPC, rFP}           @ SAVE_PC_FP_TO_SELF()
+    ldr     r2, .LdvmMterpStdBail       @ defined in footer.S
+    mov     r1, #0                      @ changeInterp = false
+    mov     r0, rSELF                   @ Expecting rSELF in r0
+    blx     r2                          @ exit the interpreter
+
+#undef TEMPLATE_INLINE_PROFILING
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF
+dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S */
+    /*
+     * For polymorphic callsites - setup the Dalvik frame and load Dalvik PC
+     * into rPC then jump to dvmJitToInterpNoChain to dispatch the
+     * runtime-resolved callee.
+     */
+    @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
+    ldrh    r7, [r0, #offMethod_registersSize]  @ r7<- methodToCall->regsSize
+    ldrh    r2, [r0, #offMethod_outsSize]  @ r2<- methodToCall->outsSize
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r8, [rSELF, #offThread_suspendCount] @ r8<- suspendCount
+    add     r3, r1, #1  @ Thumb addr is odd
+    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
+    sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
+    SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
+    sub     r10, r10, r2, lsl #2        @ r10<- bottom (newsave - outsSize)
+    cmp     r10, r9                     @ bottom < interpStackEnd?
+    bxlo    lr                          @ return to raise stack overflow excep.
+    @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
+    ldr     r9, [r0, #offMethod_clazz]      @ r9<- method->clazz
+    ldr     r10, [r0, #offMethod_accessFlags] @ r10<- methodToCall->accessFlags
+    str     rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+    str     rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
+    ldr     rPC, [r0, #offMethod_insns]     @ rPC<- methodToCall->insns
+
+
+    @ set up newSaveArea
+    str     rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
+    str     r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
+    str     r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
+    cmp     r8, #0                      @ suspendCount != 0
+    bxne    lr                          @ bail to the interpreter
+    tst     r10, #ACC_NATIVE
+#if !defined(WITH_SELF_VERIFICATION)
+    bne     .LinvokeNative
+#else
+    bxne    lr                          @ bail to the interpreter
+#endif
+
+    ldr     r10, .LdvmJitToInterpTraceSelectNoChain
+    ldr     r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
+
+    @ Update "thread" values for the new method
+    str     r0, [rSELF, #offThread_method]    @ self->method = methodToCall
+    str     r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
+    mov     rFP, r1                         @ fp = newFp
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
+    stmfd   sp!, {r0-r3}                    @ preserve r0-r3
+    mov     r1, r6
+    @ r0=methodToCall, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
+    ldmfd   sp!, {r0-r3}                    @ restore r0-r3
+#endif
+
+    @ Start executing the callee
+#if defined(WITH_JIT_TUNING)
+    mov     r0, #kInlineCacheMiss
+#endif
+    mov     pc, r10                         @ dvmJitToInterpTraceSelectNoChain
+
+#undef TEMPLATE_INLINE_PROFILING
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN_PROF
+dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN_PROF:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_CHAIN_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S */
+    /*
+     * For monomorphic callsite, setup the Dalvik frame and return to the
+     * Thumb code through the link register to transfer control to the callee
+     * method through a dedicated chaining cell.
+     */
+    @ r0 = methodToCall, r1 = returnCell, r2 = methodToCall->outsSize
+    @ rPC = dalvikCallsite, r7 = methodToCall->registersSize
+    @ methodToCall is guaranteed to be non-native
+.LinvokeChainProf:
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r8, [rSELF, #offThread_suspendCount]      @ r8<- suspendCount
+    add     r3, r1, #1  @ Thumb addr is odd
+    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
+    sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
+    SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
+    add     r12, lr, #2                 @ setup the punt-to-interp address
+    sub     r10, r10, r2, lsl #2        @ r10<- bottom (newsave - outsSize)
+    cmp     r10, r9                     @ bottom < interpStackEnd?
+    bxlo    r12                         @ return to raise stack overflow excep.
+    @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
+    ldr     r9, [r0, #offMethod_clazz]      @ r9<- method->clazz
+    str     rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+    str     rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
+
+    @ set up newSaveArea
+    str     rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
+    str     r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
+    str     r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
+    cmp     r8, #0                      @ suspendCount != 0
+    bxne    r12                         @ bail to the interpreter
+
+    ldr     r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
+
+    @ Update "thread" values for the new method
+    str     r0, [rSELF, #offThread_method]    @ self->method = methodToCall
+    str     r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
+    mov     rFP, r1                         @ fp = newFp
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
+    stmfd   sp!, {r0-r2,lr}             @ preserve clobbered live registers
+    mov     r1, r6
+    @ r0=methodToCall, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
+    ldmfd   sp!, {r0-r2,lr}             @ restore registers
+#endif
+
+    bx      lr                              @ return to the callee-chaining cell
+
+#undef TEMPLATE_INLINE_PROFILING
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF
+dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S */
+    /*
+     * For a polymorphic callsite, check whether the cached class pointer
+     * matches the current one. If so, set up the Dalvik frame and return to the
+     * Thumb code through the link register to transfer control to the callee
+     * method through a dedicated chaining cell.
+     *
+     * The predicted chaining cell is declared in ArmLIR.h with the
+     * following layout:
+     *
+     *  typedef struct PredictedChainingCell {
+     *      u4 branch;
+     *      const ClassObject *clazz;
+     *      const Method *method;
+     *      u4 counter;
+     *  } PredictedChainingCell;
+     *
+     * Upon returning to the callsite:
+     *    - lr  : to branch to the chaining cell
+     *    - lr+2: to punt to the interpreter
+     *    - lr+4: to fully resolve the callee and possibly rechain.
+     *            r3 <- class
+     *            r9 <- counter
+     */
+    @ r0 = this, r1 = returnCell, r2 = predictedChainCell, rPC = dalvikCallsite
+    ldr     r3, [r0, #offObject_clazz]  @ r3 <- this->class
+    ldr     r8, [r2, #4]    @ r8 <- predictedChainCell->clazz
+    ldr     r0, [r2, #8]    @ r0 <- predictedChainCell->method
+    ldr     r9, [rSELF, #offThread_icRechainCount] @ r9 <- shared rechainCount
+    cmp     r3, r8          @ predicted class == actual class?
+#if defined(WITH_JIT_TUNING)
+    ldr     r7, .LdvmICHitCount
+#if defined(WORKAROUND_CORTEX_A9_745320)
+    /* Don't use conditional loads if the HW defect exists */
+    bne     101f
+    ldr     r10, [r7, #0]
+101:
+#else
+    ldreq   r10, [r7, #0]
+#endif
+    add     r10, r10, #1
+    streq   r10, [r7, #0]
+#endif
+    ldreqh  r7, [r0, #offMethod_registersSize]  @ r7<- methodToCall->regsSize
+    ldreqh  r2, [r0, #offMethod_outsSize]  @ r2<- methodToCall->outsSize
+    beq     .LinvokeChainProf   @ predicted chain is valid
+    ldr     r7, [r3, #offClassObject_vtable] @ r7 <- this->class->vtable
+    cmp     r8, #0          @ initialized class or not
+    moveq   r1, #0
+    subne   r1, r9, #1      @ count--
+    strne   r1, [rSELF, #offThread_icRechainCount]  @ write back to thread
+    add     lr, lr, #4      @ return to fully-resolve landing pad
+    /*
+     * r1 <- count
+     * r2 <- &predictedChainCell
+     * r3 <- this->class
+     * r4 <- dPC
+     * r7 <- this->class->vtable
+     */
+    bx      lr
+
+#undef TEMPLATE_INLINE_PROFILING
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF
+dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_NATIVE_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S */
+    @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
+    @ r7 = methodToCall->registersSize
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r8, [rSELF, #offThread_suspendCount]      @ r8<- suspendCount
+    add     r3, r1, #1  @ Thumb addr is odd
+    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
+    sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
+    SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
+    cmp     r10, r9                     @ bottom < interpStackEnd?
+    bxlo    lr                          @ return to raise stack overflow excep.
+    @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
+    str     rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+    str     rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
+
+    @ set up newSaveArea
+    str     rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
+    str     r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
+    str     r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
+    cmp     r8, #0                      @ suspendCount != 0
+    ldr     r8, [r0, #offMethod_nativeFunc] @ r8<- method->nativeFunc
+#if !defined(WITH_SELF_VERIFICATION)
+    bxne    lr                          @ bail to the interpreter
+#else
+    bx      lr                          @ bail to interpreter unconditionally
+#endif
+
+    @ go ahead and transfer control to the native code
+    ldr     r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
+    mov     r2, #0
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ not in the jit code cache
+    str     r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
+                                        @ newFp->localRefCookie=top
+    SAVEAREA_FROM_FP(r10, r1)           @ r10<- new stack save area
+
+    mov     r2, r0                        @ arg2<- methodToCall
+    mov     r0, r1                        @ arg0<- newFP
+    add     r1, rSELF, #offThread_retval  @ arg1<- &retval
+    mov     r3, rSELF                     @ arg3<- self
+#if defined(TEMPLATE_INLINE_PROFILING)
+    @ r2=methodToCall, r6=rSELF
+    stmfd   sp!, {r2,r6}                @ to be consumed after JNI return
+    stmfd   sp!, {r0-r3}                @ preserve r0-r3
+    mov     r0, r2
+    mov     r1, r6
+    @ r0=JNIMethod, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
+    ldmfd   sp!, {r0-r3}                @ restore r0-r3
+#endif
+
+    blx     r8                          @ off to the native code
+
+#if defined(TEMPLATE_INLINE_PROFILING)
+    ldmfd   sp!, {r0-r1}                @ restore r2 and r6
+    @ r0=JNIMethod, r1=rSELF
+    mov     lr, pc
+    ldr     pc, .LdvmFastNativeMethodTraceExit
+#endif
+    @ native return; r10=newSaveArea
+    @ equivalent to dvmPopJniLocals
+    ldr     r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
+    ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top
+    ldr     r1, [rSELF, #offThread_exception] @ check for exception
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = fp
+    cmp     r1, #0                      @ null?
+    str     r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
+    ldr     r0, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+
+    @ r0 = dalvikCallsitePC
+    bne     .LhandleException           @ no, handle exception
+
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ set the mode properly
+    cmp     r2, #0                      @ return chaining cell still exists?
+    bxne    r2                          @ yes - go ahead
+
+    @ continue executing the next instruction through the interpreter
+    ldr     r1, .LdvmJitToInterpTraceSelectNoChain @ defined in footer.S
+    add     rPC, r0, #6                 @ reconstruct new rPC (advance 6 bytes)
+#if defined(WITH_JIT_TUNING)
+    mov     r0, #kCallsiteInterpreted
+#endif
+    mov     pc, r1
+
+#undef TEMPLATE_INLINE_PROFILING
+
     .size   dvmCompilerTemplateStart, .-dvmCompilerTemplateStart
 /* File: armv5te/footer.S */
 /*
@@ -1492,54 +1865,61 @@
 .LinvokeNative:
     @ Prep for the native call
     @ r1 = newFP, r0 = methodToCall
-    ldr     r3, [rGLUE, #offGlue_self]      @ r3<- glue->self
     mov     r2, #0
-    ldr     r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
-    str     r2, [r3, #offThread_inJitCodeCache] @ not in jit code cache
-    str     r1, [r3, #offThread_curFrame]   @ self->curFrame = newFp
+    ldr     r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ not in jit code cache
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
     str     r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
                                         @ newFp->localRefCookie=top
-    mov     r9, r3                      @ r9<- glue->self (preserve)
+    ldr     lr, [rSELF, #offThread_pInterpBreak]
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- new stack save area
 
     mov     r2, r0                      @ r2<- methodToCall
+    ldr     lr, [lr]                    @ lr<- set of active profilers
     mov     r0, r1                      @ r0<- newFP
-    add     r1, rGLUE, #offGlue_retval  @ r1<- &retval
-#if defined(WITH_INLINE_PROFILING)
-    @ r2: methodToCall, r6: rGLUE
+    add     r1, rSELF, #offThread_retval  @ r1<- &retval
+    mov     r3, rSELF                   @ arg3<- self
+    ands    lr, #kSubModeMethodTrace
+    beq     121f                        @ hop if not profiling
+    @ r2: methodToCall, r6: rSELF
     stmfd   sp!, {r2,r6}
     stmfd   sp!, {r0-r3}
     mov     r0, r2
     mov     r1, r6
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}
-#endif
 
-    LDR_PC_LR "[r2, #offMethod_nativeFunc]"
+    mov     lr, pc
+    ldr     pc, [r2, #offMethod_nativeFunc]
 
-#if defined(WITH_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}
-    LDR_PC_LR ".LdvmFastNativeMethodTraceExit"
-#endif
+    mov     lr, pc
+    ldr     pc, .LdvmFastNativeMethodTraceExit
+    b       212f
+121:
+    mov     lr, pc
+    ldr     pc, [r2, #offMethod_nativeFunc]
+212:
     @ Refresh Jit's on/off status
-    ldr     r3, [rGLUE, #offGlue_ppJitProfTable]
+    ldr     r3, [rSELF, #offThread_ppJitProfTable]
 
-    @ native return; r9=self, r10=newSaveArea
+    @ native return; r10=newSaveArea
     @ equivalent to dvmPopJniLocals
     ldr     r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
     ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top
-    ldr     r1, [r9, #offThread_exception] @ check for exception
+    ldr     r1, [rSELF, #offThread_exception] @ check for exception
     ldr     r3, [r3]    @ r3 <- pointer to Jit profile table
-    str     rFP, [r9, #offThread_curFrame]  @ self->curFrame = fp
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = fp
     cmp     r1, #0                      @ null?
-    str     r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top
+    str     r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
+    str     r3, [rSELF, #offThread_pJitProfTable]  @ cache current JitProfTable
     ldr     r0, [r10, #offStackSaveArea_savedPc] @ reload rPC
-    str     r3, [rGLUE, #offGlue_pJitProfTable]  @ cache current JitProfTable
 
     @ r0 = dalvikCallsitePC
     bne     .LhandleException           @ no, handle exception
 
-    str     r2, [r9, #offThread_inJitCodeCache] @ set the new mode
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ set the new mode
     cmp     r2, #0                      @ return chaining cell still exists?
     bxne    r2                          @ yes - go ahead
 
@@ -1561,9 +1941,8 @@
 .LdeadFood:
     .word   0xdeadf00d
 #endif
-    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
     mov     r2, #0
-    str     r2, [r3, #offThread_inJitCodeCache] @ in interpreter land
+    str     r2, [rSELF, #offThread_inJitCodeCache] @ in interpreter land
     ldr     r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func
     ldr     rIBASE, .LdvmAsmInstructionStart    @ same as above
     mov     rPC, r0                 @ reload the faulting Dalvik address
@@ -1584,6 +1963,8 @@
     .word   dvmMterpCommonExceptionThrown
 .LdvmLockObject:
     .word   dvmLockObject
+.LdvmJitTraceProfilingOff:
+    .word   dvmJitTraceProfilingOff
 #if defined(WITH_JIT_TUNING)
 .LdvmICHitCount:
     .word   gDvmICHitCount
@@ -1592,14 +1973,12 @@
 .LdvmSelfVerificationMemOpDecode:
     .word   dvmSelfVerificationMemOpDecode
 #endif
-#if defined(WITH_INLINE_PROFILING)
 .LdvmFastMethodTraceEnter:
     .word   dvmFastMethodTraceEnter
 .LdvmFastNativeMethodTraceExit:
     .word   dvmFastNativeMethodTraceExit
 .LdvmFastJavaMethodTraceExit:
     .word   dvmFastJavaMethodTraceExit
-#endif
 .L__aeabi_cdcmple:
     .word   __aeabi_cdcmple
 .L__aeabi_cfcmple:
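
A rough C restatement of the predicted-chaining check described in the
TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN comment above, for readability only.
The helper name predictedChainInvoke is invented here; the field names follow
the PredictedChainingCell layout quoted in that comment, and icRechainCount is
the per-thread counter this patch initializes in dvmInitInterpreterState().

    /* Illustrative sketch, not VM code: mirrors the check performed by
     * TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN(_PROF), using Dalvik's types. */
    typedef struct PredictedChainingCell {
        u4 branch;                 /* branch to the chained callee */
        const ClassObject *clazz;  /* class observed when the cell was patched */
        const Method *method;      /* callee resolved for that class */
        u4 counter;                /* legacy per-cell counter */
    } PredictedChainingCell;

    static const Method *predictedChainInvoke(Object *thisObj,
        PredictedChainingCell *cell, Thread *self)
    {
        if (thisObj->clazz == cell->clazz) {
            /* Prediction hit: reuse the cached callee (the assembly
             * falls through to .LinvokeChainProf). */
            return cell->method;
        }
        /* Miss: if the cell was ever initialized, count down the shared
         * rechain counter; the lr+4 landing pad may re-patch the cell
         * once the count reaches zero. */
        if (cell->clazz != NULL)
            self->icRechainCount--;
        return NULL;    /* caller falls back to full resolution (lr+4 path) */
    }
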
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-ia32.S b/vm/compiler/template/out/CompilerTemplateAsm-ia32.S
index 7726e97..4e86d09 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-ia32.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-ia32.S
@@ -23,6 +23,12 @@
 
 #if defined(WITH_JIT)
 
+/* Subset of defines from mterp/x86/header.S */
+#define rSELF (%ebp)
+#define rPC   %esi
+#define rFP   %edi
+#define rINST %ebx
+
 /*
  * This is a #include, not a %include, because we want the C pre-processor
  * to expand the macros into assembler assignment statements.
@@ -51,30 +57,41 @@
 dvmCompiler_TEMPLATE_INTERPRET:
 /* File: ia32/TEMPLATE_INTERPRET.S */
     /*
-     * TODO: figure out how best to do this on x86, as we don't have
-     * an lr equivalent and probably don't want to push.
+     * This handler is a bit odd - it may be called via chaining or
+     * from static code and is expected to cause control to flow
+     * to the interpreter.  The problem is where to find the Dalvik
+     * PC of the next instruction.  When called via chaining, the dPC
+     * will be located at *rp.  When called from static code, rPC is
+     * valid and rp is a real return pointer (that should be ignored).
+     * The Arm target deals with this by using the link register as
+     * a flag.  If it is zero, we know we were called from static code.
+     * If non-zero, it points to the chain cell containing dPC.
+     * For x86, we'll infer the source by looking where rp points.
+     * If it points to anywhere within the code cache, we'll assume
+     * we got here via chaining.  Otherwise, we'll assume rPC is valid.
      *
-     * This handler transfers control to the interpeter without performing
-     * any lookups.  It may be called either as part of a normal chaining
-     * operation, or from the transition code in header.S.  We distinquish
-     * the two cases by looking at the link register.  If called from a
-     * translation chain, it will point to the chaining Dalvik PC -3.
      * On entry:
-     *    lr - if NULL:
-     *        r1 - the Dalvik PC to begin interpretation.
-     *    else
-     *        [lr, #3] contains Dalvik PC to begin interpretation
-     *    rGLUE - pointer to interpState
-     *    rFP - Dalvik frame pointer
-     *
-     *cmp     lr, #0
-     *ldrne   r1,[lr, #3]
-     *ldr     r2, .LinterpPunt
-     *mov     r0, r1                       @ set Dalvik PC
-     *bx      r2
-     *@ doesn't return
+     *    (TOS)<- return pointer or pointer to dPC
      */
 
+/*
+ * FIXME - this won't work as-is.  The cache boundaries are not
+ * set up until later.  Perhaps rething this whole thing.  Do we
+ * really need an interpret teplate?
+ */
+
+
+     movl   rSELF,%ecx
+     movl   $.LinterpPunt,%edx
+     pop    %eax
+     /*cmpl   %eax,offThread_jitCacheEnd(%ecx)*/
+     ja     1f
+     /*cmpl   %eax,offThread_jitCacheStart(%ecx)*/
+     jb     1f
+     movl   %eax,rPC
+1:
+     jmp    *(%edx)
+
 .LinterpPunt:
     .long   dvmJitToInterpPunt
 
@@ -88,14 +105,6 @@
 
     .text
     .align  4
-/*
- * FIXME - need a cacheflush for x86
- */
-    .global cacheflush
-cacheflush:
-    movl  $0xdeadf0f0, %eax
-    call *%eax
-
 
     .global dmvCompilerTemplateEnd
 dmvCompilerTemplateEnd:
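
A hedged C sketch of the source-inference idea the new ia32 TEMPLATE_INTERPRET
comment describes: decide whether control arrived via a chaining cell by
checking whether the popped return pointer falls inside the JIT code cache.
The jitCacheStart/jitCacheEnd fields are the ones the commented-out cmpl
instructions reference and, per the FIXME above, are not populated yet, so
this is an outline of the intent rather than working code.

    /* Outline only; assumes Thread gains jitCacheStart/jitCacheEnd bounds. */
    static const u2 *chooseDalvikPC(Thread *self, void *retPtr, const u2 *rPC)
    {
        char *addr = (char *)retPtr;
        if (addr >= (char *)self->jitCacheStart &&
            addr <  (char *)self->jitCacheEnd) {
            /* Arrived via a chaining cell: the cell stores the Dalvik PC. */
            return *(const u2 **)addr;
        }
        /* Arrived from static code: rPC already holds the next Dalvik PC. */
        return rPC;
    }
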
diff --git a/vm/hprof/Hprof.c b/vm/hprof/Hprof.c
index 2cc597a..3f5e316 100644
--- a/vm/hprof/Hprof.c
+++ b/vm/hprof/Hprof.c
@@ -40,12 +40,8 @@
 {
     hprofStartup_String();
     hprofStartup_Class();
-#if WITH_HPROF_STACK
-    hprofStartup_StackFrame();
-    hprofStartup_Stack();
-#endif
 
-    hprof_context_t *ctx = malloc(sizeof(*ctx));
+    hprof_context_t *ctx = (hprof_context_t *)malloc(sizeof(*ctx));
     if (ctx == NULL) {
         LOGE("hprof: can't allocate context.\n");
         return NULL;
@@ -72,7 +68,7 @@
      * Create a new context struct for the start of the file.  We
      * heap-allocate it so we can share the "free" function.
      */
-    hprof_context_t *headCtx = malloc(sizeof(*headCtx));
+    hprof_context_t *headCtx = (hprof_context_t *)malloc(sizeof(*headCtx));
     if (headCtx == NULL) {
         LOGE("hprof: can't allocate context.\n");
         hprofFreeContext(tailCtx);
@@ -93,19 +89,10 @@
     hprofAddU4ToRecord(&headCtx->curRec, HPROF_NULL_THREAD);
     hprofAddU4ToRecord(&headCtx->curRec, 0);    // no frames
 
-#if WITH_HPROF_STACK
-    hprofDumpStackFrames(headCtx);
-    hprofDumpStacks(headCtx);
-#endif
-
     hprofFlushCurrentRecord(headCtx);
 
     hprofShutdown_Class();
     hprofShutdown_String();
-#if WITH_HPROF_STACK
-    hprofShutdown_Stack();
-    hprofShutdown_StackFrame();
-#endif
 
     /* flush to ensure memstream pointer and size are updated */
     fflush(headCtx->memFp);
@@ -218,7 +205,7 @@
     if (obj == NULL) {
         return;
     }
-    ctx = arg;
+    ctx = (hprof_context_t *)arg;
     ctx->gcScanState = xlate[type];
     ctx->gcThreadSerialNumber = threadId;
     hprofMarkRootObject(ctx, obj, 0);
@@ -236,8 +223,8 @@
 
     assert(ptr != NULL);
     assert(arg != NULL);
-    obj = ptr;
-    ctx = arg;
+    obj = (Object *)ptr;
+    ctx = (hprof_context_t *)arg;
     hprofDumpHeapObject(ctx, obj);
 }
 
diff --git a/vm/hprof/Hprof.h b/vm/hprof/Hprof.h
index f95e2e1..c79a3df 100644
--- a/vm/hprof/Hprof.h
+++ b/vm/hprof/Hprof.h
@@ -31,9 +31,6 @@
 typedef hprof_id hprof_string_id;
 typedef hprof_id hprof_object_id;
 typedef hprof_id hprof_class_object_id;
-#if WITH_HPROF_STACK
-typedef hprof_id hprof_stack_frame_id;
-#endif
 
 typedef enum hprof_basic_type {
     hprof_basic_object = 2,
@@ -217,30 +214,6 @@
 #define hprofAddIdListToRecord(rec, values, numValues) \
             hprofAddU4ListToRecord((rec), (const u4 *)(values), (numValues))
 
-#if WITH_HPROF_STACK
-
-/*
- * HprofStack.c functions
- */
-
-void hprofFillInStackTrace(void *objectPtr);
-
-int hprofDumpStacks(hprof_context_t *ctx);
-
-int hprofStartup_Stack(void);
-int hprofShutdown_Stack(void);
-
-/*
- * HprofStackFrame.c functions
- */
-
-int hprofDumpStackFrames(hprof_context_t *ctx);
-
-int hprofStartup_StackFrame(void);
-int hprofShutdown_StackFrame(void);
-
-#endif
-
 /*
  * Hprof.c functions
  */
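
The hprof changes in this patch add explicit casts on malloc/realloc results
(Hprof.c above, HprofOutput.c and Interp.c below). In C those casts are
redundant, but they become mandatory if the files are ever compiled as C++;
that motivation is an assumption on my part, the patch does not state it.
A minimal self-contained illustration:

    #include <stdlib.h>

    typedef struct { int placeholder; } hprof_context_t;  /* stand-in type */

    static hprof_context_t *allocContext(void)
    {
        /* Compiles as C and as C++: void* does not implicitly convert to
         * hprof_context_t* in C++, so the cast is spelled out. */
        return (hprof_context_t *)malloc(sizeof(hprof_context_t));
    }
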
diff --git a/vm/hprof/HprofHeap.c b/vm/hprof/HprofHeap.c
index 75a1d2b..ee24a7a 100644
--- a/vm/hprof/HprofHeap.c
+++ b/vm/hprof/HprofHeap.c
@@ -214,14 +214,8 @@
 
 static int
 stackTraceSerialNumber(const void *obj)
-
 {
-#if WITH_HPROF_STACK
-    DvmHeapChunk *chunk = ptr2chunk(obj);
-    return chunk->stackTraceSerialNumber;
-#else
     return HPROF_NULL_STACK_TRACE;
-#endif
 }
 
 int
diff --git a/vm/hprof/HprofOutput.c b/vm/hprof/HprofOutput.c
index 25512b2..b84b298 100644
--- a/vm/hprof/HprofOutput.c
+++ b/vm/hprof/HprofOutput.c
@@ -85,7 +85,7 @@
     ctx->fd = fd;
 
     ctx->curRec.allocLen = 128;
-    ctx->curRec.body = malloc(ctx->curRec.allocLen);
+    ctx->curRec.body = (unsigned char *)malloc(ctx->curRec.allocLen);
 //xxx check for/return an error
 
     if (writeHeader) {
@@ -194,7 +194,7 @@
         if (newAllocLen < minSize) {
             newAllocLen = rec->allocLen + nmore + nmore/2;
         }
-        newBody = realloc(rec->body, newAllocLen);
+        newBody = (unsigned char *)realloc(rec->body, newAllocLen);
         if (newBody != NULL) {
             rec->body = newBody;
             rec->allocLen = newAllocLen;
diff --git a/vm/hprof/HprofStack.c b/vm/hprof/HprofStack.c
deleted file mode 100644
index 04641ef..0000000
--- a/vm/hprof/HprofStack.c
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Hprof.h"
-#include "HprofStack.h"
-#include "alloc/HeapInternal.h"
-
-static HashTable *gStackTraceHashTable = NULL;
-static int gSerialNumber = 0;
-
-/* Number of stack frames to cache */
-#define STACK_DEPTH 8
-
-typedef struct {
-    int serialNumber;
-    int threadSerialNumber;
-    int frameIds[STACK_DEPTH];
-} StackTrace;
-
-typedef struct {
-    StackTrace trace;
-    u1 live;
-} StackTraceEntry;
-
-static u4 computeStackTraceHash(const StackTraceEntry *stackTraceEntry);
-
-int
-hprofStartup_Stack()
-{
-    HashIter iter;
-
-    /* This will be called when a GC begins. */
-    for (dvmHashIterBegin(gStackTraceHashTable, &iter);
-         !dvmHashIterDone(&iter);
-         dvmHashIterNext(&iter)) {
-        StackTraceEntry *stackTraceEntry;
-
-        /* Clear the 'live' bit at the start of the GC pass. */
-        stackTraceEntry = (StackTraceEntry *) dvmHashIterData(&iter);
-        stackTraceEntry->live = 0;
-    }
-
-    return 0;
-}
-
-int
-hprofShutdown_Stack()
-{
-    HashIter iter;
-
-    /* This will be called when a GC has completed. */
-    for (dvmHashIterBegin(gStackTraceHashTable, &iter);
-         !dvmHashIterDone(&iter);
-         dvmHashIterNext(&iter)) {
-        StackTraceEntry *stackTraceEntry;
-
-        /*
-         * If the 'live' bit is 0, the trace is not in use by any current
-         * heap object and may be destroyed.
-         */
-        stackTraceEntry = (StackTraceEntry *) dvmHashIterData(&iter);
-        if (!stackTraceEntry->live) {
-            dvmHashTableRemove(gStackTraceHashTable,
-                    computeStackTraceHash(stackTraceEntry), stackTraceEntry);
-            free(stackTraceEntry);
-        }
-    }
-
-    return 0;
-}
-
-static u4
-computeStackTraceHash(const StackTraceEntry *stackTraceEntry)
-{
-    u4 hash = 0;
-    const char *cp = (const char *) &stackTraceEntry->trace;
-    int i;
-
-    for (i = 0; i < (int) sizeof(StackTrace); i++) {
-        hash = hash * 31 + cp[i];
-    }
-
-    return hash;
-}
-
-/* Only compare the 'trace' portion of the StackTraceEntry. */
-static int
-stackCmp(const void *tableItem, const void *looseItem)
-{
-    return memcmp(&((StackTraceEntry *) tableItem)->trace,
-            &((StackTraceEntry *) looseItem)->trace, sizeof(StackTrace));
-}
-
-static StackTraceEntry *
-stackDup(const StackTraceEntry *stackTrace)
-{
-    StackTraceEntry *newStackTrace = malloc(sizeof(StackTraceEntry));
-    memcpy(newStackTrace, stackTrace, sizeof(StackTraceEntry));
-    return newStackTrace;
-}
-
-static u4
-hprofLookupStackSerialNumber(const StackTraceEntry *stackTrace)
-{
-    StackTraceEntry *val;
-    u4 hashValue;
-    int serial;
-
-    /*
-     * Create the hash table on first contact.  We can't do this in
-     * hprofStartupStack, because we have to compute stack trace
-     * serial numbers and place them into object headers before the
-     * rest of hprof is triggered by a GC event.
-     */
-    if (gStackTraceHashTable == NULL) {
-        gStackTraceHashTable = dvmHashTableCreate(512, free);
-    }
-    dvmHashTableLock(gStackTraceHashTable);
-
-    hashValue = computeStackTraceHash(stackTrace);
-    val = dvmHashTableLookup(gStackTraceHashTable, hashValue, (void *)stackTrace,
-            (HashCompareFunc)stackCmp, false);
-    if (val == NULL) {
-        StackTraceEntry *newStackTrace;
-
-        newStackTrace = stackDup(stackTrace);
-        newStackTrace->trace.serialNumber = ++gSerialNumber;
-        val = dvmHashTableLookup(gStackTraceHashTable, hashValue,
-                (void *)newStackTrace, (HashCompareFunc)stackCmp, true);
-        assert(val != NULL);
-    }
-
-    /* Mark the trace as live (in use by an object in the current heap). */
-    val->live = 1;
-
-    /* Grab the serial number before unlocking the table. */
-    serial = val->trace.serialNumber;
-
-    dvmHashTableUnlock(gStackTraceHashTable);
-
-    return serial;
-}
-
-int
-hprofDumpStacks(hprof_context_t *ctx)
-{
-    HashIter iter;
-    hprof_record_t *rec = &ctx->curRec;
-
-    dvmHashTableLock(gStackTraceHashTable);
-
-    for (dvmHashIterBegin(gStackTraceHashTable, &iter);
-         !dvmHashIterDone(&iter);
-         dvmHashIterNext(&iter))
-    {
-        const StackTraceEntry *stackTraceEntry;
-        int count;
-        int i;
-
-        hprofStartNewRecord(ctx, HPROF_TAG_STACK_TRACE, HPROF_TIME);
-
-        stackTraceEntry = (const StackTraceEntry *) dvmHashIterData(&iter);
-        assert(stackTraceEntry != NULL);
-
-        /* STACK TRACE format:
-         *
-         * u4:     serial number for this stack
-         * u4:     serial number for the running thread
-         * u4:     number of frames
-         * [ID]*:  ID for the stack frame
-         */
-        hprofAddU4ToRecord(rec, stackTraceEntry->trace.serialNumber);
-        hprofAddU4ToRecord(rec, stackTraceEntry->trace.threadSerialNumber);
-
-        count = 0;
-        while ((count < STACK_DEPTH) &&
-               (stackTraceEntry->trace.frameIds[count] != 0)) {
-            count++;
-        }
-        hprofAddU4ToRecord(rec, count);
-        for (i = 0; i < count; i++) {
-            hprofAddU4ToRecord(rec, stackTraceEntry->trace.frameIds[i]);
-        }
-    }
-
-    dvmHashTableUnlock(gStackTraceHashTable);
-
-    return 0;
-}
-
-void
-hprofFillInStackTrace(void *objectPtr)
-
-{
-    DvmHeapChunk *chunk;
-    StackTraceEntry stackTraceEntry;
-    Thread* self;
-    void* fp;
-    int i;
-
-    if (objectPtr == NULL) {
-        return;
-    }
-    self = dvmThreadSelf();
-    if (self == NULL) {
-        return;
-    }
-    fp = self->curFrame;
-
-    /* Serial number to be filled in later. */
-    stackTraceEntry.trace.serialNumber = -1;
-
-    /*
-     * TODO - The HAT tool doesn't care about thread data, so we can defer
-     * actually emitting thread records and assigning thread serial numbers.
-     */
-    stackTraceEntry.trace.threadSerialNumber = (int) self;
-
-    memset(&stackTraceEntry.trace.frameIds, 0,
-            sizeof(stackTraceEntry.trace.frameIds));
-
-    i = 0;
-    while ((fp != NULL) && (i < STACK_DEPTH)) {
-        const StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
-        const Method* method = saveArea->method;
-        StackFrameEntry frame;
-
-        if (!dvmIsBreakFrame(fp)) {
-            frame.frame.method = method;
-            if (dvmIsNativeMethod(method)) {
-                frame.frame.pc = 0; /* no saved PC for native methods */
-            } else {
-                assert(saveArea->xtra.currentPc >= method->insns &&
-                        saveArea->xtra.currentPc <
-                        method->insns + dvmGetMethodInsnsSize(method));
-                frame.frame.pc = (int) (saveArea->xtra.currentPc -
-                        method->insns);
-            }
-
-            // Canonicalize the frame and cache it in the hprof context
-            stackTraceEntry.trace.frameIds[i++] =
-                hprofLookupStackFrameId(&frame);
-        }
-
-        assert(fp != saveArea->prevFrame);
-        fp = saveArea->prevFrame;
-    }
-
-    /* Store the stack trace serial number in the object header */
-    chunk = ptr2chunk(objectPtr);
-    chunk->stackTraceSerialNumber =
-            hprofLookupStackSerialNumber(&stackTraceEntry);
-}
diff --git a/vm/hprof/HprofStack.h b/vm/hprof/HprofStack.h
deleted file mode 100644
index 1f16c1e..0000000
--- a/vm/hprof/HprofStack.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef _DALVIK_HPROF_STACK
-#define _DALVIK_HPROF_STACK
-
-#include "../alloc/HeapInternal.h"
-
-typedef struct {
-    const Method *method;
-    int pc;
-} StackFrame;
-
-typedef struct {
-    StackFrame frame;
-    unsigned char live;
-} StackFrameEntry;
-
-int hprofStartupStack();
-int hprofShutdown_Stack();
-int hprofDumpStacks(hprof_context_t *ctx);
-void hprofFillInStackTrace(void *objectPtr);
-
-int hprofStartup_StackFrame();
-int hprofShutdown_StackFrame();
-hprof_stack_frame_id hprofLookupStackFrameId(const StackFrameEntry
-    *stackFrameEntry);
-int hprofDumpStackFrames(hprof_context_t *ctx);
-
-#endif /* _DALVIK_HPROF_STACK */
diff --git a/vm/hprof/HprofStackFrame.c b/vm/hprof/HprofStackFrame.c
deleted file mode 100644
index f9c789e..0000000
--- a/vm/hprof/HprofStackFrame.c
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Hprof.h"
-#include "HprofStack.h"
-
-#include "alloc/HeapInternal.h"
-
-static HashTable *gStackFrameHashTable;
-
-static u4 computeStackFrameHash(const StackFrameEntry *stackFrameEntry);
-
-int
-hprofStartup_StackFrame()
-{
-    HashIter iter;
-
-    /* Cache the string "<unknown>" for use when the source file is
-     * unknown.
-     */
-    hprofLookupStringId("<unknown>");
-
-    /* This will be called when a GC begins. */
-    for (dvmHashIterBegin(gStackFrameHashTable, &iter);
-         !dvmHashIterDone(&iter);
-         dvmHashIterNext(&iter)) {
-        StackFrameEntry *stackFrameEntry;
-        const Method *method;
-
-        /* Clear the 'live' bit at the start of the GC pass. */
-        stackFrameEntry = (StackFrameEntry *) dvmHashIterData(&iter);
-        stackFrameEntry->live = 0;
-
-        method = stackFrameEntry->frame.method;
-        if (method == NULL) {
-            continue;
-        }
-
-        /* Make sure the method name, descriptor, and source file are in
-         * the string table, and that the method class is in the class
-         * table. This is needed because strings and classes will be dumped
-         * before stack frames.
-         */
-
-        if (method->name) {
-            hprofLookupStringId(method->name);
-        }
-
-        DexStringCache cache;
-        const char* descriptor;
-
-        dexStringCacheInit(&cache);
-        descriptor = dexProtoGetMethodDescriptor(&method->prototype, &cache);
-        hprofLookupStringId(descriptor);
-        dexStringCacheRelease(&cache);
-
-        const char* sourceFile = dvmGetMethodSourceFile(method);
-        if (sourceFile) {
-            hprofLookupStringId(sourceFile);
-        }
-
-        if (method->clazz) {
-            hprofLookupClassId(method->clazz);
-        }
-    }
-
-    return 0;
-}
-
-int
-hprofShutdown_StackFrame()
-{
-    HashIter iter;
-
-    /* This will be called when a GC has completed. */
-    for (dvmHashIterBegin(gStackFrameHashTable, &iter);
-         !dvmHashIterDone(&iter);
-         dvmHashIterNext(&iter)) {
-        const StackFrameEntry *stackFrameEntry;
-
-        /*
-         * If the 'live' bit is 0, the frame is not in use by any current
-         * heap object and may be destroyed.
-         */
-        stackFrameEntry = (const StackFrameEntry *) dvmHashIterData(&iter);
-        if (!stackFrameEntry->live) {
-            dvmHashTableRemove(gStackFrameHashTable,
-                    computeStackFrameHash(stackFrameEntry),
-                    (void*) stackFrameEntry);
-            free((void*) stackFrameEntry);
-        }
-    }
-
-    return 0;
-}
-
-/* Only hash the 'frame' portion of the StackFrameEntry. */
-static u4
-computeStackFrameHash(const StackFrameEntry *stackFrameEntry)
-{
-    u4 hash = 0;
-    const char *cp = (char *) &stackFrameEntry->frame;
-    int i;
-
-    for (i = 0; i < (int) sizeof(StackFrame); i++) {
-        hash = 31 * hash + cp[i];
-    }
-    return hash;
-}
-
-/* Only compare the 'frame' portion of the StackFrameEntry. */
-static int
-stackFrameCmp(const void *tableItem, const void *looseItem)
-{
-    return memcmp(&((StackFrameEntry *)tableItem)->frame,
-            &((StackFrameEntry *) looseItem)->frame, sizeof(StackFrame));
-}
-
-static StackFrameEntry *
-stackFrameDup(const StackFrameEntry *stackFrameEntry)
-{
-    StackFrameEntry *newStackFrameEntry = malloc(sizeof(StackFrameEntry));
-    memcpy(newStackFrameEntry, stackFrameEntry, sizeof(StackFrameEntry));
-    return newStackFrameEntry;
-}
-
-hprof_stack_frame_id
-hprofLookupStackFrameId(const StackFrameEntry *stackFrameEntry)
-{
-    StackFrameEntry *val;
-    u4 hashValue;
-
-    /*
-     * Create the hash table on first contact.  We can't do this in
-     * hprofStartupStackFrame, because we have to compute stack trace
-     * serial numbers and place them into object headers before the
-     * rest of hprof is triggered by a GC event.
-     */
-    if (gStackFrameHashTable == NULL) {
-        gStackFrameHashTable = dvmHashTableCreate(512, free);
-    }
-    dvmHashTableLock(gStackFrameHashTable);
-
-    hashValue = computeStackFrameHash(stackFrameEntry);
-    val = dvmHashTableLookup(gStackFrameHashTable, hashValue,
-        (void *)stackFrameEntry, (HashCompareFunc)stackFrameCmp, false);
-    if (val == NULL) {
-        const StackFrameEntry *newStackFrameEntry;
-
-        newStackFrameEntry = stackFrameDup(stackFrameEntry);
-        val = dvmHashTableLookup(gStackFrameHashTable, hashValue,
-            (void *)newStackFrameEntry, (HashCompareFunc)stackFrameCmp, true);
-        assert(val != NULL);
-    }
-
-    /* Mark the frame as live (in use by an object in the current heap). */
-    val->live = 1;
-
-    dvmHashTableUnlock(gStackFrameHashTable);
-
-    return (hprof_stack_frame_id) val;
-}
-
-int
-hprofDumpStackFrames(hprof_context_t *ctx)
-{
-    HashIter iter;
-    hprof_record_t *rec = &ctx->curRec;
-
-    dvmHashTableLock(gStackFrameHashTable);
-
-    for (dvmHashIterBegin(gStackFrameHashTable, &iter);
-         !dvmHashIterDone(&iter);
-         dvmHashIterNext(&iter))
-    {
-        const StackFrameEntry *stackFrameEntry;
-        const Method *method;
-        int pc;
-        const char *sourceFile;
-        ClassObject *clazz;
-        int lineNum;
-
-        hprofStartNewRecord(ctx, HPROF_TAG_STACK_FRAME, HPROF_TIME);
-
-        stackFrameEntry = (const StackFrameEntry *) dvmHashIterData(&iter);
-        assert(stackFrameEntry != NULL);
-
-        method = stackFrameEntry->frame.method;
-        pc = stackFrameEntry->frame.pc;
-        sourceFile = dvmGetMethodSourceFile(method);
-        if (sourceFile == NULL) {
-            sourceFile = "<unknown>";
-            lineNum = 0;
-        } else {
-            lineNum = dvmLineNumFromPC(method, pc);
-        }
-        clazz = (ClassObject *) hprofLookupClassId(method->clazz);
-
-        /* STACK FRAME format:
-         *
-         * ID:     ID for this stack frame
-         * ID:     ID for the method name
-         * ID:     ID for the method descriptor
-         * ID:     ID for the source file name
-         * u4:     class serial number
-         * u4:     line number, 0 = no line information
-         *
-         * We use the address of the stack frame as its ID.
-         */
-
-        DexStringCache cache;
-        const char* descriptor;
-
-        dexStringCacheInit(&cache);
-        descriptor = dexProtoGetMethodDescriptor(&method->prototype, &cache);
-
-        hprofAddIdToRecord(rec, (u4) stackFrameEntry);
-        hprofAddIdToRecord(rec, hprofLookupStringId(method->name));
-        hprofAddIdToRecord(rec, hprofLookupStringId(descriptor));
-        hprofAddIdToRecord(rec, hprofLookupStringId(sourceFile));
-        hprofAddU4ToRecord(rec, (u4) clazz->serialNumber);
-        hprofAddU4ToRecord(rec, (u4) lineNum);
-
-        dexStringCacheRelease(&cache);
-    }
-
-    dvmHashTableUnlock(gStackFrameHashTable);
-    return 0;
-}
diff --git a/vm/interp/Interp.c b/vm/interp/Interp.c
index de2aec2..c671624 100644
--- a/vm/interp/Interp.c
+++ b/vm/interp/Interp.c
@@ -228,7 +228,7 @@
             LOGV("+++ increasing breakpoint set size to %d\n", newSize);
 
             /* pSet->breakpoints will be NULL on first entry */
-            newVec = realloc(pSet->breakpoints, newSize * sizeof(Breakpoint));
+            newVec = (Breakpoint*)realloc(pSet->breakpoints, newSize * sizeof(Breakpoint));
             if (newVec == NULL)
                 return false;
 
@@ -528,7 +528,7 @@
         saveArea = SAVEAREA_FROM_FP(fp);
         method = saveArea->method;
 
-        if (!dvmIsBreakFrame(fp) && !dvmIsNativeMethod(method))
+        if (!dvmIsBreakFrame((u4*)fp) && !dvmIsNativeMethod(method))
             break;
         prevFp = fp;
     }
@@ -545,7 +545,7 @@
          */
         LOGV("##### init step while in native method\n");
         fp = prevFp;
-        assert(!dvmIsBreakFrame(fp));
+        assert(!dvmIsBreakFrame((u4*)fp));
         assert(dvmIsNativeMethod(SAVEAREA_FROM_FP(fp)->method));
         saveArea = SAVEAREA_FROM_FP(fp);
     }
@@ -749,8 +749,7 @@
      */
     if (*switchData++ != kPackedSwitchSignature) {
         /* should have been caught by verifier */
-        dvmThrowException("Ljava/lang/InternalError;",
-            "bad packed switch magic");
+        dvmThrowInternalError("bad packed switch magic");
         return kInstrLen;
     }
 
@@ -804,8 +803,7 @@
 
     if (*switchData++ != kSparseSwitchSignature) {
         /* should have been caught by verifier */
-        dvmThrowException("Ljava/lang/InternalError;",
-            "bad sparse switch magic");
+        dvmThrowInternalError("bad sparse switch magic");
         return kInstrLen;
     }
 
@@ -917,7 +915,7 @@
     u4 size;
 
     if (arrayObj == NULL) {
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         return false;
     }
     assert (!IS_CLASS_FLAG_SET(((Object *)arrayObj)->clazz,
@@ -934,7 +932,7 @@
      * Total size is 4+(width * size + 1)/2 16-bit code units.
      */
     if (arrayData[0] != kArrayDataSignature) {
-        dvmThrowException("Ljava/lang/InternalError;", "bad array data magic");
+        dvmThrowInternalError("bad array data magic");
         return false;
     }
 
@@ -942,7 +940,7 @@
     size = arrayData[2] | (((u4)arrayData[3]) << 16);
 
     if (size > arrayObj->length) {
-        dvmThrowAIOOBE(size, arrayObj->length);
+        dvmThrowArrayIndexOutOfBoundsException(arrayObj->length, size);
         return false;
     }
     copySwappedArrayData(arrayObj->contents, &arrayData[4], size, width);
@@ -994,8 +992,7 @@
     }
     if (i == thisClass->iftableCount) {
         /* impossible in verified DEX, need to check for it in unverified */
-        dvmThrowException("Ljava/lang/IncompatibleClassChangeError;",
-            "interface not implemented");
+        dvmThrowIncompatibleClassChangeError("interface not implemented");
         return NULL;
     }
 
@@ -1010,8 +1007,7 @@
 #if 0
     /* this can happen when there's a stale class file */
     if (dvmIsAbstractMethod(methodToCall)) {
-        dvmThrowException("Ljava/lang/AbstractMethodError;",
-            "interface method not implemented");
+        dvmThrowAbstractMethodError("interface method not implemented");
         return NULL;
     }
 #else
@@ -1158,43 +1154,43 @@
     const int typeMask = 0xff << kVerifyErrorRefTypeShift;
     VerifyError errorKind = kind & ~typeMask;
     VerifyErrorRefType refType = kind >> kVerifyErrorRefTypeShift;
-    const char* exceptionName = "Ljava/lang/VerifyError;";
+    ClassObject* exceptionClass = gDvm.exVerifyError;
     char* msg = NULL;
 
     switch ((VerifyError) errorKind) {
     case VERIFY_ERROR_NO_CLASS:
-        exceptionName = "Ljava/lang/NoClassDefFoundError;";
+        exceptionClass = gDvm.exNoClassDefFoundError;
         msg = classNameFromIndex(method, ref, refType, 0);
         break;
     case VERIFY_ERROR_NO_FIELD:
-        exceptionName = "Ljava/lang/NoSuchFieldError;";
+        exceptionClass = gDvm.exNoSuchFieldError;
         msg = fieldNameFromIndex(method, ref, refType, 0);
         break;
     case VERIFY_ERROR_NO_METHOD:
-        exceptionName = "Ljava/lang/NoSuchMethodError;";
+        exceptionClass = gDvm.exNoSuchMethodError;
         msg = methodNameFromIndex(method, ref, refType, 0);
         break;
     case VERIFY_ERROR_ACCESS_CLASS:
-        exceptionName = "Ljava/lang/IllegalAccessError;";
+        exceptionClass = gDvm.exIllegalAccessError;
         msg = classNameFromIndex(method, ref, refType,
             kThrowShow_accessFromClass);
         break;
     case VERIFY_ERROR_ACCESS_FIELD:
-        exceptionName = "Ljava/lang/IllegalAccessError;";
+        exceptionClass = gDvm.exIllegalAccessError;
         msg = fieldNameFromIndex(method, ref, refType,
             kThrowShow_accessFromClass);
         break;
     case VERIFY_ERROR_ACCESS_METHOD:
-        exceptionName = "Ljava/lang/IllegalAccessError;";
+        exceptionClass = gDvm.exIllegalAccessError;
         msg = methodNameFromIndex(method, ref, refType,
             kThrowShow_accessFromClass);
         break;
     case VERIFY_ERROR_CLASS_CHANGE:
-        exceptionName = "Ljava/lang/IncompatibleClassChangeError;";
+        exceptionClass = gDvm.exIncompatibleClassChangeError;
         msg = classNameFromIndex(method, ref, refType, 0);
         break;
     case VERIFY_ERROR_INSTANTIATION:
-        exceptionName = "Ljava/lang/InstantiationError;";
+        exceptionClass = gDvm.exInstantiationError;
         msg = classNameFromIndex(method, ref, refType, 0);
         break;
 
@@ -1210,11 +1206,86 @@
     /* no default clause -- want warning if enum updated */
     }
 
-    dvmThrowException(exceptionName, msg);
+    dvmThrowException(exceptionClass, msg);
     free(msg);
 }
 
 /*
+ * Update interpBreak
+ */
+void dvmUpdateInterpBreak(int newMode, bool enable)
+{
+    ExecutionSubModes oldValue, newValue;
+
+    do {
+        oldValue = gDvm.interpBreak;
+        newValue = enable ? oldValue | newMode : oldValue & ~newMode;
+    } while (android_atomic_release_cas(oldValue, newValue,
+            &gDvm.interpBreak) != 0);
+#if defined(WITH_JIT)
+    dvmCompilerStateRefresh();
+#endif
+}
+
+/*
+ * One-time initialization at thread creation.  Here we initialize
+ * useful constants.
+ */
+void dvmInitInterpreterState(Thread* self)
+{
+#if defined(WITH_JIT)
+    /* Interpreter entry points from compiled code */
+    extern void dvmJitToInterpNormal();
+    extern void dvmJitToInterpNoChain();
+    extern void dvmJitToInterpPunt();
+    extern void dvmJitToInterpSingleStep();
+    extern void dvmJitToInterpTraceSelect();
+#if defined(WITH_SELF_VERIFICATION)
+    extern void dvmJitToInterpBackwardBranch();
+#endif
+    /*
+     * Reserve a static entity here to quickly setup runtime contents as
+     * gcc will issue block copy instructions.
+     */
+    static struct JitToInterpEntries jitToInterpEntries = {
+        dvmJitToInterpNormal,
+        dvmJitToInterpNoChain,
+        dvmJitToInterpPunt,
+        dvmJitToInterpSingleStep,
+        dvmJitToInterpTraceSelect,
+#if defined(WITH_SELF_VERIFICATION)
+        dvmJitToInterpBackwardBranch,
+#else
+        NULL,
+#endif
+    };
+#endif
+
+    // Begin initialization
+    self->cardTable = gDvm.biasedCardTableBase;
+    self->interpSave.pInterpBreak = &gDvm.interpBreak;
+#if defined(WITH_JIT)
+    self->jitToInterpEntries = jitToInterpEntries;
+    self->icRechainCount = PREDICTED_CHAIN_COUNTER_RECHAIN;
+    self->pJitProfTable = gDvmJit.pProfTable;
+    self->ppJitProfTable = &gDvmJit.pProfTable;
+    self->jitThreshold = gDvmJit.threshold;
+    self->pProfileCountdown = &gDvmJit.profileCountdown;
+#endif
+
+}
+
+/*
+ * Inter-instruction handler invoked in between instruction interpretations
+ * to handle exceptional events such as debugging housekeeping, instruction
+ * count profiling, JIT trace building, etc.
+ */
+void dvmCheckInst(u2 *dPC, Thread* self)
+{
+    //TODO add debugger, profiler, JIT, etc. checks here
+}
+
+/*
  * Main interpreter loop entry point.  Select "standard" or "debug"
  * interpreter and switch between them as required.
  *
@@ -1227,38 +1298,13 @@
  */
 void dvmInterpret(Thread* self, const Method* method, JValue* pResult)
 {
-    InterpState interpState;
     bool change;
+    InterpSaveState interpSaveState;
 #if defined(WITH_JIT)
     /* Target-specific save/restore */
     extern void dvmJitCalleeSave(double *saveArea);
     extern void dvmJitCalleeRestore(double *saveArea);
-    /* Interpreter entry points from compiled code */
-    extern void dvmJitToInterpNormal();
-    extern void dvmJitToInterpNoChain();
-    extern void dvmJitToInterpPunt();
-    extern void dvmJitToInterpSingleStep();
-    extern void dvmJitToInterpTraceSelectNoChain();
-    extern void dvmJitToInterpTraceSelect();
-#if defined(WITH_SELF_VERIFICATION)
-    extern void dvmJitToInterpBackwardBranch();
-#endif
-
-    /*
-     * Reserve a static entity here to quickly setup runtime contents as
-     * gcc will issue block copy instructions.
-     */
-    static struct JitToInterpEntries jitToInterpEntries = {
-        dvmJitToInterpNormal,
-        dvmJitToInterpNoChain,
-        dvmJitToInterpPunt,
-        dvmJitToInterpSingleStep,
-        dvmJitToInterpTraceSelect,
-#if defined(WITH_SELF_VERIFICATION)
-        dvmJitToInterpBackwardBranch,
-#endif
-    };
-
+    double calleeSave[JIT_CALLEE_SAVE_DOUBLE_COUNT];
     /*
      * If the previous VM left the code cache through single-stepping the
      * inJitCodeCache flag will be set when the VM is re-entered (for example,
@@ -1268,28 +1314,26 @@
      */
 #endif
 
+    /*
+     * Save interpreter state from previous activation, linking
+     * new to last.
+     */
+    interpSaveState = self->interpSave;
+    self->interpSave.prev = &interpSaveState;
+#if defined(WITH_JIT)
+    dvmJitCalleeSave(calleeSave);
+#endif
+
 
 #if defined(WITH_TRACKREF_CHECKS)
-    interpState.debugTrackedRefStart =
+    self->interpSave.debugTrackedRefStart =
         dvmReferenceTableEntries(&self->internalLocalRefTable);
 #endif
-    interpState.debugIsMethodEntry = true;
+    self->debugIsMethodEntry = true;
 #if defined(WITH_JIT)
-    dvmJitCalleeSave(interpState.calleeSave);
+    dvmJitCalleeSave(calleeSave);
     /* Initialize the state to kJitNot */
-    interpState.jitState = kJitNot;
-
-    /* Setup the Jit-to-interpreter entry points */
-    interpState.jitToInterpEntries = jitToInterpEntries;
-
-    /*
-     * Initialize the threshold filter [don't bother to zero out the
-     * actual table.  We're looking for matches, and an occasional
-     * false positive is acceptible.
-     */
-    interpState.lastThreshFilter = 0;
-
-    interpState.icRechainCount = PREDICTED_CHAIN_COUNTER_RECHAIN;
+    self->jitState = kJitNot;
 #endif
 
     /*
@@ -1297,15 +1341,15 @@
      *
      * No need to initialize "retval".
      */
-    interpState.method = method;
-    interpState.fp = (u4*) self->curFrame;
-    interpState.pc = method->insns;
-    interpState.entryPoint = kInterpEntryInstr;
+    self->interpSave.method = method;
+    self->interpSave.fp = (u4*) self->curFrame;
+    self->interpSave.pc = method->insns;
+    self->entryPoint = kInterpEntryInstr;
 
     if (dvmDebuggerOrProfilerActive())
-        interpState.nextMode = INTERP_DBG;
+        self->nextMode = INTERP_DBG;
     else
-        interpState.nextMode = INTERP_STD;
+        self->nextMode = INTERP_STD;
 
     assert(!dvmIsNativeMethod(method));
 
@@ -1322,7 +1366,7 @@
         dvmAbort();
     }
 
-    typedef bool (*Interpreter)(Thread*, InterpState*);
+    typedef bool (*Interpreter)(Thread*);
     Interpreter stdInterp;
     if (gDvm.executionMode == kExecutionModeInterpFast)
         stdInterp = dvmMterpStd;
@@ -1339,22 +1383,25 @@
 
     change = true;
     while (change) {
-        switch (interpState.nextMode) {
+        switch (self->nextMode) {
         case INTERP_STD:
             LOGVV("threadid=%d: interp STD\n", self->threadId);
-            change = (*stdInterp)(self, &interpState);
+            change = (*stdInterp)(self);
             break;
         case INTERP_DBG:
             LOGVV("threadid=%d: interp DBG\n", self->threadId);
-            change = dvmInterpretDbg(self, &interpState);
+            change = dvmInterpretDbg(self);
             break;
         default:
             dvmAbort();
         }
     }
 
-    *pResult = interpState.retval;
+    *pResult = self->retval;
+
+    /* Restore interpreter state from previous activation */
+    self->interpSave = interpSaveState;
 #if defined(WITH_JIT)
-    dvmJitCalleeRestore(interpState.calleeSave);
+    dvmJitCalleeRestore(calleeSave);
 #endif
 }
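
A short usage sketch for the new dvmUpdateInterpBreak() added to Interp.c
above. The CAS retry loop lets any thread set or clear sub-mode bits in the
shared gDvm.interpBreak word without taking a lock. The wrapper names below
are invented for illustration; kSubModeMethodTrace and kSubModeDebuggerActive
are flags referenced elsewhere in this patch.

    /* Illustrative wrappers, not part of the patch. */
    static void setMethodTracing(bool active)
    {
        /* dvmUpdateInterpBreak retries internally until the compare-and-swap
         * on gDvm.interpBreak succeeds, so concurrent updates from other
         * threads are not lost. */
        dvmUpdateInterpBreak(kSubModeMethodTrace, active);
    }

    static void setDebuggerActive(bool active)
    {
        dvmUpdateInterpBreak(kSubModeDebuggerActive, active);
    }
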
diff --git a/vm/interp/Interp.h b/vm/interp/Interp.h
index 784fc7a..098429c 100644
--- a/vm/interp/Interp.h
+++ b/vm/interp/Interp.h
@@ -39,6 +39,7 @@
  */
 bool dvmBreakpointStartup(void);
 void dvmBreakpointShutdown(void);
+void dvmInitInterpreterState(Thread* self);
 
 /*
  * Breakpoint implementation.
@@ -59,4 +60,14 @@
  */
 void dvmFlushBreakpoints(ClassObject* clazz);
 
+/*
+ * Atomically set or clear execution sub-mode bits in gDvm.interpBreak.
+ */
+void dvmUpdateInterpBreak(int newMode, bool enable);
+
+#ifndef DVM_NO_ASM_INTERP
+extern void* dvmAsmInstructionStart[];
+extern void* dvmAsmAltInstructionStart[];
+#endif
+
 #endif /*_DALVIK_INTERP_INTERP*/
diff --git a/vm/interp/InterpDefs.h b/vm/interp/InterpDefs.h
index 108620f..10a77a2 100644
--- a/vm/interp/InterpDefs.h
+++ b/vm/interp/InterpDefs.h
@@ -24,56 +24,7 @@
 #ifndef _DALVIK_INTERP_DEFS
 #define _DALVIK_INTERP_DEFS
 
-
-/*
- * Specify the starting point when switching between interpreters.
- */
-typedef enum InterpEntry {
-    kInterpEntryInstr = 0,      // continue to next instruction
-    kInterpEntryReturn = 1,     // jump to method return
-    kInterpEntryThrow = 2,      // jump to exception throw
 #if defined(WITH_JIT)
-    kInterpEntryResume = 3,     // Resume after single-step
-#endif
-} InterpEntry;
-
-#if defined(WITH_JIT)
-/*
- * NOTE: Only entry points dispatched via [&InterpState + #offset] are put
- * in this struct, and there are six of them:
- * 1) dvmJitToInterpNormal: find if there is a corresponding compilation for
- *    the new dalvik PC. If so, chain the originating compilation with the
- *    target then jump to it. If the destination trace doesn't exist, update
- *    the profile count for that Dalvik PC.
- * 2) dvmJitToInterpNoChain: similar to dvmJitToInterpNormal but chaining is
- *    not performed.
- * 3) dvmJitToInterpPunt: use the fast interpreter to execute the next
- *    instruction(s) and stay there as long as it is appropriate to return
- *    to the compiled land. This is used when the jit'ed code is about to
- *    throw an exception.
- * 4) dvmJitToInterpSingleStep: use the portable interpreter to execute the
- *    next instruction only and return to pre-specified location in the
- *    compiled code to resume execution. This is mainly used as debugging
- *    feature to bypass problematic opcode implementations without
- *    disturbing the trace formation.
- * 5) dvmJitToTraceSelect: Similar to dvmJitToInterpNormal except for the
- *    profiling operation. If the new Dalvik PC is dominated by an already
- *    translated trace, directly request a new translation if the destinaion
- *    trace doesn't exist.
- * 6) dvmJitToBackwardBranch: special case for SELF_VERIFICATION when the
- *    destination Dalvik PC is included by the trace itself.
- */
-struct JitToInterpEntries {
-    void *dvmJitToInterpNormal;
-    void *dvmJitToInterpNoChain;
-    void *dvmJitToInterpPunt;
-    void *dvmJitToInterpSingleStep;
-    void *dvmJitToInterpTraceSelect;
-#if defined(WITH_SELF_VERIFICATION)
-    void *dvmJitToInterpBackwardBranch;
-#endif
-};
-
 /*
  * Size of save area for callee-save FP regs, which are not automatically
  * saved by interpreter main because it doesn't use them (but Jit'd code
@@ -82,114 +33,20 @@
  */
 #define JIT_CALLEE_SAVE_DOUBLE_COUNT 8
 
-/* Number of entries in the 2nd level JIT profiler filter cache */
-#define JIT_TRACE_THRESH_FILTER_SIZE 32
-/* Number of low dalvik pc address bits to include in 2nd level filter key */
-#define JIT_TRACE_THRESH_FILTER_PC_BITS 4
 #endif
 
 /*
- * Interpreter context, used when switching from one interpreter to
- * another.  We also tuck "mterp" state in here.
+ * Portable interpreter.
  */
-typedef struct InterpState {
-    /*
-     * To make some mterp state updates easier, "pc" and "fp" MUST come
-     * first and MUST appear in this order.
-     */
-    const u2*   pc;                     // program counter
-    u4*         fp;                     // frame pointer
-
-    JValue      retval;                 // return value -- "out" only
-    const Method* method;               // method being executed
-
-
-    /* ----------------------------------------------------------------------
-     * Mterp-only state
-     */
-    DvmDex*         methodClassDex;
-    Thread*         self;
-
-    /* housekeeping */
-    void*           bailPtr;
-
-    /*
-     * These are available globally, from gDvm, or from another glue field
-     * (self/method).  They're copied in here for speed.
-     */
-    /* copy of self->interpStackEnd */
-    const u1*       interpStackEnd;
-    /* points at self->suspendCount */
-    volatile int*   pSelfSuspendCount;
-    /* Biased base of GC's card table */
-    u1*             cardTable;
-    /* points at gDvm.debuggerActive, or NULL if debugger not enabled */
-    volatile u1*    pDebuggerActive;
-    /* points at gDvm.activeProfilers */
-    volatile int*   pActiveProfilers;
-    /* ----------------------------------------------------------------------
-     */
-
-    /*
-     * Interpreter switching.
-     */
-    InterpEntry entryPoint;             // what to do when we start
-    int         nextMode;               // INTERP_STD, INTERP_DBG
-
-#if defined(WITH_JIT)
-    /*
-     * Local copies of field from gDvm placed here for fast access
-     */
-    unsigned char*     pJitProfTable;
-    JitState           jitState;
-    const void*        jitResumeNPC;    // Native PC of compiled code
-    const u2*          jitResumeDPC;    // Dalvik PC corresponding to NPC
-    int                jitThreshold;
-    /*
-     * ppJitProfTable holds the address of gDvmJit.pJitProfTable, which
-     * doubles as an on/off switch for the Jit.  Because a change in
-     * the value of gDvmJit.pJitProfTable isn't reflected in the cached
-     * copy above (pJitProfTable), we need to periodically refresh it.
-     * ppJitProfTable is used for that purpose.
-     */
-    unsigned char**    ppJitProfTable; // Used to refresh pJitProfTable
-    int                icRechainCount; // Count down to next rechain request
-#endif
-
-    bool        debugIsMethodEntry;     // used for method entry event triggers
-#if defined(WITH_TRACKREF_CHECKS)
-    int         debugTrackedRefStart;   // tracked refs from prior invocations
-#endif
-
-#if defined(WITH_JIT)
-    struct JitToInterpEntries jitToInterpEntries;
-
-    int currTraceRun;
-    int totalTraceLen;        // Number of Dalvik insts in trace
-    const u2* currTraceHead;  // Start of the trace we're building
-    const u2* currRunHead;    // Start of run we're building
-    int currRunLen;           // Length of run in 16-bit words
-    int lastThreshFilter;
-    const u2* lastPC;         // Stage the PC first for the threaded interpreter
-    intptr_t threshFilter[JIT_TRACE_THRESH_FILTER_SIZE];
-    JitTraceRun trace[MAX_JIT_RUN_LEN];
-    double calleeSave[JIT_CALLEE_SAVE_DOUBLE_COUNT];
-#endif
-
-} InterpState;
-
-/*
- * These are generated from InterpCore.h.
- */
-extern bool dvmInterpretDbg(Thread* self, InterpState* interpState);
-extern bool dvmInterpretStd(Thread* self, InterpState* interpState);
+extern bool dvmInterpretDbg(Thread* self);
+extern bool dvmInterpretStd(Thread* self);
 #define INTERP_STD 0
 #define INTERP_DBG 1
 
 /*
  * "mterp" interpreter.
  */
-extern bool dvmMterpStd(Thread* self, InterpState* interpState);
+extern bool dvmMterpStd(Thread* self);
 
 /*
  * Get the "this" pointer from the current frame.
@@ -226,11 +83,9 @@
  */
 static inline bool dvmDebuggerOrProfilerActive(void)
 {
-    bool result = gDvm.debuggerActive;
-#if !defined(WITH_INLINE_PROFILING)
-    result = result || (gDvm.activeProfilers != 0);
-#endif
-    return result;
+    return gDvm.interpBreak & (kSubModeDebuggerActive |
+                               kSubModeEmulatorTrace |
+                               kSubModeInstCounting);
 }
 
 #if defined(WITH_JIT)
@@ -240,11 +95,7 @@
  */
 static inline bool dvmJitDebuggerOrProfilerActive()
 {
-    bool result = (gDvmJit.pProfTable != NULL) || gDvm.debuggerActive;
-#if !defined(WITH_INLINE_PROFILING)
-    result = result || (gDvm.activeProfilers != 0);
-#endif
-    return result;
+    return (gDvmJit.pProfTable != NULL) || dvmDebuggerOrProfilerActive();
 }
 
 /*
diff --git a/vm/interp/InterpState.h b/vm/interp/InterpState.h
new file mode 100644
index 0000000..1642594
--- /dev/null
+++ b/vm/interp/InterpState.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Dalvik interpreter definitions.  These are internal to the interpreter.
+ *
+ * This includes defines, types, function declarations, and inline functions
+ * that are common to all interpreter implementations.
+ *
+ * Functions and globals declared here are defined in Interp.c.
+ */
+#ifndef _DALVIK_INTERP_STATE
+#define _DALVIK_INTERP_STATE
+
+/*
+ * Specify the starting point when switching between interpreters.
+ */
+typedef enum InterpEntry {
+    kInterpEntryInstr = 0,      // continue to next instruction
+    kInterpEntryReturn = 1,     // jump to method return
+    kInterpEntryThrow = 2,      // jump to exception throw
+#if defined(WITH_JIT)
+    kInterpEntryResume = 3,     // Resume after single-step
+#endif
+} InterpEntry;
+
+typedef struct InterpSaveState {
+    const u2*       pc;         // Dalvik PC
+    u4*             fp;         // Dalvik frame pointer
+    const Method    *method;    // Method being executed
+    DvmDex*         methodClassDex;
+    void*           bailPtr;
+    volatile int*   pInterpBreak;  // FIXME - use directly
+#if defined(WITH_TRACKREF_CHECKS)
+    int             debugTrackedRefStart;
+#else
+    int             unused;        // Keep struct size constant
+#endif
+    struct InterpSaveState* prev;  // To follow nested activations
+} InterpSaveState;
+
+#ifdef WITH_JIT
+/*
+ * NOTE: Only entry points dispatched via [self + #offset] are put
+ * in this struct, and there are six of them:
+ * 1) dvmJitToInterpNormal: find if there is a corresponding compilation for
+ *    the new dalvik PC. If so, chain the originating compilation with the
+ *    target then jump to it. If the destination trace doesn't exist, update
+ *    the profile count for that Dalvik PC.
+ * 2) dvmJitToInterpNoChain: similar to dvmJitToInterpNormal but chaining is
+ *    not performed.
+ * 3) dvmJitToInterpPunt: use the fast interpreter to execute the next
+ *    instruction(s) and stay there until it is appropriate to return
+ *    to the compiled land. This is used when the jit'ed code is about to
+ *    throw an exception.
+ * 4) dvmJitToInterpSingleStep: use the portable interpreter to execute the
+ *    next instruction only and return to pre-specified location in the
+ *    compiled code to resume execution. This is mainly used as debugging
+ *    feature to bypass problematic opcode implementations without
+ *    disturbing the trace formation.
+ * 5) dvmJitToTraceSelect: Similar to dvmJitToInterpNormal except for the
+ *    profiling operation. If the new Dalvik PC is dominated by an already
+ *    translated trace, directly request a new translation if the destination
+ *    trace doesn't exist.
+ * 6) dvmJitToBackwardBranch: special case for SELF_VERIFICATION when the
+ *    destination Dalvik PC is included by the trace itself.
+ */
+struct JitToInterpEntries {
+    void *dvmJitToInterpNormal;
+    void *dvmJitToInterpNoChain;
+    void *dvmJitToInterpPunt;
+    void *dvmJitToInterpSingleStep;
+    void *dvmJitToInterpTraceSelect;
+#if defined(WITH_SELF_VERIFICATION)
+    void *dvmJitToInterpBackwardBranch;
+#else
+    void *unused;  // Keep structure size constant
+#endif
+};
+
+/* States of the dbg interpreter when serving a JIT-related request */
+typedef enum JitState {
+    /* Entering states in the debug interpreter */
+    kJitNot = 0,               // Non-JIT related reasons
+    kJitTSelectRequest = 1,    // Request a trace (subject to filtering)
+    kJitTSelectRequestHot = 2, // Request a hot trace (bypass the filter)
+    kJitSelfVerification = 3,  // Self Verification Mode
+
+    /* Operational states in the debug interpreter */
+    kJitTSelect = 4,           // Actively selecting a trace
+    kJitTSelectEnd = 5,        // Done with the trace - wrap it up
+    kJitSingleStep = 6,        // Single step interpretation
+    kJitSingleStepEnd = 7,     // Done with single step, ready to return to mterp
+    kJitDone = 8,              // Ready to leave the debug interpreter
+} JitState;
+
+#if defined(WITH_SELF_VERIFICATION)
+typedef enum SelfVerificationState {
+    kSVSIdle = 0,           // Idle
+    kSVSStart = 1,          // Shadow space set up, running compiled code
+    kSVSPunt = 2,           // Exiting compiled code by punting
+    kSVSSingleStep = 3,     // Exiting compiled code by single stepping
+    kSVSNoProfile = 4,      // Exiting compiled code and don't collect profiles
+    kSVSTraceSelect = 5,    // Exiting compiled code and compile the next pc
+    kSVSNormal = 6,         // Exiting compiled code normally
+    kSVSNoChain = 7,        // Exiting compiled code by no chain
+    kSVSBackwardBranch = 8, // Exiting compiled code with backward branch trace
+    kSVSDebugInterp = 9,    // Normal state restored, running debug interpreter
+} SelfVerificationState;
+#endif
+
+/* Number of entries in the 2nd level JIT profiler filter cache */
+#define JIT_TRACE_THRESH_FILTER_SIZE 32
+/* Number of low dalvik pc address bits to include in 2nd level filter key */
+#define JIT_TRACE_THRESH_FILTER_PC_BITS 4
+#define MAX_JIT_RUN_LEN 64
+
+typedef enum JitHint {
+   kJitHintNone = 0,
+   kJitHintTaken = 1,         // Last inst in run was taken branch
+   kJitHintNotTaken = 2,      // Last inst in run was not taken branch
+   kJitHintNoBias = 3,        // Last inst in run was unbiased branch
+} jitHint;
+
+/*
+ * Element of a Jit trace description. If the isCode bit is set, it describes
+ * a contiguous sequence of Dalvik byte codes.
+ */
+typedef struct {
+    unsigned numInsts:8;     // Number of Byte codes in run
+    unsigned runEnd:1;       // Run ends with last byte code
+    jitHint  hint:7;         // Hint to apply to final code of run
+    u2    startOffset;       // Starting offset for trace run
+} JitCodeDesc;
+
+/*
+ * A complete list of trace runs passed to the compiler looks like the
+ * following:
+ *   frag1
+ *   frag2
+ *   frag3
+ *   meta1
+ *     :
+ *   metan
+ *   frag4
+ *
+ * frags 1-4 have the "isCode" field set and describe the location/length of
+ * real code traces, while metas 1-n are misc information.
+ * The meaning of the meta content is loosely defined. It is usually up to the
+ * code fragment right before the first meta field (frag3 in this case) to
+ * understand and parse it. Frag4 could be a dummy run with 0 "numInsts" but
+ * the "runEnd" field set.
+ *
+ * For example, if a trace run contains a method inlining target, the class
+ * descriptor and class loader of "this" and the currently resolved method
+ * pointer are stored there as three meta entries.
+ */
+typedef struct {
+    union {
+        JitCodeDesc frag;
+        void*       meta;
+    } info;
+    u4 isCode:1;
+    u4 unused:31;
+} JitTraceRun;
+
+#endif
+
+#endif /*_DALVIK_INTERP_STATE*/
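
The JitTraceRun layout above interleaves code fragments with meta entries, and only code runs carry the terminating runEnd bit. A minimal consumer of such a description might look like the sketch below; CodeDesc/TraceRun are simplified stand-ins for JitCodeDesc/JitTraceRun, so this only illustrates the walk, not the real compiler-side parsing.

/* Simplified stand-ins for JitCodeDesc/JitTraceRun (illustrative only). */
#include <stdio.h>

typedef struct {
    unsigned numInsts:8;        /* number of byte codes in the run */
    unsigned runEnd:1;          /* set on the final code run */
    unsigned hint:7;
    unsigned short startOffset; /* offset of the run within the method */
} CodeDesc;

typedef struct {
    union {
        CodeDesc frag;          /* valid when isCode is set */
        void*    meta;          /* opaque meta entry otherwise */
    } info;
    unsigned isCode:1;
    unsigned unused:31;
} TraceRun;

/* Walk a description: print code runs, skip meta entries, stop at runEnd. */
static void dumpTrace(const TraceRun* runs)
{
    for (int i = 0; ; i++) {
        if (runs[i].isCode) {
            printf("run %d: offset=%u insts=%u\n", i,
                   (unsigned)runs[i].info.frag.startOffset,
                   (unsigned)runs[i].info.frag.numInsts);
            if (runs[i].info.frag.runEnd)
                break;
        } else {
            printf("run %d: meta=%p\n", i, runs[i].info.meta);
        }
    }
}

int main(void)
{
    /* frag, frag, meta, terminating (possibly empty) frag, as in the comment above. */
    TraceRun t[4] = {
        { { .frag = { 3, 0, 0, 0x10 } }, 1, 0 },
        { { .frag = { 2, 0, 0, 0x16 } }, 1, 0 },
        { { .meta = 0 },                 0, 0 },
        { { .frag = { 0, 1, 0, 0 } },    1, 0 },
    };
    dumpTrace(t);
    return 0;
}

Because meta entries never set runEnd, a walker has to key its termination test off the code runs, which is why a trailing (possibly empty) code fragment like frag4 in the comment above is needed.
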
diff --git a/vm/interp/Jit.c b/vm/interp/Jit.c
index 3fafe6d..1a5a10d 100644
--- a/vm/interp/Jit.c
+++ b/vm/interp/Jit.c
@@ -56,16 +56,24 @@
 }
 
 /*
- * Save out PC, FP, InterpState, and registers to shadow space.
+ * Save out PC, FP, thread state, and registers to shadow space.
  * Return a pointer to the shadow space for JIT to use.
+ *
+ * The set of saved state from the Thread structure is:
+ *     pc  (Dalvik PC)
+ *     fp  (Dalvik FP)
+ *     retval
+ *     method
+ *     methodClassDex
+ *     interpStackEnd
  */
-void* dvmSelfVerificationSaveState(const u2* pc, const void* fp,
-                                   InterpState* interpState, int targetTrace)
+void* dvmSelfVerificationSaveState(const u2* pc, u4* fp,
+                                   Thread* self, int targetTrace)
 {
-    Thread *self = dvmThreadSelf();
     ShadowSpace *shadowSpace = self->shadowSpace;
-    unsigned preBytes = interpState->method->outsSize*4 + sizeof(StackSaveArea);
-    unsigned postBytes = interpState->method->registersSize*4;
+    unsigned preBytes = self->interpSave.method->outsSize*4 +
+        sizeof(StackSaveArea);
+    unsigned postBytes = self->interpSave.method->registersSize*4;
 
     //LOGD("### selfVerificationSaveState(%d) pc: 0x%x fp: 0x%x",
     //    self->threadId, (int)pc, (int)fp);
@@ -78,16 +86,16 @@
     }
     shadowSpace->selfVerificationState = kSVSStart;
 
-    if (interpState->entryPoint == kInterpEntryResume) {
-        interpState->entryPoint = kInterpEntryInstr;
+    if (self->entryPoint == kInterpEntryResume) {
+        self->entryPoint = kInterpEntryInstr;
 #if 0
         /* Tracking the success rate of resume after single-stepping */
-        if (interpState->jitResumeDPC == pc) {
+        if (self->jitResumeDPC == pc) {
             LOGD("SV single step resumed at %p", pc);
         }
         else {
-            LOGD("real %p DPC %p NPC %p", pc, interpState->jitResumeDPC,
-                 interpState->jitResumeNPC);
+            LOGD("real %p DPC %p NPC %p", pc, self->jitResumeDPC,
+                 self->jitResumeNPC);
         }
 #endif
     }
@@ -103,19 +111,21 @@
     // Remember original state
     shadowSpace->startPC = pc;
     shadowSpace->fp = fp;
-    shadowSpace->glue = interpState;
+    shadowSpace->retval = self->retval;
+    shadowSpace->interpStackEnd = self->interpStackEnd;
+
     /*
      * Store the original method here in case the trace ends with a
      * return/invoke, the last method.
      */
-    shadowSpace->method = interpState->method;
+    shadowSpace->method = self->interpSave.method;
+    shadowSpace->methodClassDex = self->interpSave.methodClassDex;
+
     shadowSpace->shadowFP = shadowSpace->registerSpace +
                             shadowSpace->registerSpaceSize - postBytes/4;
 
-    // Create a copy of the InterpState
-    memcpy(&(shadowSpace->interpState), interpState, sizeof(InterpState));
-    shadowSpace->interpState.fp = shadowSpace->shadowFP;
-    shadowSpace->interpState.interpStackEnd = (u1*)shadowSpace->registerSpace;
+    self->interpSave.fp = (u4*)shadowSpace->shadowFP;
+    self->interpStackEnd = (u1*)shadowSpace->registerSpace;
 
     // Create a copy of the stack
     memcpy(((char*)shadowSpace->shadowFP)-preBytes, ((char*)fp)-preBytes,
@@ -134,13 +144,11 @@
  * Save ending PC, FP and compiled code exit point to shadow space.
  * Return a pointer to the shadow space for JIT to restore state.
  */
-void* dvmSelfVerificationRestoreState(const u2* pc, const void* fp,
-                                      SelfVerificationState exitState)
+void* dvmSelfVerificationRestoreState(const u2* pc, u4* fp,
+                                      SelfVerificationState exitState,
+                                      Thread* self)
 {
-    Thread *self = dvmThreadSelf();
     ShadowSpace *shadowSpace = self->shadowSpace;
-    // Official InterpState structure
-    InterpState *realGlue = shadowSpace->glue;
     shadowSpace->endPC = pc;
     shadowSpace->endShadowFP = fp;
     shadowSpace->jitExitState = exitState;
@@ -160,16 +168,6 @@
             (int)shadowSpace->endShadowFP);
     }
 
-    // Move the resume [ND]PC from the shadow space to the real space so that
-    // the debug interpreter can return to the translation
-    if (exitState == kSVSSingleStep) {
-        realGlue->jitResumeNPC = shadowSpace->interpState.jitResumeNPC;
-        realGlue->jitResumeDPC = shadowSpace->interpState.jitResumeDPC;
-    } else {
-        realGlue->jitResumeNPC = NULL;
-        realGlue->jitResumeDPC = NULL;
-    }
-
     // Special case when punting after a single instruction
     if (exitState == kSVSPunt && pc == shadowSpace->startPC) {
         shadowSpace->selfVerificationState = kSVSIdle;
@@ -200,6 +198,14 @@
         shadowSpace->selfVerificationState = exitState;
     }
 
+    /* Restore state before returning */
+    self->interpSave.pc = shadowSpace->startPC;
+    self->interpSave.fp = shadowSpace->fp;
+    self->interpSave.method = shadowSpace->method;
+    self->interpSave.methodClassDex = shadowSpace->methodClassDex;
+    self->retval = shadowSpace->retval;
+    self->interpStackEnd = shadowSpace->interpStackEnd;
+
     return shadowSpace;
 }
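
With the InterpState copy gone from ShadowSpace, self-verification snapshots the handful of Thread fields it needs (pc, fp, method, methodClassDex, retval, interpStackEnd) on entry and writes them back on exit. Stripped of the real types, the round trip reduces to the pattern below; ThreadLike and ShadowLike are illustrative stand-ins, not the actual Thread or ShadowSpace layouts.

#include <stdio.h>
#include <string.h>

/* Stand-ins holding only the fields the snapshot touches (names follow the patch). */
typedef struct {
    const unsigned short* pc;
    unsigned int*         fp;
    const void*           method;
} InterpSave;

typedef struct {
    InterpSave           interpSave;
    long long            retval;
    const unsigned char* interpStackEnd;
} ThreadLike;

typedef struct {
    const unsigned short* startPC;
    unsigned int*         fp;
    const void*           method;
    long long             retval;
    const unsigned char*  interpStackEnd;
} ShadowLike;

static void saveState(ShadowLike* shadow, const ThreadLike* self)
{
    shadow->startPC        = self->interpSave.pc;
    shadow->fp             = self->interpSave.fp;
    shadow->method         = self->interpSave.method;
    shadow->retval         = self->retval;
    shadow->interpStackEnd = self->interpStackEnd;
}

static void restoreState(ThreadLike* self, const ShadowLike* shadow)
{
    self->interpSave.pc     = shadow->startPC;
    self->interpSave.fp     = shadow->fp;
    self->interpSave.method = shadow->method;
    self->retval            = shadow->retval;
    self->interpStackEnd    = shadow->interpStackEnd;
}

int main(void)
{
    unsigned short insns[4] = { 0 };
    unsigned int   frame[8] = { 0 };
    unsigned char  stackEnd = 0;
    int            method   = 0;

    ThreadLike t = { { insns, frame, &method }, 42, &stackEnd };
    ShadowLike s;

    saveState(&s, &t);
    memset(&t, 0, sizeof(t));          /* simulate the trace clobbering thread state */
    restoreState(&t, &s);
    printf("restored retval=%lld pc==insns: %d\n", t.retval, t.interpSave.pc == insns);
    return 0;
}
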
 
@@ -223,7 +229,7 @@
                      (int) shadowSpace->shadowFP;
     int localRegs = 0;
     int frameBytes2 = 0;
-    if (self->curFrame < shadowSpace->fp) {
+    if ((uintptr_t)self->curFrame < (uintptr_t)shadowSpace->fp) {
         localRegs = (stackSave->method->registersSize -
                      stackSave->method->insSize)*4;
         frameBytes2 = (int) shadowSpace->fp - (int) self->curFrame - localRegs;
@@ -281,8 +287,7 @@
 }
 
 /* Manage self verification while in the debug interpreter */
-static bool selfVerificationDebugInterp(const u2* pc, Thread* self,
-                                        InterpState *interpState)
+static bool selfVerificationDebugInterp(const u2* pc, Thread* self)
 {
     ShadowSpace *shadowSpace = self->shadowSpace;
     SelfVerificationState state = shadowSpace->selfVerificationState;
@@ -337,7 +342,7 @@
             selfVerificationSpinLoop(shadowSpace);
         }
         /* Check new frame if it exists (invokes only) */
-        if (self->curFrame < shadowSpace->fp) {
+        if ((uintptr_t)self->curFrame < (uintptr_t)shadowSpace->fp) {
             StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
             int localRegs = (stackSave->method->registersSize -
                              stackSave->method->insSize)*4;
@@ -385,7 +390,7 @@
          * one more instruction
          */
         if (state == kSVSSingleStep) {
-            interpState->jitState = kJitSingleStepEnd;
+            self->jitState = kJitSingleStepEnd;
         }
         return true;
 
@@ -418,7 +423,7 @@
     /*
      * Note 1: This won't necessarily stop all translation requests, and
      * operates on a delayed mechanism.  Running threads look to the copy
-     * of this value in their private InterpState structures and won't see
+     * of this value in their private thread structures and won't see
      * this change until it is refreshed (which happens on interpreter
      * entry).
      * Note 2: This is a one-shot memory leak on this table. Because this is a
@@ -506,60 +511,56 @@
              gDvmJit.invokePolyGetterInlined, gDvmJit.invokePolySetterInlined);
         LOGD("JIT: Total compilation time: %llu ms", gDvmJit.jitTime / 1000);
         LOGD("JIT: Avg unit compilation time: %llu us",
+             gDvmJit.numCompilations == 0 ? 0 :
              gDvmJit.jitTime / gDvmJit.numCompilations);
+        LOGD("JIT: Potential GC blocked by compiler: max %llu us / "
+             "avg %llu us (%d)",
+             gDvmJit.maxCompilerThreadBlockGCTime,
+             gDvmJit.numCompilerThreadBlockGC == 0 ?
+                 0 : gDvmJit.compilerThreadBlockGCTime /
+                     gDvmJit.numCompilerThreadBlockGC,
+             gDvmJit.numCompilerThreadBlockGC);
 #endif
 
         LOGD("JIT: %d Translation chains, %d interp stubs",
              gDvmJit.translationChains, stubs);
-        if (gDvmJit.profile) {
+        if (gDvmJit.profileMode == kTraceProfilingContinuous) {
             dvmCompilerSortAndPrintTraceProfiles();
         }
     }
 }
 
 
-static void setTraceConstruction(JitEntry *slot, bool value)
+/* End current trace after last successful instruction */
+void dvmJitEndTraceSelect(Thread* self)
 {
-
-    JitEntryInfoUnion oldValue;
-    JitEntryInfoUnion newValue;
-    do {
-        oldValue = slot->u;
-        newValue = oldValue;
-        newValue.info.traceConstruction = value;
-    } while (android_atomic_release_cas(oldValue.infoWord, newValue.infoWord,
-            &slot->u.infoWord) != 0);
-}
-
-static void resetTracehead(InterpState* interpState, JitEntry *slot)
-{
-    slot->codeAddress = dvmCompilerGetInterpretTemplate();
-    setTraceConstruction(slot, false);
-}
-
-/* Clean up any pending trace builds */
-void dvmJitAbortTraceSelect(InterpState* interpState)
-{
-    if (interpState->jitState == kJitTSelect)
-        interpState->jitState = kJitDone;
+    if (self->jitState == kJitTSelect)
+        self->jitState = kJitTSelectEnd;
 }
 
 /*
  * Find an entry in the JitTable, creating if necessary.
  * Returns null if table is full.
  */
-static JitEntry *lookupAndAdd(const u2* dPC, bool callerLocked)
+static JitEntry *lookupAndAdd(const u2* dPC, bool callerLocked,
+                              bool isMethodEntry)
 {
     u4 chainEndMarker = gDvmJit.jitTableSize;
     u4 idx = dvmJitHash(dPC);
 
-    /* Walk the bucket chain to find an exact match for our PC */
+    /*
+     * Walk the bucket chain to find an exact match for our PC and trace/method
+     * type
+     */
     while ((gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) &&
-           (gDvmJit.pJitEntryTable[idx].dPC != dPC)) {
+           ((gDvmJit.pJitEntryTable[idx].dPC != dPC) ||
+            (gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry !=
+             isMethodEntry))) {
         idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
     }
 
-    if (gDvmJit.pJitEntryTable[idx].dPC != dPC) {
+    if (gDvmJit.pJitEntryTable[idx].dPC != dPC ||
+        gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry != isMethodEntry) {
         /*
          * No match.  Acquire jitTableLock and find the last
          * slot in the chain. Possibly continue the chain walk in case
@@ -578,7 +579,9 @@
         if (gDvmJit.pJitEntryTable[idx].dPC != NULL) {
             u4 prev;
             while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
-                if (gDvmJit.pJitEntryTable[idx].dPC == dPC) {
+                if (gDvmJit.pJitEntryTable[idx].dPC == dPC &&
+                    gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry ==
+                        isMethodEntry) {
                     /* Another thread got there first for this dPC */
                     if (!callerLocked)
                         dvmUnlockMutex(&gDvmJit.tableLock);
@@ -617,10 +620,13 @@
             }
         }
         if (gDvmJit.pJitEntryTable[idx].dPC == NULL) {
+            gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry = isMethodEntry;
             /*
              * Initialize codeAddress and allocate the slot.  Must
              * happen in this order (since dPC is set, the entry is live.
              */
+            android_atomic_release_store((int32_t)dPC,
+                 (volatile int32_t *)(void *)&gDvmJit.pJitEntryTable[idx].dPC);
             gDvmJit.pJitEntryTable[idx].dPC = dPC;
             gDvmJit.jitTableEntriesUsed++;
         } else {
@@ -640,15 +646,24 @@
  *  + thisClass (new)
  *  + calleeMethod (new)
  */
-static void insertClassMethodInfo(InterpState* interpState,
+static void insertClassMethodInfo(Thread* self,
                                   const ClassObject* thisClass,
                                   const Method* calleeMethod,
                                   const DecodedInstruction* insn)
 {
-    int currTraceRun = ++interpState->currTraceRun;
-    interpState->trace[currTraceRun].meta = (void *) thisClass;
-    currTraceRun = ++interpState->currTraceRun;
-    interpState->trace[currTraceRun].meta = (void *) calleeMethod;
+    int currTraceRun = ++self->currTraceRun;
+    self->trace[currTraceRun].info.meta = thisClass ?
+                                    (void *) thisClass->descriptor : NULL;
+    self->trace[currTraceRun].isCode = false;
+
+    currTraceRun = ++self->currTraceRun;
+    self->trace[currTraceRun].info.meta = thisClass ?
+                                    (void *) thisClass->classLoader : NULL;
+    self->trace[currTraceRun].isCode = false;
+
+    currTraceRun = ++self->currTraceRun;
+    self->trace[currTraceRun].info.meta = (void *) calleeMethod;
+    self->trace[currTraceRun].isCode = false;
 }
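
insertClassMethodInfo now appends three non-code runs after the run that ends with an invoke: the descriptor of "this", its class loader, and the resolved callee method. The Jit.h hunk later in this patch adds JIT_TRACE_CLASS_DESC, JIT_TRACE_CLASS_LOADER and JIT_TRACE_CUR_METHOD as the offsets of those entries relative to the invoke run. A hypothetical read-back using those offsets, with stand-in types rather than the real JitTraceRun, could look like this:

#include <stdio.h>

/* Offsets of the meta entries relative to the run that ends with the invoke
 * (the values match the JIT_TRACE_* defines added to Jit.h by this patch). */
#define TRACE_CLASS_DESC   1
#define TRACE_CLASS_LOADER 2
#define TRACE_CUR_METHOD   3

/* Stand-in for a trace run: just the meta pointer and the isCode flag. */
typedef struct {
    void* meta;
    int   isCode;
} Run;

/* Given the index of the run holding the invoke, pull out its meta entries. */
static void readInvokeMeta(const Run* runs, int invokeRun,
                           const char** classDesc, void** classLoader,
                           void** calleeMethod)
{
    *classDesc    = (const char*)runs[invokeRun + TRACE_CLASS_DESC].meta;
    *classLoader  = runs[invokeRun + TRACE_CLASS_LOADER].meta;
    *calleeMethod = runs[invokeRun + TRACE_CUR_METHOD].meta;
}

int main(void)
{
    int fakeMethod = 0;                        /* stands in for a Method* */
    Run trace[4] = {
        { 0,                           1 },    /* code run ending in an invoke */
        { (void*)"Ljava/lang/String;", 0 },    /* descriptor of "this" */
        { 0,                           0 },    /* boot class loader -> NULL */
        { &fakeMethod,                 0 },    /* resolved callee */
    };
    const char* desc; void* loader; void* method;

    readInvokeMeta(trace, 0, &desc, &loader, &method);
    printf("this: %s, loader: %p, callee: %p\n", desc, loader, method);
    return 0;
}
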
 
 /*
@@ -664,7 +679,7 @@
  * lastPC, len, offset are all from the preceding invoke instruction
  */
 static void insertMoveResult(const u2 *lastPC, int len, int offset,
-                             InterpState *interpState)
+                             Thread *self)
 {
     DecodedInstruction nextDecInsn;
     const u2 *moveResultPC = lastPC + len;
@@ -676,16 +691,16 @@
         return;
 
     /* We need to start a new trace run */
-    int currTraceRun = ++interpState->currTraceRun;
-    interpState->currRunHead = moveResultPC;
-    interpState->trace[currTraceRun].frag.startOffset = offset + len;
-    interpState->trace[currTraceRun].frag.numInsts = 1;
-    interpState->trace[currTraceRun].frag.runEnd = false;
-    interpState->trace[currTraceRun].frag.hint = kJitHintNone;
-    interpState->trace[currTraceRun].frag.isCode = true;
-    interpState->totalTraceLen++;
+    int currTraceRun = ++self->currTraceRun;
+    self->currRunHead = moveResultPC;
+    self->trace[currTraceRun].info.frag.startOffset = offset + len;
+    self->trace[currTraceRun].info.frag.numInsts = 1;
+    self->trace[currTraceRun].info.frag.runEnd = false;
+    self->trace[currTraceRun].info.frag.hint = kJitHintNone;
+    self->trace[currTraceRun].isCode = true;
+    self->totalTraceLen++;
 
-    interpState->currRunLen = dexGetWidthFromInstruction(moveResultPC);
+    self->currRunLen = dexGetWidthFromInstruction(moveResultPC);
 }
 
 /*
@@ -704,8 +719,8 @@
  * because returns cannot throw in a way that causes problems for the
  * translated code.
  */
-int dvmCheckJit(const u2* pc, Thread* self, InterpState* interpState,
-                const ClassObject* thisClass, const Method* curMethod)
+int dvmCheckJit(const u2* pc, Thread* self, const ClassObject* thisClass,
+                const Method* curMethod)
 {
     int flags, len;
     int switchInterp = false;
@@ -719,13 +734,13 @@
      * Reset the entry point to the default value. If needed, it will be set to a
      * specific value in the corresponding case statement (e.g. kJitSingleStepEnd).
      */
-    interpState->entryPoint = kInterpEntryInstr;
+    self->entryPoint = kInterpEntryInstr;
 
     /* Prepare to handle last PC and stage the current PC */
-    const u2 *lastPC = interpState->lastPC;
-    interpState->lastPC = pc;
+    const u2 *lastPC = self->lastPC;
+    self->lastPC = pc;
 
-    switch (interpState->jitState) {
+    switch (self->jitState) {
         int offset;
         DecodedInstruction decInsn;
         case kJitTSelect:
@@ -739,10 +754,10 @@
              * to the amount of space it takes to generate the chaining
              * cells.
              */
-            if (interpState->totalTraceLen != 0 &&
+            if (self->totalTraceLen != 0 &&
                 (decInsn.opcode == OP_PACKED_SWITCH ||
                  decInsn.opcode == OP_SPARSE_SWITCH)) {
-                interpState->jitState = kJitTSelectEnd;
+                self->jitState = kJitTSelectEnd;
                 break;
             }
 
@@ -752,24 +767,24 @@
 #endif
             flags = dexGetFlagsFromOpcode(decInsn.opcode);
             len = dexGetWidthFromInstruction(lastPC);
-            offset = lastPC - interpState->method->insns;
+            offset = lastPC - self->interpSave.method->insns;
             assert((unsigned) offset <
-                   dvmGetMethodInsnsSize(interpState->method));
-            if (lastPC != interpState->currRunHead + interpState->currRunLen) {
+                   dvmGetMethodInsnsSize(self->interpSave.method));
+            if (lastPC != self->currRunHead + self->currRunLen) {
                 int currTraceRun;
                 /* We need to start a new trace run */
-                currTraceRun = ++interpState->currTraceRun;
-                interpState->currRunLen = 0;
-                interpState->currRunHead = (u2*)lastPC;
-                interpState->trace[currTraceRun].frag.startOffset = offset;
-                interpState->trace[currTraceRun].frag.numInsts = 0;
-                interpState->trace[currTraceRun].frag.runEnd = false;
-                interpState->trace[currTraceRun].frag.hint = kJitHintNone;
-                interpState->trace[currTraceRun].frag.isCode = true;
+                currTraceRun = ++self->currTraceRun;
+                self->currRunLen = 0;
+                self->currRunHead = (u2*)lastPC;
+                self->trace[currTraceRun].info.frag.startOffset = offset;
+                self->trace[currTraceRun].info.frag.numInsts = 0;
+                self->trace[currTraceRun].info.frag.runEnd = false;
+                self->trace[currTraceRun].info.frag.hint = kJitHintNone;
+                self->trace[currTraceRun].isCode = true;
             }
-            interpState->trace[interpState->currTraceRun].frag.numInsts++;
-            interpState->totalTraceLen++;
-            interpState->currRunLen += len;
+            self->trace[self->currTraceRun].info.frag.numInsts++;
+            self->totalTraceLen++;
+            self->currRunLen += len;
 
             /*
              * If the last instruction is an invoke, we will try to sneak in
@@ -778,9 +793,9 @@
             int needReservedRun = (flags & kInstrInvoke) ? 1 : 0;
 
            /* Will probably never hit this with the current trace builder */
-            if (interpState->currTraceRun ==
+            if (self->currTraceRun ==
                 (MAX_JIT_RUN_LEN - 1 - needReservedRun)) {
-                interpState->jitState = kJitTSelectEnd;
+                self->jitState = kJitTSelectEnd;
             }
 
             if (!dexIsGoto(flags) &&
@@ -788,7 +803,7 @@
                              kInstrCanSwitch |
                              kInstrCanReturn |
                              kInstrInvoke)) != 0)) {
-                    interpState->jitState = kJitTSelectEnd;
+                    self->jitState = kJitTSelectEnd;
 #if defined(SHOW_TRACE)
                 LOGD("TraceGen: ending on %s, basic block end",
                      dexGetOpcodeName(decInsn.opcode));
@@ -801,21 +816,21 @@
                  * it to the trace too.
                  */
                 if (flags & kInstrInvoke) {
-                    insertClassMethodInfo(interpState, thisClass, curMethod,
+                    insertClassMethodInfo(self, thisClass, curMethod,
                                           &decInsn);
-                    insertMoveResult(lastPC, len, offset, interpState);
+                    insertMoveResult(lastPC, len, offset, self);
                 }
             }
             /* Break on throw or self-loop */
             if ((decInsn.opcode == OP_THROW) || (lastPC == pc)){
-                interpState->jitState = kJitTSelectEnd;
+                self->jitState = kJitTSelectEnd;
             }
-            if (interpState->totalTraceLen >= JIT_MAX_TRACE_LEN) {
-                interpState->jitState = kJitTSelectEnd;
+            if (self->totalTraceLen >= JIT_MAX_TRACE_LEN) {
+                self->jitState = kJitTSelectEnd;
             }
              /* Abandon the trace request if debugger/profiler is attached */
             if (debugOrProfile) {
-                interpState->jitState = kJitDone;
+                self->jitState = kJitDone;
                 break;
             }
             if ((flags & kInstrCanReturn) != kInstrCanReturn) {
@@ -836,49 +851,52 @@
             /* NOTE: intentional fallthrough for returns */
         case kJitTSelectEnd:
             {
-                /* Bad trace */
-                if (interpState->totalTraceLen == 0) {
-                    /* Bad trace - mark as untranslatable */
-                    interpState->jitState = kJitDone;
+                /* Empty trace - set to bail to interpreter */
+                if (self->totalTraceLen == 0) {
+                    dvmJitSetCodeAddr(self->currTraceHead,
+                                      dvmCompilerGetInterpretTemplate(),
+                                      dvmCompilerGetInterpretTemplateSet(),
+                                      false /* Not method entry */, 0);
+                    self->jitState = kJitDone;
                     switchInterp = true;
                     break;
                 }
 
-                int lastTraceDesc = interpState->currTraceRun;
+                int lastTraceDesc = self->currTraceRun;
 
                 /* Extend a new empty desc if the last slot is meta info */
-                if (!interpState->trace[lastTraceDesc].frag.isCode) {
-                    lastTraceDesc = ++interpState->currTraceRun;
-                    interpState->trace[lastTraceDesc].frag.startOffset = 0;
-                    interpState->trace[lastTraceDesc].frag.numInsts = 0;
-                    interpState->trace[lastTraceDesc].frag.hint = kJitHintNone;
-                    interpState->trace[lastTraceDesc].frag.isCode = true;
+                if (!self->trace[lastTraceDesc].isCode) {
+                    lastTraceDesc = ++self->currTraceRun;
+                    self->trace[lastTraceDesc].info.frag.startOffset = 0;
+                    self->trace[lastTraceDesc].info.frag.numInsts = 0;
+                    self->trace[lastTraceDesc].info.frag.hint = kJitHintNone;
+                    self->trace[lastTraceDesc].isCode = true;
                 }
 
                 /* Mark the end of the trace runs */
-                interpState->trace[lastTraceDesc].frag.runEnd = true;
+                self->trace[lastTraceDesc].info.frag.runEnd = true;
 
                 JitTraceDescription* desc =
                    (JitTraceDescription*)malloc(sizeof(JitTraceDescription) +
-                     sizeof(JitTraceRun) * (interpState->currTraceRun+1));
+                     sizeof(JitTraceRun) * (self->currTraceRun+1));
 
                 if (desc == NULL) {
                     LOGE("Out of memory in trace selection");
                     dvmJitStopTranslationRequests();
-                    interpState->jitState = kJitDone;
+                    self->jitState = kJitDone;
                     switchInterp = true;
                     break;
                 }
 
-                desc->method = interpState->method;
+                desc->method = self->interpSave.method;
                 memcpy((char*)&(desc->trace[0]),
-                    (char*)&(interpState->trace[0]),
-                    sizeof(JitTraceRun) * (interpState->currTraceRun+1));
+                    (char*)&(self->trace[0]),
+                    sizeof(JitTraceRun) * (self->currTraceRun+1));
 #if defined(SHOW_TRACE)
                 LOGD("TraceGen:  trace done, adding to queue");
 #endif
                 if (dvmCompilerWorkEnqueue(
-                       interpState->currTraceHead,kWorkOrderTrace,desc)) {
+                       self->currTraceHead,kWorkOrderTrace,desc)) {
                     /* Work order successfully enqueued */
                     if (gDvmJit.blockingMode) {
                         dvmCompilerDrainQueue();
@@ -890,21 +908,12 @@
                      */
                     free(desc);
                 }
-                /*
-                 * Reset "trace in progress" flag whether or not we
-                 * successfully entered a work order.
-                 */
-                JitEntry *jitEntry =
-                    lookupAndAdd(interpState->currTraceHead, false);
-                if (jitEntry) {
-                    setTraceConstruction(jitEntry, false);
-                }
-                interpState->jitState = kJitDone;
+                self->jitState = kJitDone;
                 switchInterp = true;
             }
             break;
         case kJitSingleStep:
-            interpState->jitState = kJitSingleStepEnd;
+            self->jitState = kJitSingleStepEnd;
             break;
         case kJitSingleStepEnd:
             /*
@@ -915,12 +924,12 @@
              * cannot be reset.
              */
             if (dvmJitStayInPortableInterpreter()) {
-                interpState->entryPoint = kInterpEntryInstr;
+                self->entryPoint = kInterpEntryInstr;
                 self->inJitCodeCache = 0;
             } else {
-                interpState->entryPoint = kInterpEntryResume;
+                self->entryPoint = kInterpEntryResume;
             }
-            interpState->jitState = kJitDone;
+            self->jitState = kJitDone;
             switchInterp = true;
             break;
         case kJitDone:
@@ -928,13 +937,13 @@
             break;
 #if defined(WITH_SELF_VERIFICATION)
         case kJitSelfVerification:
-            if (selfVerificationDebugInterp(pc, self, interpState)) {
+            if (selfVerificationDebugInterp(pc, self)) {
                 /*
                  * If the next state is not single-step end, we can switch
                  * interpreter now.
                  */
-                if (interpState->jitState != kJitSingleStepEnd) {
-                    interpState->jitState = kJitDone;
+                if (self->jitState != kJitSingleStepEnd) {
+                    self->jitState = kJitDone;
                     switchInterp = true;
                 }
             }
@@ -945,7 +954,7 @@
             break;
         default:
             LOGE("Unexpected JIT state: %d entry point: %d",
-                 interpState->jitState, interpState->entryPoint);
+                 self->jitState, self->entryPoint);
             dvmAbort();
             break;
     }
@@ -953,24 +962,27 @@
      * Final check to see if we can really switch the interpreter. Make sure
      * the jitState is kJitDone or kJitNot when switchInterp is set to true.
      */
-     assert(switchInterp == false || interpState->jitState == kJitDone ||
-            interpState->jitState == kJitNot);
+     assert(switchInterp == false || self->jitState == kJitDone ||
+            self->jitState == kJitNot);
      return switchInterp && !debugOrProfile && !stayOneMoreInst &&
             !dvmJitStayInPortableInterpreter();
 }
 
-JitEntry *dvmFindJitEntry(const u2* pc)
+JitEntry *dvmJitFindEntry(const u2* pc, bool isMethodEntry)
 {
     int idx = dvmJitHash(pc);
 
     /* Expect a high hit rate on 1st shot */
-    if (gDvmJit.pJitEntryTable[idx].dPC == pc)
+    if ((gDvmJit.pJitEntryTable[idx].dPC == pc) &&
+        (gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry == isMethodEntry))
         return &gDvmJit.pJitEntryTable[idx];
     else {
         int chainEndMarker = gDvmJit.jitTableSize;
         while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
             idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
-            if (gDvmJit.pJitEntryTable[idx].dPC == pc)
+            if ((gDvmJit.pJitEntryTable[idx].dPC == pc) &&
+                (gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry ==
+                isMethodEntry))
                 return &gDvmJit.pJitEntryTable[idx];
         }
     }
@@ -978,32 +990,43 @@
 }
 
 /*
- * If a translated code address exists for the davik byte code
- * pointer return it.  This routine needs to be fast.
+ * Walk through the JIT profile table and find the corresponding JIT code, in
+ * the specified format (i.e. trace vs method). This routine needs to be fast.
  */
-void* dvmJitGetCodeAddr(const u2* dPC)
+void* getCodeAddrCommon(const u2* dPC, bool methodEntry)
 {
     int idx = dvmJitHash(dPC);
-    const u2* npc = gDvmJit.pJitEntryTable[idx].dPC;
-    if (npc != NULL) {
+    const u2* pc = gDvmJit.pJitEntryTable[idx].dPC;
+    if (pc != NULL) {
         bool hideTranslation = dvmJitHideTranslation();
-
-        if (npc == dPC) {
+        if (pc == dPC &&
+            gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry == methodEntry) {
+            int offset = (gDvmJit.profileMode >= kTraceProfilingContinuous) ?
+                 0 : gDvmJit.pJitEntryTable[idx].u.info.profileOffset;
+            intptr_t codeAddress =
+                (intptr_t)gDvmJit.pJitEntryTable[idx].codeAddress;
 #if defined(WITH_JIT_TUNING)
             gDvmJit.addrLookupsFound++;
 #endif
-            return hideTranslation ?
-                NULL : gDvmJit.pJitEntryTable[idx].codeAddress;
+            return hideTranslation || !codeAddress ?  NULL :
+                  (void *)(codeAddress + offset);
         } else {
             int chainEndMarker = gDvmJit.jitTableSize;
             while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
                 idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
-                if (gDvmJit.pJitEntryTable[idx].dPC == dPC) {
+                if (gDvmJit.pJitEntryTable[idx].dPC == dPC &&
+                    gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry ==
+                        methodEntry) {
+                    int offset = (gDvmJit.profileMode >=
+                        kTraceProfilingContinuous) ? 0 :
+                        gDvmJit.pJitEntryTable[idx].u.info.profileOffset;
+                    intptr_t codeAddress =
+                        (intptr_t)gDvmJit.pJitEntryTable[idx].codeAddress;
 #if defined(WITH_JIT_TUNING)
                     gDvmJit.addrLookupsFound++;
 #endif
-                    return hideTranslation ?
-                        NULL : gDvmJit.pJitEntryTable[idx].codeAddress;
+                    return hideTranslation || !codeAddress ? NULL :
+                        (void *)(codeAddress + offset);
                 }
             }
         }
@@ -1015,22 +1038,55 @@
 }
 
 /*
+ * If a translated code address, in trace format, exists for the dalvik byte
+ * code pointer, return it.
+ */
+void* dvmJitGetTraceAddr(const u2* dPC)
+{
+    return getCodeAddrCommon(dPC, false /* method entry */);
+}
+
+/*
+ * If a translated code address, in whole-method format, exists for the dalvik
+ * byte code pointer, return it.
+ */
+void* dvmJitGetMethodAddr(const u2* dPC)
+{
+    return getCodeAddrCommon(dPC, true /* method entry */);
+}
+
+/*
  * Register the translated code pointer into the JitTable.
  * NOTE: Once a codeAddress field transitions from initial state to
  * JIT'd code, it must not be altered without first halting all
  * threads.  This routine should only be called by the compiler
- * thread.
+ * thread.  We defer the setting of the profile prefix size until
+ * after the new code address is set to ensure that the prefix offset
+ * is never applied to the initial interpret-only translation.  All
+ * translations with non-zero profile prefixes will still be correct
+ * if entered as if the profile offset is 0, but the interpret-only
+ * template cannot handle a non-zero prefix.
  */
-void dvmJitSetCodeAddr(const u2* dPC, void *nPC, JitInstructionSetType set) {
+void dvmJitSetCodeAddr(const u2* dPC, void *nPC, JitInstructionSetType set,
+                       bool isMethodEntry, int profilePrefixSize)
+{
     JitEntryInfoUnion oldValue;
     JitEntryInfoUnion newValue;
-    JitEntry *jitEntry = lookupAndAdd(dPC, false);
+    /*
+     * Method-based JIT doesn't go through the normal profiling phase, so use
+     * lookupAndAdd here to request a new entry in the table.
+     */
+    JitEntry *jitEntry = isMethodEntry ?
+        lookupAndAdd(dPC, false /* caller locked */, true) :
+        dvmJitFindEntry(dPC, isMethodEntry);
     assert(jitEntry);
     /* Note: order of update is important */
     do {
         oldValue = jitEntry->u;
         newValue = oldValue;
+        newValue.info.isMethodEntry = isMethodEntry;
         newValue.info.instructionSet = set;
+        newValue.info.profileOffset = profilePrefixSize;
     } while (android_atomic_release_cas(
              oldValue.infoWord, newValue.infoWord,
              &jitEntry->u.infoWord) != 0);
@@ -1042,7 +1098,7 @@
  * if we need to abort and switch back to the fast interpreter, false
  * otherwise.
  */
-bool dvmJitCheckTraceRequest(Thread* self, InterpState* interpState)
+bool dvmJitCheckTraceRequest(Thread* self)
 {
     bool switchInterp = false;         /* Assume success */
     int i;
@@ -1064,7 +1120,7 @@
      * number of very hot loops, we would want the second-level filter
      * to be very selective.  A good selective filter is requiring an
      * exact match of the Dalvik PC.  In other words, defining filterKey as:
-     *     intptr_t filterKey = (intptr_t)interpState->pc
+     *     intptr_t filterKey = (intptr_t)self->interpSave.pc
      *
      * However, for flat execution profiles we do best when aggressively
      * translating.  A heuristically decent proxy for this is to use
@@ -1072,7 +1128,7 @@
      * Intuitively, this is saying that once any trace in a method appears hot,
      * immediately translate any other trace from that same method that
      * survives the first-level filter.  Here, filterKey would be defined as:
-     *     intptr_t filterKey = (intptr_t)interpState->method
+     *     intptr_t filterKey = (intptr_t)self->interpSave.method
      *
      * The problem is that we can't easily detect whether we're dealing
      * with a spiky or flat profile.  If we go with the "pc" match approach,
@@ -1094,9 +1150,9 @@
      * alignment for method pointers, and half-word alignment of the Dalvik pc.
      * for method pointers and half-word alignment for dalvik pc.
      */
-    u4 methodKey = (u4)interpState->method <<
+    u4 methodKey = (u4)self->interpSave.method <<
                    (JIT_TRACE_THRESH_FILTER_PC_BITS - 2);
-    u4 pcKey = ((u4)interpState->pc >> 1) &
+    u4 pcKey = ((u4)self->interpSave.pc >> 1) &
                ((1 << JIT_TRACE_THRESH_FILTER_PC_BITS) - 1);
     intptr_t filterKey = (intptr_t)(methodKey | pcKey);
     bool debugOrProfile = dvmDebuggerOrProfilerActive();
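
The second-level filter discussed above only admits a trace request on the second hit of a key mixing the method pointer with the low bits of the Dalvik pc. The stand-alone sketch below reproduces the key construction and the reset-on-hit behaviour; the two constants mirror JIT_TRACE_THRESH_FILTER_SIZE and JIT_TRACE_THRESH_FILTER_PC_BITS, but the function names and the dummy method/pc are illustrative only.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdbool.h>

#define FILTER_SIZE    32   /* mirrors JIT_TRACE_THRESH_FILTER_SIZE */
#define FILTER_PC_BITS 4    /* mirrors JIT_TRACE_THRESH_FILTER_PC_BITS */

/* Build the filter key from a method pointer and a Dalvik pc, as in
 * dvmJitCheckTraceRequest: method bits shifted up, low pc bits in the bottom. */
static intptr_t makeFilterKey(const void* method, const void* pc)
{
    uintptr_t methodKey = (uintptr_t)method << (FILTER_PC_BITS - 2);
    uintptr_t pcKey = ((uintptr_t)pc >> 1) & ((1u << FILTER_PC_BITS) - 1);
    return (intptr_t)(methodKey | pcKey);
}

/*
 * Returns true if the key was already in the filter (second hit, request
 * survives); otherwise records the key in a random slot and returns false.
 */
static bool checkAndUpdateFilter(intptr_t* filter, intptr_t key)
{
    for (int i = 0; i < FILTER_SIZE; i++) {
        if (filter[i] == key) {
            filter[i] = 0;          /* reset the entry */
            return true;
        }
    }
    filter[rand() % FILTER_SIZE] = key;
    return false;
}

int main(void)
{
    intptr_t filter[FILTER_SIZE] = { 0 };
    int dummyMethod, dummyInsns[8];
    intptr_t key = makeFilterKey(&dummyMethod, &dummyInsns[4]);

    printf("first hit passes filter: %d\n", checkAndUpdateFilter(filter, key));
    printf("second hit passes filter: %d\n", checkAndUpdateFilter(filter, key));
    return 0;
}

Replacing a random entry on a miss keeps the filter cheap while still giving genuinely hot code a second chance on a later hit.
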
@@ -1104,12 +1160,12 @@
     /* Check if the JIT request can be handled now */
     if (gDvmJit.pJitEntryTable != NULL && debugOrProfile == false) {
         /* Bypass the filter for hot trace requests or during stress mode */
-        if (interpState->jitState == kJitTSelectRequest &&
+        if (self->jitState == kJitTSelectRequest &&
             gDvmJit.threshold > 6) {
             /* Two-level filtering scheme */
             for (i=0; i< JIT_TRACE_THRESH_FILTER_SIZE; i++) {
-                if (filterKey == interpState->threshFilter[i]) {
-                    interpState->threshFilter[i] = 0; // Reset filter entry
+                if (filterKey == self->threshFilter[i]) {
+                    self->threshFilter[i] = 0; // Reset filter entry
                     break;
                 }
             }
@@ -1120,78 +1176,59 @@
                  * filter array.
                  */
                 i = rand() % JIT_TRACE_THRESH_FILTER_SIZE;
-                interpState->threshFilter[i] = filterKey;
-                interpState->jitState = kJitDone;
+                self->threshFilter[i] = filterKey;
+                self->jitState = kJitDone;
             }
         }
 
         /* If the compiler is backlogged, cancel any JIT actions */
         if (gDvmJit.compilerQueueLength >= gDvmJit.compilerHighWater) {
-            interpState->jitState = kJitDone;
+            self->jitState = kJitDone;
         }
 
         /*
          * Check for additional reasons that might force the trace select
          * request to be dropped
          */
-        if (interpState->jitState == kJitTSelectRequest ||
-            interpState->jitState == kJitTSelectRequestHot) {
-            JitEntry *slot = lookupAndAdd(interpState->pc, false);
-            if (slot == NULL) {
-                /*
-                 * Table is full.  This should have been
-                 * detected by the compiler thread and the table
-                 * resized before we run into it here.  Assume bad things
-                 * are afoot and disable profiling.
-                 */
-                interpState->jitState = kJitDone;
-                LOGD("JIT: JitTable full, disabling profiling");
-                dvmJitStopTranslationRequests();
-            } else if (slot->u.info.traceConstruction) {
-                /*
-                 * Trace request already in progress, but most likely it
-                 * aborted without cleaning up.  Assume the worst and
-                 * mark trace head as untranslatable.  If we're wrong,
-                 * the compiler thread will correct the entry when the
-                 * translation is completed.  The downside here is that
-                 * some existing translation may chain to the interpret-only
-                 * template instead of the real translation during this
-                 * window.  Performance, but not correctness, issue.
-                 */
-                interpState->jitState = kJitDone;
-                resetTracehead(interpState, slot);
-            } else if (slot->codeAddress) {
-                 /* Nothing to do here - just return */
-                interpState->jitState = kJitDone;
+        if (self->jitState == kJitTSelectRequest ||
+            self->jitState == kJitTSelectRequestHot) {
+            if (dvmJitFindEntry(self->interpSave.pc, false)) {
+                /* In progress - nothing to do */
+                self->jitState = kJitDone;
             } else {
-                /*
-                 * Mark request.  Note, we are not guaranteed exclusivity
-                 * here.  A window exists for another thread to be
-                 * attempting to build this same trace.  Rather than
-                 * bear the cost of locking, we'll just allow that to
-                 * happen.  The compiler thread, if it chooses, can
-                 * discard redundant requests.
-                 */
-                setTraceConstruction(slot, true);
+                JitEntry *slot = lookupAndAdd(self->interpSave.pc,
+                                              false /* lock */,
+                                              false /* method entry */);
+                if (slot == NULL) {
+                    /*
+                     * Table is full.  This should have been
+                     * detected by the compiler thread and the table
+                     * resized before we run into it here.  Assume bad things
+                     * are afoot and disable profiling.
+                     */
+                    self->jitState = kJitDone;
+                    LOGD("JIT: JitTable full, disabling profiling");
+                    dvmJitStopTranslationRequests();
+                }
             }
         }
 
-        switch (interpState->jitState) {
+        switch (self->jitState) {
             case kJitTSelectRequest:
             case kJitTSelectRequestHot:
-                interpState->jitState = kJitTSelect;
-                interpState->currTraceHead = interpState->pc;
-                interpState->currTraceRun = 0;
-                interpState->totalTraceLen = 0;
-                interpState->currRunHead = interpState->pc;
-                interpState->currRunLen = 0;
-                interpState->trace[0].frag.startOffset =
-                     interpState->pc - interpState->method->insns;
-                interpState->trace[0].frag.numInsts = 0;
-                interpState->trace[0].frag.runEnd = false;
-                interpState->trace[0].frag.hint = kJitHintNone;
-                interpState->trace[0].frag.isCode = true;
-                interpState->lastPC = 0;
+                self->jitState = kJitTSelect;
+                self->currTraceHead = self->interpSave.pc;
+                self->currTraceRun = 0;
+                self->totalTraceLen = 0;
+                self->currRunHead = self->interpSave.pc;
+                self->currRunLen = 0;
+                self->trace[0].info.frag.startOffset =
+                     self->interpSave.pc - self->interpSave.method->insns;
+                self->trace[0].info.frag.numInsts = 0;
+                self->trace[0].info.frag.runEnd = false;
+                self->trace[0].info.frag.hint = kJitHintNone;
+                self->trace[0].isCode = true;
+                self->lastPC = 0;
                 break;
             /*
              * For JIT's perspective there is no need to stay in the debug
@@ -1202,14 +1239,14 @@
                 break;
             default:
                 LOGE("Unexpected JIT state: %d entry point: %d",
-                     interpState->jitState, interpState->entryPoint);
+                     self->jitState, self->entryPoint);
                 dvmAbort();
         }
     } else {
         /*
          * Cannot build trace this time - ready to leave the dbg interpreter
          */
-        interpState->jitState = kJitDone;
+        self->jitState = kJitDone;
         switchInterp = true;
     }
 
@@ -1217,7 +1254,7 @@
      * Final check to see if we can really switch the interpreter. Make sure
      * the jitState is kJitDone when switchInterp is set to true.
      */
-    assert(switchInterp == false || interpState->jitState == kJitDone);
+    assert(switchInterp == false || self->jitState == kJitDone);
     return switchInterp && !debugOrProfile &&
            !dvmJitStayInPortableInterpreter();
 }
@@ -1278,7 +1315,8 @@
         if (pOldTable[i].dPC) {
             JitEntry *p;
             u2 chain;
-            p = lookupAndAdd(pOldTable[i].dPC, true /* holds tableLock*/ );
+            p = lookupAndAdd(pOldTable[i].dPC, true /* holds tableLock*/,
+                             pOldTable[i].u.info.isMethodEntry);
             p->codeAddress = pOldTable[i].codeAddress;
             /* We need to preserve the new chain field, but copy the rest */
             chain = p->u.info.chain;
@@ -1286,6 +1324,7 @@
             p->u.info.chain = chain;
         }
     }
+
     dvmUnlockMutex(&gDvmJit.tableLock);
 
     free(pOldTable);
@@ -1306,6 +1345,17 @@
     unsigned int i;
 
     dvmLockMutex(&gDvmJit.tableLock);
+
+    /* Note: if we need to preserve any existing counts, do so here. */
+    if (gDvmJit.pJitTraceProfCounters) {
+        for (i=0; i < JIT_PROF_BLOCK_BUCKETS; i++) {
+            if (gDvmJit.pJitTraceProfCounters->buckets[i])
+                memset((void *) gDvmJit.pJitTraceProfCounters->buckets[i],
+                       0, sizeof(JitTraceCounter_t) * JIT_PROF_BLOCK_ENTRIES);
+        }
+        gDvmJit.pJitTraceProfCounters->next = 0;
+    }
+
     memset((void *) jitEntry, 0, sizeof(JitEntry) * size);
     for (i=0; i< size; i++) {
         jitEntry[i].u.info.chain = size;  /* Initialize chain termination */
@@ -1315,6 +1365,31 @@
 }
 
 /*
+ * Return the address of the next trace profile counter.  This address
+ * will be embedded in the generated code for the trace, and thus cannot
+ * change while the trace exists.
+ */
+JitTraceCounter_t *dvmJitNextTraceCounter()
+{
+    int idx = gDvmJit.pJitTraceProfCounters->next / JIT_PROF_BLOCK_ENTRIES;
+    int elem = gDvmJit.pJitTraceProfCounters->next % JIT_PROF_BLOCK_ENTRIES;
+    JitTraceCounter_t *res;
+    /* Lazily allocate blocks of counters */
+    if (!gDvmJit.pJitTraceProfCounters->buckets[idx]) {
+        JitTraceCounter_t *p =
+              (JitTraceCounter_t*) calloc(JIT_PROF_BLOCK_ENTRIES, sizeof(*p));
+        if (!p) {
+            LOGE("Failed to allocate block of trace profile counters");
+            dvmAbort();
+        }
+        gDvmJit.pJitTraceProfCounters->buckets[idx] = p;
+    }
+    res = &gDvmJit.pJitTraceProfCounters->buckets[idx][elem];
+    gDvmJit.pJitTraceProfCounters->next++;
+    return res;
+}
+
+/*
  * Float/double conversion requires clamping to min and max of integer form.  If
  * target doesn't support this normally, use these.
  */
@@ -1346,4 +1421,33 @@
         return (s8)f;
 }
 
+/* Should only be called by the compiler thread */
+void dvmJitChangeProfileMode(TraceProfilingModes newState)
+{
+    if (gDvmJit.profileMode != newState) {
+        gDvmJit.profileMode = newState;
+        dvmJitUnchainAll();
+    }
+}
+
+void dvmJitTraceProfilingOn()
+{
+    if (gDvmJit.profileMode == kTraceProfilingPeriodicOff)
+        dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode,
+                                    (void*) kTraceProfilingPeriodicOn);
+    else if (gDvmJit.profileMode == kTraceProfilingDisabled)
+        dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode,
+                                    (void*) kTraceProfilingContinuous);
+}
+
+void dvmJitTraceProfilingOff()
+{
+    if (gDvmJit.profileMode == kTraceProfilingPeriodicOn)
+        dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode,
+                                    (void*) kTraceProfilingPeriodicOff);
+    else if (gDvmJit.profileMode == kTraceProfilingContinuous)
+        dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode,
+                                    (void*) kTraceProfilingDisabled);
+}
+
 #endif /* WITH_JIT */
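
dvmJitNextTraceCounter hands out counter addresses from lazily allocated blocks that are never moved or freed, so a pointer baked into generated code stays valid for the life of the pool. In isolation the allocator pattern is roughly the sketch below; the block and bucket sizes echo JIT_PROF_BLOCK_ENTRIES/JIT_PROF_BLOCK_BUCKETS, but the CounterPool type and names are illustrative, not the real gDvmJit fields.

#include <stdio.h>
#include <stdlib.h>

#define BLOCK_ENTRIES 1024                       /* counters per block */
#define BLOCK_BUCKETS 64                         /* max blocks */

typedef int Counter;

typedef struct {
    unsigned next;                               /* next unused counter index */
    Counter* buckets[BLOCK_BUCKETS];             /* lazily allocated blocks */
} CounterPool;

/*
 * Return the address of the next counter.  Blocks are allocated on demand and
 * never moved, so the returned pointer can be embedded in generated code.
 */
static Counter* nextCounter(CounterPool* pool)
{
    unsigned idx  = pool->next / BLOCK_ENTRIES;
    unsigned elem = pool->next % BLOCK_ENTRIES;
    if (idx >= BLOCK_BUCKETS) return NULL;       /* pool exhausted */
    if (!pool->buckets[idx]) {
        pool->buckets[idx] = calloc(BLOCK_ENTRIES, sizeof(Counter));
        if (!pool->buckets[idx]) return NULL;
    }
    pool->next++;
    return &pool->buckets[idx][elem];
}

int main(void)
{
    CounterPool pool = { 0 };
    Counter* c1 = nextCounter(&pool);
    Counter* c2 = nextCounter(&pool);
    (*c1)++; (*c1)++; (*c2)++;                   /* a trace bumping its counter */
    printf("c1=%d c2=%d\n", *c1, *c2);
    free(pool.buckets[0]);
    return 0;
}
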
diff --git a/vm/interp/Jit.h b/vm/interp/Jit.h
index 6101f54..9597f2f 100644
--- a/vm/interp/Jit.h
+++ b/vm/interp/Jit.h
@@ -41,13 +41,15 @@
 
 typedef struct ShadowSpace {
     const u2* startPC;          /* starting pc of jitted region */
-    const void* fp;             /* starting fp of jitted region */
-    void* glue;                 /* starting glue of jitted region */
+    u4* fp;                     /* starting fp of jitted region */
+    const Method *method;
+    DvmDex* methodClassDex;
+    JValue retval;
+    const u1* interpStackEnd;
     SelfVerificationState jitExitState;  /* exit point for JIT'ed code */
     SelfVerificationState selfVerificationState;  /* current SV running state */
     const u2* endPC;            /* ending pc of jitted region */
     void* shadowFP;       /* pointer to fp in shadow space */
-    InterpState interpState;    /* copy of interpState */
     int* registerSpace;         /* copy of register state */
     int registerSpaceSize;      /* current size of register space */
     ShadowHeap heapSpace[HEAP_SPACE]; /* copy of heap space */
@@ -55,7 +57,6 @@
     const void* endShadowFP;    /* ending fp in shadow space */
     InstructionTrace trace[JIT_MAX_TRACE_LEN]; /* opcode trace for debugging */
     int traceLength;            /* counter for current trace length */
-    const Method* method;       /* starting method of jitted region */
 } ShadowSpace;
 
 /*
@@ -63,14 +64,23 @@
  */
 void* dvmSelfVerificationShadowSpaceAlloc(Thread* self);
 void dvmSelfVerificationShadowSpaceFree(Thread* self);
-void* dvmSelfVerificationSaveState(const u2* pc, const void* fp,
-                                   InterpState* interpState,
+void* dvmSelfVerificationSaveState(const u2* pc, u4* fp,
+                                   Thread* self,
                                    int targetTrace);
-void* dvmSelfVerificationRestoreState(const u2* pc, const void* fp,
-                                      SelfVerificationState exitPoint);
+void* dvmSelfVerificationRestoreState(const u2* pc, u4* fp,
+                                      SelfVerificationState exitPoint,
+                                      Thread *self);
 #endif
 
 /*
+ * Offsets for metadata in the trace run array from the trace that ends with
+ * invoke instructions.
+ */
+#define JIT_TRACE_CLASS_DESC    1
+#define JIT_TRACE_CLASS_LOADER  2
+#define JIT_TRACE_CUR_METHOD    3
+
+/*
  * JitTable hash function.
  */
 
@@ -83,18 +93,41 @@
 }
 
 /*
+ * The width of the chain field in JitEntryInfo sets the upper
+ * bound on the number of translations.  Be careful if changing
+ * the size of JitEntry struct - the Dalvik PC to JitEntry
+ * hash functions have built-in knowledge of the size.
+ */
+#define JIT_ENTRY_CHAIN_WIDTH 2
+#define JIT_MAX_ENTRIES (1 << (JIT_ENTRY_CHAIN_WIDTH * 8))
+
+/*
+ * The trace profiling counters are allocated in blocks and individual
+ * counters must not move so long as any referencing trace exists.
+ */
+#define JIT_PROF_BLOCK_ENTRIES 1024
+#define JIT_PROF_BLOCK_BUCKETS (JIT_MAX_ENTRIES / JIT_PROF_BLOCK_ENTRIES)
+
+typedef s4 JitTraceCounter_t;
+
+typedef struct JitTraceProfCounters {
+    unsigned int           next;
+    JitTraceCounter_t      *buckets[JIT_PROF_BLOCK_BUCKETS];
+} JitTraceProfCounters;
+
+/*
  * Entries in the JIT's address lookup hash table.
  * Fields which may be updated by multiple threads packed into a
  * single 32-bit word to allow use of atomic update.
  */
 
 typedef struct JitEntryInfo {
-    unsigned int           traceConstruction:1;   /* build underway? */
     unsigned int           isMethodEntry:1;
     unsigned int           inlineCandidate:1;
     unsigned int           profileEnabled:1;
-    JitInstructionSetType  instructionSet:4;
-    unsigned int           unused:8;
+    JitInstructionSetType  instructionSet:3;
+    unsigned int           profileOffset:5;
+    unsigned int           unused:5;
     u2                     chain;                 /* Index of next in chain */
 } JitEntryInfo;
 
@@ -109,18 +142,24 @@
     void*               codeAddress;    /* Code address of native translation */
 } JitEntry;
 
-int dvmCheckJit(const u2* pc, Thread* self, InterpState* interpState,
-                const ClassObject *callsiteClass, const Method* curMethod);
-void* dvmJitGetCodeAddr(const u2* dPC);
-bool dvmJitCheckTraceRequest(Thread* self, InterpState* interpState);
+int dvmCheckJit(const u2* pc, Thread* self, const ClassObject *callsiteClass,
+                const Method* curMethod);
+void* dvmJitGetTraceAddr(const u2* dPC);
+void* dvmJitGetMethodAddr(const u2* dPC);
+bool dvmJitCheckTraceRequest(Thread* self);
 void dvmJitStopTranslationRequests(void);
 void dvmJitStats(void);
 bool dvmJitResizeJitTable(unsigned int size);
 void dvmJitResetTable(void);
-struct JitEntry *dvmFindJitEntry(const u2* pc);
+struct JitEntry *dvmJitFindEntry(const u2* pc, bool isMethodEntry);
 s8 dvmJitd2l(double d);
 s8 dvmJitf2l(float f);
-void dvmJitSetCodeAddr(const u2* dPC, void *nPC, JitInstructionSetType set);
-void dvmJitAbortTraceSelect(InterpState* interpState);
+void dvmJitSetCodeAddr(const u2* dPC, void *nPC, JitInstructionSetType set,
+                       bool isMethodEntry, int profilePrefixSize);
+void dvmJitEndTraceSelect(Thread* self);
+JitTraceCounter_t *dvmJitNextTraceCounter(void);
+void dvmJitTraceProfilingOff(void);
+void dvmJitTraceProfilingOn(void);
+void dvmJitChangeProfileMode(TraceProfilingModes newState);
 
 #endif /*_DALVIK_INTERP_JIT*/
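The JitTraceProfCounters block scheme added to Jit.h above pairs with the dvmJitNextTraceCounter() fragment at the start of this section. A hedged sketch of the indexing math (counterForSlot is an illustrative name, not from the sources; bucket allocation and locking are omitted):

    /* Each bucket holds JIT_PROF_BLOCK_ENTRIES counters; once allocated, a
     * bucket never moves, so translations can keep raw pointers to their
     * counters.  Mirrors the buckets[idx][elem] access shown above. */
    JitTraceCounter_t *counterForSlot(JitTraceProfCounters *p, unsigned int slot)
    {
        unsigned int idx  = slot / JIT_PROF_BLOCK_ENTRIES;   /* which bucket */
        unsigned int elem = slot % JIT_PROF_BLOCK_ENTRIES;   /* slot within bucket */
        return &p->buckets[idx][elem];   /* assumes buckets[idx] is allocated */
    }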
diff --git a/vm/interp/Stack.c b/vm/interp/Stack.c
index b0aad81..97fbeb5 100644
--- a/vm/interp/Stack.c
+++ b/vm/interp/Stack.c
@@ -100,7 +100,7 @@
     memset(stackPtr - (method->outsSize*4), 0xaf, stackReq);
 #endif
 #ifdef EASY_GDB
-    breakSaveBlock->prevSave = FP_FROM_SAVEAREA(self->curFrame);
+    breakSaveBlock->prevSave = (StackSaveArea*)FP_FROM_SAVEAREA(self->curFrame);
     saveBlock->prevSave = breakSaveBlock;
 #endif
 
@@ -176,8 +176,10 @@
 #ifdef EASY_GDB
     if (self->curFrame == NULL)
         breakSaveBlock->prevSave = NULL;
-    else
-        breakSaveBlock->prevSave = FP_FROM_SAVEAREA(self->curFrame);
+    else {
+        void* fp = FP_FROM_SAVEAREA(self->curFrame);
+        breakSaveBlock->prevSave = (StackSaveArea*)fp;
+    }
     saveBlock->prevSave = breakSaveBlock;
 #endif
 
@@ -187,11 +189,7 @@
     breakSaveBlock->method = NULL;
     saveBlock->prevFrame = FP_FROM_SAVEAREA(breakSaveBlock);
     saveBlock->savedPc = NULL;                  // not required
-#ifdef USE_INDIRECT_REF
     saveBlock->xtra.localRefCookie = self->jniLocalRefTable.segmentState.all;
-#else
-    saveBlock->xtra.localRefCookie = self->jniLocalRefTable.nextEntry;
-#endif
     saveBlock->method = method;
 
     LOGVV("PUSH JNI frame: old=%p new=%p (size=%d)\n",
@@ -245,16 +243,12 @@
     memset(stackPtr, 0xaf, stackReq);
 #endif
 #ifdef EASY_GDB
-    saveBlock->prevSave = FP_FROM_SAVEAREA(self->curFrame);
+    saveBlock->prevSave = (StackSaveArea*)FP_FROM_SAVEAREA(self->curFrame);
 #endif
 
     saveBlock->prevFrame = self->curFrame;
     saveBlock->savedPc = NULL;                  // not required
-#ifdef USE_INDIRECT_REF
     saveBlock->xtra.localRefCookie = self->jniLocalRefTable.segmentState.all;
-#else
-    saveBlock->xtra.localRefCookie = self->jniLocalRefTable.nextEntry;
-#endif
     saveBlock->method = method;
 
     LOGVV("PUSH JNI local frame: old=%p new=%p (size=%d)\n",
@@ -276,13 +270,13 @@
 {
     StackSaveArea* saveBlock = SAVEAREA_FROM_FP(self->curFrame);
 
-    assert(!dvmIsBreakFrame(self->curFrame));
+    assert(!dvmIsBreakFrame((u4*)self->curFrame));
     if (saveBlock->method != SAVEAREA_FROM_FP(saveBlock->prevFrame)->method) {
         /*
          * The previous frame doesn't have the same method pointer -- we've
          * been asked to pop too much.
          */
-        assert(dvmIsBreakFrame(saveBlock->prevFrame) ||
+        assert(dvmIsBreakFrame((u4*)saveBlock->prevFrame) ||
                !dvmIsNativeMethod(
                        SAVEAREA_FROM_FP(saveBlock->prevFrame)->method));
         return false;
@@ -314,7 +308,7 @@
         return false;
 
     saveBlock = SAVEAREA_FROM_FP(self->curFrame);
-    assert(!dvmIsBreakFrame(self->curFrame));
+    assert(!dvmIsBreakFrame((u4*)self->curFrame));
 
     /*
      * Remove everything up to the break frame.  If this was a call into
@@ -390,8 +384,7 @@
                 method))
         {
             /* note this throws IAException, not IAError */
-            dvmThrowException("Ljava/lang/IllegalAccessException;",
-                "access to method denied");
+            dvmThrowIllegalAccessException("access to method denied");
             return NULL;
         }
     }
@@ -523,7 +516,7 @@
          * Because we leave no space for local variables, "curFrame" points
          * directly at the method arguments.
          */
-        (*method->nativeFunc)(self->curFrame, pResult, method, self);
+        (*method->nativeFunc)((u4*)self->curFrame, pResult, method, self);
         TRACE_METHOD_EXIT(self, method);
     } else {
         dvmInterpret(self, method, pResult);
@@ -627,7 +620,7 @@
          * Because we leave no space for local variables, "curFrame" points
          * directly at the method arguments.
          */
-        (*method->nativeFunc)(self->curFrame, pResult, method, self);
+        (*method->nativeFunc)((u4*)self->curFrame, pResult, method, self);
         TRACE_METHOD_EXIT(self, method);
     } else {
         dvmInterpret(self, method, pResult);
@@ -644,7 +637,7 @@
     char* actualClassName = (arg != NULL)
         ? dvmHumanReadableDescriptor(arg->obj.clazz->descriptor)
         : strdup("null");
-    dvmThrowExceptionFmt("Ljava/lang/IllegalArgumentException;",
+    dvmThrowExceptionFmt(gDvm.exIllegalArgumentException,
         "argument %d should have type %s, got %s",
         argIndex + 1, expectedClassName, actualClassName);
     free(expectedClassName);
@@ -680,7 +673,7 @@
     else
         argListLength = 0;
     if (argListLength != (int) params->length) {
-        dvmThrowExceptionFmt("Ljava/lang/IllegalArgumentException;",
+        dvmThrowExceptionFmt(gDvm.exIllegalArgumentException,
             "wrong number of arguments; expected %d, got %d",
             params->length, argListLength);
         return NULL;
@@ -744,7 +737,7 @@
          * Because we leave no space for local variables, "curFrame" points
          * directly at the method arguments.
          */
-        (*method->nativeFunc)(self->curFrame, &retval, method, self);
+        (*method->nativeFunc)((u4*)self->curFrame, &retval, method, self);
         TRACE_METHOD_EXIT(self, method);
     } else {
         dvmInterpret(self, method, &retval);
@@ -855,7 +848,7 @@
     int count = 0;
 
     for ( ; fp != NULL; fp = SAVEAREA_FROM_FP(fp)->prevFrame) {
-        if (!dvmIsBreakFrame(fp))
+        if (!dvmIsBreakFrame((u4*)fp))
             count++;
     }
 
@@ -891,7 +884,7 @@
     StackSaveArea* saveArea;
 
 retry:
-    if (dvmIsBreakFrame(caller)) {
+    if (dvmIsBreakFrame((u4*)caller)) {
         /* pop up one more */
         caller = SAVEAREA_FROM_FP(caller)->prevFrame;
         if (caller == NULL)
@@ -941,7 +934,7 @@
     void* callerCaller;
 
     /* at the top? */
-    if (dvmIsBreakFrame(caller) && SAVEAREA_FROM_FP(caller)->prevFrame == NULL)
+    if (dvmIsBreakFrame((u4*)caller) && SAVEAREA_FROM_FP(caller)->prevFrame == NULL)
         return NULL;
 
     /* go one more */
@@ -964,7 +957,7 @@
     int i;
 
     /* at the top? */
-    if (dvmIsBreakFrame(caller) && SAVEAREA_FROM_FP(caller)->prevFrame == NULL)
+    if (dvmIsBreakFrame((u4*)caller) && SAVEAREA_FROM_FP(caller)->prevFrame == NULL)
         return NULL;
 
     /* Walk up two frames if possible. */
@@ -1001,7 +994,7 @@
         return false;
 
     for (idx = 0; fp != NULL; fp = SAVEAREA_FROM_FP(fp)->prevFrame) {
-        if (!dvmIsBreakFrame(fp))
+        if (!dvmIsBreakFrame((u4*)fp))
             array[idx++] = SAVEAREA_FROM_FP(fp)->method;
     }
     assert(idx == depth);
@@ -1063,8 +1056,7 @@
         LOGW("Stack overflow while throwing exception\n");
         dvmClearException(self);
     }
-    dvmThrowChainedExceptionByClass(gDvm.classJavaLangStackOverflowError,
-        NULL, excep);
+    dvmThrowChainedException(gDvm.exStackOverflowError, NULL, excep);
 }
 
 /*
@@ -1077,7 +1069,7 @@
 
     assert(self->stackOverflowed);
 
-    if (exception->clazz != gDvm.classJavaLangStackOverflowError) {
+    if (exception->clazz != gDvm.exStackOverflowError) {
         /* exception caused during SOE, not the SOE itself */
         return;
     }
@@ -1116,7 +1108,7 @@
 {
     void* framePtr = thread->curFrame;
 
-    if (framePtr == NULL || dvmIsBreakFrame(framePtr))
+    if (framePtr == NULL || dvmIsBreakFrame((u4*)framePtr))
         return false;
 
     const StackSaveArea* saveArea = SAVEAREA_FROM_FP(framePtr);
@@ -1210,7 +1202,7 @@
      * The "currentPc" is updated whenever we execute an instruction that
      * might throw an exception.  Show it here.
      */
-    if (framePtr != NULL && !dvmIsBreakFrame(framePtr)) {
+    if (framePtr != NULL && !dvmIsBreakFrame((u4*)framePtr)) {
         saveArea = SAVEAREA_FROM_FP(framePtr);
 
         if (saveArea->xtra.currentPc != NULL)
@@ -1221,7 +1213,7 @@
         saveArea = SAVEAREA_FROM_FP(framePtr);
         method = saveArea->method;
 
-        if (dvmIsBreakFrame(framePtr)) {
+        if (dvmIsBreakFrame((u4*)framePtr)) {
             //dvmPrintDebugMessage(target, "  (break frame)\n");
         } else {
             int relPc;
diff --git a/vm/interp/Stack.h b/vm/interp/Stack.h
index 3f76cb1..10c04a0 100644
--- a/vm/interp/Stack.h
+++ b/vm/interp/Stack.h
@@ -140,11 +140,7 @@
 
     union {
         /* for JNI native methods: bottom of local reference segment */
-#ifdef USE_INDIRECT_REF
         u4          localRefCookie;
-#else
-        Object**    localRefCookie;
-#endif
 
         /* for interpreted methods: saved current PC, for exception stack
          * traces and debugger traces */
diff --git a/vm/jdwp/ExpandBuf.c b/vm/jdwp/ExpandBuf.c
index ade239c..cb3386b 100644
--- a/vm/jdwp/ExpandBuf.c
+++ b/vm/jdwp/ExpandBuf.c
@@ -93,7 +93,7 @@
     while (pBuf->curLen + newCount > pBuf->maxLen)
         pBuf->maxLen *= 2;
 
-    newPtr = realloc(pBuf->storage, pBuf->maxLen);
+    newPtr = (u1*) realloc(pBuf->storage, pBuf->maxLen);
     if (newPtr == NULL) {
         LOGE("realloc(%d) failed\n", pBuf->maxLen);
         abort();
diff --git a/vm/jdwp/JdwpAdb.c b/vm/jdwp/JdwpAdb.c
index c3a1a72..e73814d 100644
--- a/vm/jdwp/JdwpAdb.c
+++ b/vm/jdwp/JdwpAdb.c
@@ -92,7 +92,7 @@
 static JdwpNetState*
 adbStateAlloc(void)
 {
-    JdwpNetState*   netState = calloc(sizeof(*netState),1);
+    JdwpNetState* netState = (JdwpNetState*) calloc(sizeof(*netState),1);
 
     netState->controlSock = -1;
     netState->clientSock  = -1;
diff --git a/vm/jdwp/JdwpMain.c b/vm/jdwp/JdwpMain.c
index 24e5c6c..b4471da 100644
--- a/vm/jdwp/JdwpMain.c
+++ b/vm/jdwp/JdwpMain.c
@@ -393,7 +393,7 @@
  */
 s8 dvmJdwpLastDebuggerActivity(JdwpState* state)
 {
-    if (!gDvm.debuggerActive) {
+    if (!DEBUGGER_ACTIVE) {
         LOGD("dvmJdwpLastDebuggerActivity: no active debugger\n");
         return -1;
     }
diff --git a/vm/mterp/Mterp.c b/vm/mterp/Mterp.c
index f4740fe..f6e329f 100644
--- a/vm/mterp/Mterp.c
+++ b/vm/mterp/Mterp.c
@@ -31,8 +31,8 @@
 
 #ifndef DVM_NO_ASM_INTERP
 
-    extern char dvmAsmInstructionStart[];
-    extern char dvmAsmInstructionEnd[];
+    extern void* dvmAsmInstructionStart[];
+    extern void* dvmAsmInstructionEnd[];
 
 #define ASM_DEF_VERIFY
 #include "mterp/common/asm-constants.h"
@@ -42,17 +42,22 @@
         dvmAbort();
     }
 
+#ifndef DVM_JMP_TABLE_MTERP
     /*
-     * If an instruction overflows the 64-byte handler size limit, it will
-     * push everything up and alter the total size.  Check it here.
+     * If we're using computed goto instruction transitions, make sure
+     * none of the handlers overflows the 64-byte limit.  This won't tell
+     * which one did, but if any one is too big the total size will
+     * overflow.
      */
     const int width = 64;
-    int interpSize = dvmAsmInstructionEnd - dvmAsmInstructionStart;
+    int interpSize = (uintptr_t) dvmAsmInstructionEnd -
+                     (uintptr_t) dvmAsmInstructionStart;
     if (interpSize != 0 && interpSize != kNumPackedOpcodes*width) {
         LOGE("ERROR: unexpected asm interp size %d\n", interpSize);
         LOGE("(did an instruction handler exceed %d bytes?)\n", width);
         dvmAbort();
     }
+#endif
 
 #endif // ndef DVM_NO_ASM_INTERP
 
@@ -61,63 +66,51 @@
 
 
 /*
- * "Standard" mterp entry point.  This sets up a "glue" structure and then
- * calls into the assembly interpreter implementation.
- *
+ * "Standard" mterp entry point.
  * (There is presently no "debug" entry point.)
  */
-bool dvmMterpStd(Thread* self, InterpState* glue)
+bool dvmMterpStd(Thread* self)
 {
     int changeInterp;
 
     /* configure mterp items */
-    glue->self = self;
-    glue->methodClassDex = glue->method->clazz->pDvmDex;
+    self->interpSave.methodClassDex = self->interpSave.method->clazz->pDvmDex;
 
-    glue->interpStackEnd = self->interpStackEnd;
-    glue->pSelfSuspendCount = &self->suspendCount;
-    glue->cardTable = gDvm.biasedCardTableBase;
 #if defined(WITH_JIT)
-    glue->pJitProfTable = gDvmJit.pProfTable;
-    glue->ppJitProfTable = &gDvmJit.pProfTable;
-    glue->jitThreshold = gDvmJit.threshold;
-#endif
-#if defined(WITH_INLINE_PROFILING)
     /*
-     * If WITH_INLINE_PROFILING is defined, we won't switch to the debug
-     * interpreter when a new method is entered. So we need to register the
-     * METHOD_ENTER action here.
-     */
-    if (glue->debugIsMethodEntry) {
-        glue->debugIsMethodEntry = false;
-        TRACE_METHOD_ENTER(self, glue->method);
-    }
+     * FIXME: temporary workaround.  When we have the ability to
+     * walk through the thread list to initialize mterp & JIT state,
+     * eliminate this line.
+    */
+    self->jitThreshold = gDvmJit.threshold;
 #endif
-    if (gDvm.jdwpConfigured) {
-        glue->pDebuggerActive = &gDvm.debuggerActive;
-    } else {
-        glue->pDebuggerActive = NULL;
+
+    /* Handle method entry bookkeeping */
+    if (self->debugIsMethodEntry) {
+        self->debugIsMethodEntry = false;
+        TRACE_METHOD_ENTER(self, self->interpSave.method);
     }
-    glue->pActiveProfilers = &gDvm.activeProfilers;
 
     IF_LOGVV() {
-        char* desc = dexProtoCopyMethodDescriptor(&glue->method->prototype);
+        char* desc = dexProtoCopyMethodDescriptor(
+                         &self->interpSave.method->prototype);
         LOGVV("mterp threadid=%d entry %d: %s.%s %s\n",
             dvmThreadSelf()->threadId,
-            glue->entryPoint,
-            glue->method->clazz->descriptor,
-            glue->method->name,
+            self->entryPoint,
+            self->method->clazz->descriptor,
+            self->method->name,
             desc);
         free(desc);
     }
-    //LOGI("glue is %p, pc=%p, fp=%p\n", glue, glue->pc, glue->fp);
-    //LOGI("first instruction is 0x%04x\n", glue->pc[0]);
+    //LOGI("self is %p, pc=%p, fp=%p\n", self, self->interpSave.pc,
+    //      self->interpSave.fp);
+    //LOGI("first instruction is 0x%04x\n", self->interpSave.pc[0]);
 
-    changeInterp = dvmMterpStdRun(glue);
+    changeInterp = dvmMterpStdRun(self);
 
 #if defined(WITH_JIT)
-    if (glue->jitState != kJitSingleStep) {
-        glue->self->inJitCodeCache = NULL;
+    if (self->jitState != kJitSingleStep) {
+        self->inJitCodeCache = NULL;
     }
 #endif
 
@@ -130,7 +123,7 @@
     } else {
         /* we're "standard", so switch to "debug" */
         LOGVV("  mterp returned, changeInterp=%d\n", changeInterp);
-        glue->nextMode = INTERP_DBG;
+        self->nextMode = INTERP_DBG;
         return true;
     }
 }
diff --git a/vm/mterp/Mterp.h b/vm/mterp/Mterp.h
index 8b3f7b4..69e5a2c 100644
--- a/vm/mterp/Mterp.h
+++ b/vm/mterp/Mterp.h
@@ -27,13 +27,6 @@
 #endif
 
 /*
- * Interpreter state, passed into C functions from assembly stubs.  The
- * assembly code exports all registers into the "glue" structure before
- * calling, then extracts them when the call returns.
- */
-typedef InterpState MterpGlue;
-
-/*
  * Call this during initialization to verify that the values in asm-constants.h
  * are still correct.
  */
@@ -49,7 +42,7 @@
  *
  * The "mterp" interpreter is always "standard".
  */
-bool dvmMterpStdRun(MterpGlue* glue);
-void dvmMterpStdBail(MterpGlue* glue, bool changeInterp);
+bool dvmMterpStdRun(Thread* self);
+void dvmMterpStdBail(Thread* self, bool changeInterp);
 
 #endif /*_DALVIK_MTERP_MTERP*/
diff --git a/vm/mterp/README.txt b/vm/mterp/README.txt
index 9e28ebc..e628fb5 100644
--- a/vm/mterp/README.txt
+++ b/vm/mterp/README.txt
@@ -36,13 +36,17 @@
 some operations (e.g. making use of PLD instructions on ARMv6 or avoiding
 CLZ on ARMv4T).
 
-Two basic assumptions are made about the operation of the interpreter:
+Depending on architecture, instruction-to-instruction transitions may
+be done as either computed goto or jump table.  In the computed goto
+variant, each instruction handler is allocated a fixed-size area (e.g. 64
+bytes).  "Overflow" code is tacked on to the end.  In the jump table variant,
+all of the instruction handlers are contiguous and may be of any size.
+The interpreter style is selected via the "handler-style" command (see below).
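The two transition styles described above boil down to two dispatch shapes. A minimal C-style sketch, assuming illustrative names (handlerBase, handlerTable, HANDLER_SIZE are not from the sources):

    /* Two ways to locate the handler for an opcode, per the README text:
     * computed-goto uses fixed-size handler slots; jump-table uses an
     * array of pointers indexed by opcode. */
    #define HANDLER_SIZE 64                          /* fixed slot size, e.g. 64 bytes */
    void *computedGotoTarget(void *handlerBase, unsigned opcode) {
        return (char *)handlerBase + opcode * HANDLER_SIZE;
    }
    void *jumpTableTarget(void *handlerTable[], unsigned opcode) {
        return handlerTable[opcode];
    }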
 
- - The assembly version uses fixed-size areas for each instruction
-   (e.g. 64 bytes).  "Overflow" code is tacked on to the end.
- - When a C implementation is desired, the assembly version packs all
-   local state into a "glue" struct, and passes that into the C function.
-   Updates to the state are pulled out of the "glue" on return.
+When a C implementation for an instruction is desired, the assembly
+version packs all local state into the Thread structure and passes
+that to the C function.  Updates to the state are pulled out of
+"Thread" on return.
 
 The "arch" value should indicate an architecture family with common
 programming characteristics, so "armv5te" would work for all ARMv5TE CPUs,
@@ -58,10 +62,25 @@
 
 The commands are:
 
+  handler-style <computed-goto|jump-table|all-c>
+
+    Specify which style of interpreter to generate.  In computed-goto,
+    each handler is allocated a fixed region, allowing transitions to
+    be done via table-start-address + (opcode * handler-size). With
+    jump-table style, handlers may be of any length, and the generated
+    table is an array of pointers to the handlers. The "all-c" style is
+    for the portable interpreter (which is implemented completely in C).
+    [Note: all-c is distinct from an "allstubs" configuration.  In both
+    configurations, all handlers are the C versions, but the allstubs
+    configuration uses the assembly outer loop and assembly stubs to
+    transition to the handlers].  This command is required, and must be
+    the first command in the config file.
+
   handler-size <bytes>
 
-    Specify the size of the assembly region, in bytes.  On most platforms
-    this will need to be a power of 2.
+    Specify the size of the fixed region, in bytes.  On most platforms
+    this will need to be a power of 2.  For jump-table and all-c
+    implementations, this command is ignored.
 
   import <filename>
 
@@ -71,8 +90,16 @@
 
   asm-stub <filename>
 
-    The named file will be included whenever an assembly "stub" is needed.
-    Text substitution is performed on the opcode name.
+    The named file will be included whenever an assembly "stub" is needed
+    to transfer control to a handler written in C.  Text substitution is
+    performed on the opcode name.  This command is not applicable
+    to "all-c" configurations.
+
+  asm-alt-stub <filename>
+
+    When present, this command will cause the generation of an alternate
+    set of entry points (for computed-goto interpreters) or an alternate
+    jump table (for jump-table interpreters).
 
   op-start <directory>
 
@@ -88,15 +115,23 @@
     will load from "armv5te/OP_NOP.S".  A substitution dictionary will be
     applied (see below).
 
+  alt <opcode> <directory>
+
+    Can only appear after "op-start" and before "op-end".  Similar to the
+    "op" command above, but denotes a source file to override the entry
+    in the alternate handler table.  The opcode definition will come from
+    the specified file, e.g. "alt OP_NOP armv5te" will load from
+    "armv5te/ALT_OP_NOP.S".  A substitution dictionary will be applied
+    (see below).
+
   op-end
 
     Indicates the end of the opcode list.  All kNumPackedOpcodes
     opcodes are emitted when this is seen, followed by any code that
     didn't fit inside the fixed-size instruction handler space.
 
-
-The order of "op" directives is not significant; the generation tool will
-extract ordering info from the VM sources.
+The order of "op" and "alt" directives is not significant; the generation
+tool will extract ordering info from the VM sources.
 
 Typically the form in which most opcodes currently exist is used in
 the "op-start" directive.  For a new port you would start with "c",
@@ -160,6 +195,7 @@
     Identifies the split between the main portion of the instruction
     handler (which must fit in "handler-size" bytes) and the "sister"
     code, which is appended to the end of the instruction handler block.
+    In jump table implementations, %break is ignored.
 
   %verify "message"
 
diff --git a/vm/mterp/armv5te/OP_APUT_OBJECT.S b/vm/mterp/armv5te/OP_APUT_OBJECT.S
index 00b3cc8..8d9a84c 100644
--- a/vm/mterp/armv5te/OP_APUT_OBJECT.S
+++ b/vm/mterp/armv5te/OP_APUT_OBJECT.S
@@ -35,7 +35,7 @@
     beq     .L${opcode}_throw           @ no
     mov     r1, rINST                   @ r1<- arrayObj
     FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
-    ldr     r2, [rGLUE, #offGlue_cardTable]     @ get biased CT base
+    ldr     r2, [rSELF, #offThread_cardTable]     @ get biased CT base
     add     r10, #offArrayObject_contents   @ r0<- pointer to slot
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     str     r9, [r10]                   @ vBB[vCC]<- vAA
diff --git a/vm/mterp/armv5te/OP_CHECK_CAST.S b/vm/mterp/armv5te/OP_CHECK_CAST.S
index 3a07ea3..57df60e 100644
--- a/vm/mterp/armv5te/OP_CHECK_CAST.S
+++ b/vm/mterp/armv5te/OP_CHECK_CAST.S
@@ -12,7 +12,7 @@
     mov     r3, rINST, lsr #8           @ r3<- AA
     FETCH(r2, 1)                        @ r2<- BBBB
     GET_VREG(r9, r3)                    @ r9<- object
-    ldr     r0, [rGLUE, #offGlue_methodClassDex]    @ r0<- pDvmDex
+    ldr     r0, [rSELF, #offThread_methodClassDex]    @ r0<- pDvmDex
     cmp     r9, #0                      @ is object null?
     ldr     r0, [r0, #offDvmDex_pResClasses]    @ r0<- pDvmDex->pResClasses
     beq     .L${opcode}_okay            @ null obj, cast always succeeds
@@ -56,7 +56,7 @@
      */
 .L${opcode}_resolve:
     EXPORT_PC()                         @ resolve() could throw
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     mov     r1, r2                      @ r1<- BBBB
     mov     r2, #0                      @ r2<- false
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
diff --git a/vm/mterp/armv5te/OP_CHECK_CAST_JUMBO.S b/vm/mterp/armv5te/OP_CHECK_CAST_JUMBO.S
new file mode 100644
index 0000000..3140ec4
--- /dev/null
+++ b/vm/mterp/armv5te/OP_CHECK_CAST_JUMBO.S
@@ -0,0 +1,75 @@
+%verify "executed"
+%verify "null object"
+%verify "class cast exception thrown, with correct class name"
+%verify "class cast exception not thrown on same class"
+%verify "class cast exception not thrown on subclass"
+%verify "class not resolved"
+%verify "class already resolved"
+    /*
+     * Check to see if a cast from one class to another is allowed.
+     */
+    /* check-cast/jumbo vBBBB, class@AAAAAAAA */
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r3, 3)                        @ r3<- BBBB
+    orr     r2, r0, r2, lsl #16         @ r2<- AAAAaaaa
+    GET_VREG(r9, r3)                    @ r9<- object
+    ldr     r0, [rSELF, #offThread_methodClassDex]    @ r0<- pDvmDex
+    cmp     r9, #0                      @ is object null?
+    ldr     r0, [r0, #offDvmDex_pResClasses]    @ r0<- pDvmDex->pResClasses
+    beq     .L${opcode}_okay            @ null obj, cast always succeeds
+    ldr     r1, [r0, r2, lsl #2]        @ r1<- resolved class
+    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
+    cmp     r1, #0                      @ have we resolved this before?
+    beq     .L${opcode}_resolve         @ not resolved, do it now
+.L${opcode}_resolved:
+    cmp     r0, r1                      @ same class (trivial success)?
+    bne     .L${opcode}_fullcheck       @ no, do full check
+    b       .L${opcode}_okay            @ yes, finish up
+%break
+
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  r0 holds obj->clazz
+     *  r1 holds desired class resolved from AAAAAAAA
+     *  r9 holds object
+     */
+.L${opcode}_fullcheck:
+    mov     r10, r1                     @ avoid ClassObject getting clobbered
+    bl      dvmInstanceofNonTrivial     @ r0<- boolean result
+    cmp     r0, #0                      @ failed?
+    bne     .L${opcode}_okay            @ no, success
+
+    @ A cast has failed.  We need to throw a ClassCastException.
+    EXPORT_PC()                         @ about to throw
+    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz (actual class)
+    mov     r1, r10                     @ r1<- desired class
+    bl      dvmThrowClassCastException
+    b       common_exceptionThrown
+
+    /*
+     * Advance PC and get the next opcode.
+     */
+.L${opcode}_okay:
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  r2 holds AAAAAAAA
+     *  r9 holds object
+     */
+.L${opcode}_resolve:
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    mov     r1, r2                      @ r1<- AAAAAAAA
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
+    cmp     r0, #0                      @ got null?
+    beq     common_exceptionThrown      @ yes, handle exception
+    mov     r1, r0                      @ r1<- class resolved from AAAAAAAA
+    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
+    b       .L${opcode}_resolved        @ pick up where we left off
diff --git a/vm/mterp/armv5te/OP_CONST_CLASS.S b/vm/mterp/armv5te/OP_CONST_CLASS.S
index 665e582..9256bf9 100644
--- a/vm/mterp/armv5te/OP_CONST_CLASS.S
+++ b/vm/mterp/armv5te/OP_CONST_CLASS.S
@@ -4,7 +4,7 @@
 %verify "Class cannot be resolved"
     /* const/class vAA, Class@BBBB */
     FETCH(r1, 1)                        @ r1<- BBBB
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]  @ r2<- self->methodClassDex
     mov     r9, rINST, lsr #8           @ r9<- AA
     ldr     r2, [r2, #offDvmDex_pResClasses]   @ r2<- dvmDex->pResClasses
     ldr     r0, [r2, r1, lsl #2]        @ r0<- pResClasses[BBBB]
@@ -23,7 +23,7 @@
      */
 .L${opcode}_resolve:
     EXPORT_PC()
-    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+    ldr     r0, [rSELF, #offThread_method] @ r0<- self->method
     mov     r2, #1                      @ r2<- true
     ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveClass             @ r0<- Class reference
diff --git a/vm/mterp/armv5te/OP_CONST_CLASS_JUMBO.S b/vm/mterp/armv5te/OP_CONST_CLASS_JUMBO.S
new file mode 100644
index 0000000..4dd973e
--- /dev/null
+++ b/vm/mterp/armv5te/OP_CONST_CLASS_JUMBO.S
@@ -0,0 +1,37 @@
+%verify "executed"
+%verify "Class already resolved"
+%verify "Class not yet resolved"
+%verify "Class cannot be resolved"
+    /* const-class/jumbo vBBBB, Class@AAAAAAAA */
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- self->methodClassDex
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResClasses]   @ r2<- dvmDex->pResClasses
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    FETCH(r9, 3)                        @ r9<- BBBB
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResClasses[AAAAaaaa]
+    cmp     r0, #0                      @ not yet resolved?
+    beq     .L${opcode}_resolve
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vBBBB<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+%break
+
+    /*
+     * Continuation if the Class has not yet been resolved.
+     *  r1: AAAAAAAA (Class ref)
+     *  r9: target register
+     */
+.L${opcode}_resolve:
+    EXPORT_PC()
+    ldr     r0, [rSELF, #offThread_method] @ r0<- self->method
+    mov     r2, #1                      @ r2<- true
+    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- Class reference
+    cmp     r0, #0                      @ failed?
+    beq     common_exceptionThrown      @ yup, handle the exception
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vBBBB<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
diff --git a/vm/mterp/armv5te/OP_CONST_STRING.S b/vm/mterp/armv5te/OP_CONST_STRING.S
index 2df3fda..bad36e4 100644
--- a/vm/mterp/armv5te/OP_CONST_STRING.S
+++ b/vm/mterp/armv5te/OP_CONST_STRING.S
@@ -4,7 +4,7 @@
 %verify "String cannot be resolved"
     /* const/string vAA, String@BBBB */
     FETCH(r1, 1)                        @ r1<- BBBB
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]  @ r2<- self->methodClassDex
     mov     r9, rINST, lsr #8           @ r9<- AA
     ldr     r2, [r2, #offDvmDex_pResStrings]   @ r2<- dvmDex->pResStrings
     ldr     r0, [r2, r1, lsl #2]        @ r0<- pResStrings[BBBB]
@@ -23,7 +23,7 @@
      */
 .L${opcode}_resolve:
     EXPORT_PC()
-    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+    ldr     r0, [rSELF, #offThread_method] @ r0<- self->method
     ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveString            @ r0<- String reference
     cmp     r0, #0                      @ failed?
diff --git a/vm/mterp/armv5te/OP_CONST_STRING_JUMBO.S b/vm/mterp/armv5te/OP_CONST_STRING_JUMBO.S
index cf9b009..05897f7 100644
--- a/vm/mterp/armv5te/OP_CONST_STRING_JUMBO.S
+++ b/vm/mterp/armv5te/OP_CONST_STRING_JUMBO.S
@@ -5,7 +5,7 @@
     /* const/string vAA, String@BBBBBBBB */
     FETCH(r0, 1)                        @ r0<- bbbb (low)
     FETCH(r1, 2)                        @ r1<- BBBB (high)
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]  @ r2<- self->methodClassDex
     mov     r9, rINST, lsr #8           @ r9<- AA
     ldr     r2, [r2, #offDvmDex_pResStrings]   @ r2<- dvmDex->pResStrings
     orr     r1, r0, r1, lsl #16         @ r1<- BBBBbbbb
@@ -25,7 +25,7 @@
      */
 .L${opcode}_resolve:
     EXPORT_PC()
-    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+    ldr     r0, [rSELF, #offThread_method] @ r0<- self->method
     ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveString            @ r0<- String reference
     cmp     r0, #0                      @ failed?
diff --git a/vm/mterp/armv5te/OP_DISPATCH_FF.S b/vm/mterp/armv5te/OP_DISPATCH_FF.S
index faa7246..1ff7981 100644
--- a/vm/mterp/armv5te/OP_DISPATCH_FF.S
+++ b/vm/mterp/armv5te/OP_DISPATCH_FF.S
@@ -1 +1,5 @@
-%include "armv5te/unused.S"
+%verify "executed"
+    mov     ip, rINST, lsr #8           @ ip<- extended opcode
+    add     ip, ip, #256                @ add offset for extended opcodes
+    GOTO_OPCODE(ip)                     @ go to proper extended handler
+
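OP_DISPATCH_FF above routes the extended opcodes: the high byte of the 0xff instruction unit selects an entry past the first 256 handlers. A hedged C rendering of that index math (the function name is illustrative):

    /* For a 0xff-prefixed instruction unit, the extended opcode lives in
     * the high byte; adding 256 selects the extended half of the handler
     * table, matching the "lsr #8" / "add #256" sequence above. */
    unsigned extendedHandlerIndex(unsigned short instUnit) {
        return (instUnit >> 8) + 256;
    }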
diff --git a/vm/mterp/armv5te/OP_EXECUTE_INLINE.S b/vm/mterp/armv5te/OP_EXECUTE_INLINE.S
index d29523d..e97ff36 100644
--- a/vm/mterp/armv5te/OP_EXECUTE_INLINE.S
+++ b/vm/mterp/armv5te/OP_EXECUTE_INLINE.S
@@ -12,11 +12,11 @@
      */
     /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */
     FETCH(r10, 1)                       @ r10<- BBBB
-    add     r1, rGLUE, #offGlue_retval  @ r1<- &glue->retval
+    add     r1, rSELF, #offThread_retval  @ r1<- &self->retval
     EXPORT_PC()                         @ can throw
     sub     sp, sp, #8                  @ make room for arg, +64 bit align
     mov     r0, rINST, lsr #12          @ r0<- B
-    str     r1, [sp]                    @ push &glue->retval
+    str     r1, [sp]                    @ push &self->retval
     bl      .L${opcode}_continue        @ make call; will return after
     add     sp, sp, #8                  @ pop stack
     cmp     r0, #0                      @ test boolean result of inline
diff --git a/vm/mterp/armv5te/OP_EXECUTE_INLINE_RANGE.S b/vm/mterp/armv5te/OP_EXECUTE_INLINE_RANGE.S
index 38ab35b..7ef2400 100644
--- a/vm/mterp/armv5te/OP_EXECUTE_INLINE_RANGE.S
+++ b/vm/mterp/armv5te/OP_EXECUTE_INLINE_RANGE.S
@@ -13,11 +13,11 @@
      */
     /* [opt] execute-inline/range {vCCCC..v(CCCC+AA-1)}, inline@BBBB */
     FETCH(r10, 1)                       @ r10<- BBBB
-    add     r1, rGLUE, #offGlue_retval  @ r1<- &glue->retval
+    add     r1, rSELF, #offThread_retval  @ r1<- &self->retval
     EXPORT_PC()                         @ can throw
     sub     sp, sp, #8                  @ make room for arg, +64 bit align
     mov     r0, rINST, lsr #8           @ r0<- AA
-    str     r1, [sp]                    @ push &glue->retval
+    str     r1, [sp]                    @ push &self->retval
     bl      .L${opcode}_continue        @ make call; will return after
     add     sp, sp, #8                  @ pop stack
     cmp     r0, #0                      @ test boolean result of inline
diff --git a/vm/mterp/armv5te/OP_FILLED_NEW_ARRAY.S b/vm/mterp/armv5te/OP_FILLED_NEW_ARRAY.S
index 5bb2d43..8c79cc1 100644
--- a/vm/mterp/armv5te/OP_FILLED_NEW_ARRAY.S
+++ b/vm/mterp/armv5te/OP_FILLED_NEW_ARRAY.S
@@ -8,7 +8,7 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
     EXPORT_PC()                         @ need for resolve and alloc
@@ -16,7 +16,7 @@
     mov     r10, rINST, lsr #8          @ r10<- AA or BA
     cmp     r0, #0                      @ already resolved?
     bne     .L${opcode}_continue        @ yes, continue on
-8:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+8:  ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     mov     r2, #0                      @ r2<- false
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveClass             @ r0<- call(clazz, ref)
@@ -49,8 +49,8 @@
     beq     common_exceptionThrown      @ alloc failed, handle exception
 
     FETCH(r1, 2)                        @ r1<- FEDC or CCCC
-    str     r0, [rGLUE, #offGlue_retval]      @ retval.l <- new array
-    str     rINST, [rGLUE, #offGlue_retval+4] @ retval.h <- type
+    str     r0, [rSELF, #offThread_retval]      @ retval.l <- new array
+    str     rINST, [rSELF, #offThread_retval+4] @ retval.h <- type
     add     r0, r0, #offArrayObject_contents @ r0<- newArray->contents
     subs    r9, r9, #1                  @ length--, check for neg
     FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
@@ -82,9 +82,9 @@
     .endif
 
 2:
-    ldr     r0, [rGLUE, #offGlue_retval]     @ r0<- object
-    ldr     r1, [rGLUE, #offGlue_retval+4]   @ r1<- type
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r0, [rSELF, #offThread_retval]     @ r0<- object
+    ldr     r1, [rSELF, #offThread_retval+4]   @ r1<- type
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     GET_INST_OPCODE(ip)                      @ ip<- opcode from rINST
     cmp     r1, #'I'                         @ Is int array?
     strneb  r2, [r2, r0, lsr #GC_CARD_SHIFT] @ Mark card based on object head
@@ -95,14 +95,11 @@
      * mode of filled-new-array.
      */
 .L${opcode}_notimpl:
-    ldr     r0, .L_strInternalError
-    ldr     r1, .L_strFilledNewArrayNotImpl
-    bl      dvmThrowException
+    ldr     r0, .L_strFilledNewArrayNotImpl
+    bl      dvmThrowInternalError
     b       common_exceptionThrown
 
     .if     (!$isrange)                 @ define in one or the other, not both
 .L_strFilledNewArrayNotImpl:
     .word   .LstrFilledNewArrayNotImpl
-.L_strInternalError:
-    .word   .LstrInternalError
     .endif
diff --git a/vm/mterp/armv5te/OP_FILLED_NEW_ARRAY_JUMBO.S b/vm/mterp/armv5te/OP_FILLED_NEW_ARRAY_JUMBO.S
new file mode 100644
index 0000000..a3f1695
--- /dev/null
+++ b/vm/mterp/armv5te/OP_FILLED_NEW_ARRAY_JUMBO.S
@@ -0,0 +1,77 @@
+%verify "executed"
+%verify "unimplemented array type"
+    /*
+     * Create a new array with elements filled from registers.
+     *
+     * TODO: convert most of this into a common subroutine, shared with
+     *       OP_FILLED_NEW_ARRAY.S.
+     */
+    /* filled-new-array/jumbo {vCCCC..v(CCCC+BBBB-1)}, type@AAAAAAAA */
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
+    EXPORT_PC()                         @ need for resolve and alloc
+    cmp     r0, #0                      @ already resolved?
+    bne     .L${opcode}_continue        @ yes, continue on
+8:  ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- call(clazz, ref)
+    cmp     r0, #0                      @ got null?
+    beq     common_exceptionThrown      @ yes, handle exception
+    b       .L${opcode}_continue
+%break
+
+    /*
+     * On entry:
+     *  r0 holds array class
+     */
+.L${opcode}_continue:
+    ldr     r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor
+    mov     r2, #ALLOC_DONT_TRACK       @ r2<- alloc flags
+    ldrb    rINST, [r3, #1]             @ rINST<- descriptor[1]
+    FETCH(r1, 3)                        @ r1<- BBBB (length)
+    cmp     rINST, #'I'                 @ array of ints?
+    cmpne   rINST, #'L'                 @ array of objects?
+    cmpne   rINST, #'['                 @ array of arrays?
+    mov     r9, r1                      @ save length in r9
+    bne     .L${opcode}_notimpl         @ no, not handled yet
+    bl      dvmAllocArrayByClass        @ r0<- call(arClass, length, flags)
+    cmp     r0, #0                      @ null return?
+    beq     common_exceptionThrown      @ alloc failed, handle exception
+
+    FETCH(r1, 4)                        @ r1<- CCCC
+    str     r0, [rSELF, #offThread_retval]      @ retval.l <- new array
+    str     rINST, [rSELF, #offThread_retval+4] @ retval.h <- type
+    add     r0, r0, #offArrayObject_contents @ r0<- newArray->contents
+    subs    r9, r9, #1                  @ length--, check for neg
+    FETCH_ADVANCE_INST(5)               @ advance to next instr, load rINST
+    bmi     2f                          @ was zero, bail
+
+    @ copy values from registers into the array
+    @ r0=array, r1=CCCC, r9=BBBB (length)
+    add     r2, rFP, r1, lsl #2         @ r2<- &fp[CCCC]
+1:  ldr     r3, [r2], #4                @ r3<- *r2++
+    subs    r9, r9, #1                  @ count--
+    str     r3, [r0], #4                @ *contents++ = vX
+    bpl     1b
+
+2:  ldr     r0, [rSELF, #offThread_retval]     @ r0<- object
+    ldr     r1, [rSELF, #offThread_retval+4]   @ r1<- type
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
+    GET_INST_OPCODE(ip)                      @ ip<- opcode from rINST
+    cmp     r1, #'I'                         @ Is int array?
+    strneb  r2, [r2, r0, lsr #GC_CARD_SHIFT] @ Mark card based on object head
+    GOTO_OPCODE(ip)                          @ execute it
+
+    /*
+     * Throw an exception indicating that we have not implemented this
+     * mode of filled-new-array.
+     */
+.L${opcode}_notimpl:
+    ldr     r0, .L_strFilledNewArrayNotImpl
+    bl      dvmThrowInternalError
+    b       common_exceptionThrown
diff --git a/vm/mterp/armv5te/OP_IGET.S b/vm/mterp/armv5te/OP_IGET.S
index b9cdee4..a81467c 100644
--- a/vm/mterp/armv5te/OP_IGET.S
+++ b/vm/mterp/armv5te/OP_IGET.S
@@ -11,14 +11,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .L${opcode}_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
diff --git a/vm/mterp/armv5te/OP_IGET_BOOLEAN_JUMBO.S b/vm/mterp/armv5te/OP_IGET_BOOLEAN_JUMBO.S
new file mode 100644
index 0000000..a1e2456
--- /dev/null
+++ b/vm/mterp/armv5te/OP_IGET_BOOLEAN_JUMBO.S
@@ -0,0 +1,3 @@
+%verify "executed"
+@include "armv5te/OP_IGET_JUMBO.S" { "load":"ldrb", "sqnum":"1" }
+%include "armv5te/OP_IGET_JUMBO.S" { "load":"ldr", "sqnum":"1" }
diff --git a/vm/mterp/armv5te/OP_IGET_BYTE_JUMBO.S b/vm/mterp/armv5te/OP_IGET_BYTE_JUMBO.S
new file mode 100644
index 0000000..302f67f
--- /dev/null
+++ b/vm/mterp/armv5te/OP_IGET_BYTE_JUMBO.S
@@ -0,0 +1,4 @@
+%verify "executed"
+%verify "negative value is sign-extended"
+@include "armv5te/OP_IGET_JUMBO.S" { "load":"ldrsb", "sqnum":"2" }
+%include "armv5te/OP_IGET_JUMBO.S" { "load":"ldr", "sqnum":"2" }
diff --git a/vm/mterp/armv5te/OP_IGET_CHAR_JUMBO.S b/vm/mterp/armv5te/OP_IGET_CHAR_JUMBO.S
new file mode 100644
index 0000000..c205ca6
--- /dev/null
+++ b/vm/mterp/armv5te/OP_IGET_CHAR_JUMBO.S
@@ -0,0 +1,4 @@
+%verify "executed"
+%verify "large values are not sign-extended"
+@include "armv5te/OP_IGET_JUMBO.S" { "load":"ldrh", "sqnum":"3" }
+%include "armv5te/OP_IGET_JUMBO.S" { "load":"ldr", "sqnum":"3" }
diff --git a/vm/mterp/armv5te/OP_IGET_JUMBO.S b/vm/mterp/armv5te/OP_IGET_JUMBO.S
new file mode 100644
index 0000000..6ed201f
--- /dev/null
+++ b/vm/mterp/armv5te/OP_IGET_JUMBO.S
@@ -0,0 +1,57 @@
+%default { "load":"ldr", "barrier":"@ no-op ", "sqnum":"0" }
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .L${opcode}_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .L${opcode}_resolved        @ resolved, continue
+%break
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.L${opcode}_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to ${opcode}_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.L${opcode}_finish:
+    @bl      common_squeak${sqnum}
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    $load   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    $barrier                            @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
diff --git a/vm/mterp/armv5te/OP_IGET_OBJECT_JUMBO.S b/vm/mterp/armv5te/OP_IGET_OBJECT_JUMBO.S
new file mode 100644
index 0000000..d1260fe
--- /dev/null
+++ b/vm/mterp/armv5te/OP_IGET_OBJECT_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te/OP_IGET_JUMBO.S"
diff --git a/vm/mterp/armv5te/OP_IGET_OBJECT_VOLATILE_JUMBO.S b/vm/mterp/armv5te/OP_IGET_OBJECT_VOLATILE_JUMBO.S
new file mode 100644
index 0000000..fb4bf63
--- /dev/null
+++ b/vm/mterp/armv5te/OP_IGET_OBJECT_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te/OP_IGET_OBJECT_JUMBO.S" {"barrier":"SMP_DMB"}
diff --git a/vm/mterp/armv5te/OP_IGET_SHORT_JUMBO.S b/vm/mterp/armv5te/OP_IGET_SHORT_JUMBO.S
new file mode 100644
index 0000000..81c2f77
--- /dev/null
+++ b/vm/mterp/armv5te/OP_IGET_SHORT_JUMBO.S
@@ -0,0 +1,4 @@
+%verify "executed"
+%verify "negative value is sign-extended"
+@include "armv5te/OP_IGET_JUMBO.S" { "load":"ldrsh", "sqnum":"4" }
+%include "armv5te/OP_IGET_JUMBO.S" { "load":"ldr", "sqnum":"4" }
diff --git a/vm/mterp/armv5te/OP_IGET_VOLATILE_JUMBO.S b/vm/mterp/armv5te/OP_IGET_VOLATILE_JUMBO.S
new file mode 100644
index 0000000..6d2b5ff
--- /dev/null
+++ b/vm/mterp/armv5te/OP_IGET_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te/OP_IGET_JUMBO.S" {"barrier":"SMP_DMB"}
diff --git a/vm/mterp/armv5te/OP_IGET_WIDE.S b/vm/mterp/armv5te/OP_IGET_WIDE.S
index 95944de..c73edfd 100644
--- a/vm/mterp/armv5te/OP_IGET_WIDE.S
+++ b/vm/mterp/armv5te/OP_IGET_WIDE.S
@@ -9,14 +9,14 @@
      */
     /* iget-wide vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .L${opcode}_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
diff --git a/vm/mterp/armv5te/OP_IGET_WIDE_JUMBO.S b/vm/mterp/armv5te/OP_IGET_WIDE_JUMBO.S
new file mode 100644
index 0000000..40a6a96
--- /dev/null
+++ b/vm/mterp/armv5te/OP_IGET_WIDE_JUMBO.S
@@ -0,0 +1,58 @@
+%default {"volatile":"0"}
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Jumbo 64-bit instance field get.
+     */
+    /* iget-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .L${opcode}_finish          @ no, already resolved
+    ldr     r2, [rSELF, #offThread_method] @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .L${opcode}_resolved        @ resolved, continue
+%break
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.L${opcode}_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to ${opcode}_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.L${opcode}_finish:
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    .if     $volatile
+    add     r0, r9, r3                  @ r0<- address of field
+    bl      dvmQuasiAtomicRead64        @ r0/r1<- contents of field
+    .else
+    ldrd    r0, [r9, r3]                @ r0/r1<- obj.field (64-bit align ok)
+    .endif
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    add     r3, rFP, r2, lsl #2         @ r3<- &fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r3, {r0-r1}                 @ fp[BBBB]<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
diff --git a/vm/mterp/armv5te/OP_IGET_WIDE_VOLATILE_JUMBO.S b/vm/mterp/armv5te/OP_IGET_WIDE_VOLATILE_JUMBO.S
new file mode 100644
index 0000000..c38a73d
--- /dev/null
+++ b/vm/mterp/armv5te/OP_IGET_WIDE_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te/OP_IGET_WIDE_JUMBO.S" {"volatile":"1"}
diff --git a/vm/mterp/armv5te/OP_INSTANCE_OF.S b/vm/mterp/armv5te/OP_INSTANCE_OF.S
index 66f0df3..73911b1 100644
--- a/vm/mterp/armv5te/OP_INSTANCE_OF.S
+++ b/vm/mterp/armv5te/OP_INSTANCE_OF.S
@@ -17,7 +17,7 @@
     GET_VREG(r0, r3)                    @ r0<- vB (object)
     and     r9, r9, #15                 @ r9<- A
     cmp     r0, #0                      @ is object null?
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- pDvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- pDvmDex
     beq     .L${opcode}_store           @ null obj, not an instance, store r0
     FETCH(r3, 1)                        @ r3<- CCCC
     ldr     r2, [r2, #offDvmDex_pResClasses]    @ r2<- pDvmDex->pResClasses
@@ -71,7 +71,7 @@
      */
 .L${opcode}_resolve:
     EXPORT_PC()                         @ resolve() could throw
-    ldr     r0, [rGLUE, #offGlue_method]    @ r0<- glue->method
+    ldr     r0, [rSELF, #offThread_method]    @ r0<- self->method
     mov     r1, r3                      @ r1<- BBBB
     mov     r2, #1                      @ r2<- true
     ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
diff --git a/vm/mterp/armv5te/OP_INSTANCE_OF_JUMBO.S b/vm/mterp/armv5te/OP_INSTANCE_OF_JUMBO.S
new file mode 100644
index 0000000..1de1222
--- /dev/null
+++ b/vm/mterp/armv5te/OP_INSTANCE_OF_JUMBO.S
@@ -0,0 +1,98 @@
+%verify "executed"
+%verify "null object"
+%verify "class cast exception thrown, with correct class name"
+%verify "class cast exception not thrown on same class"
+%verify "class cast exception not thrown on subclass"
+%verify "class not resolved"
+%verify "class already resolved"
+    /*
+     * Check to see if an object reference is an instance of a class.
+     *
+     * Most common situation is a non-null object, being compared against
+     * an already-resolved class.
+     *
+     * TODO: convert most of this into a common subroutine, shared with
+     *       OP_INSTANCE_OF.S.
+     */
+    /* instance-of/jumbo vBBBB, vCCCC, class@AAAAAAAA */
+    FETCH(r3, 4)                        @ r3<- vCCCC
+    FETCH(r9, 3)                        @ r9<- vBBBB
+    GET_VREG(r0, r3)                    @ r0<- vCCCC (object)
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- pDvmDex
+    cmp     r0, #0                      @ is object null?
+    beq     .L${opcode}_store           @ null obj, not an instance, store r0
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r3, 2)                        @ r3<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResClasses]    @ r2<- pDvmDex->pResClasses
+    orr     r3, r1, r3, lsl #16         @ r3<- AAAAaaaa
+    ldr     r1, [r2, r3, lsl #2]        @ r1<- resolved class
+    ldr     r0, [r0, #offObject_clazz]  @ r0<- obj->clazz
+    cmp     r1, #0                      @ have we resolved this before?
+    beq     .L${opcode}_resolve         @ not resolved, do it now
+    b       .L${opcode}_resolved        @ resolved, continue
+%break
+
+    /*
+     * Class resolved, determine type of check necessary.  This is common.
+     *  r0 holds obj->clazz
+     *  r1 holds class resolved from AAAAAAAA
+     *  r9 holds BBBB
+     */
+.L${opcode}_resolved:
+    cmp     r0, r1                      @ same class (trivial success)?
+    beq     .L${opcode}_trivial         @ yes, trivial finish
+    @ fall through to ${opcode}_fullcheck
+
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  r0 holds obj->clazz
+     *  r1 holds class resolved from AAAAAAAA
+     *  r9 holds BBBB
+     */
+.L${opcode}_fullcheck:
+    bl      dvmInstanceofNonTrivial     @ r0<- boolean result
+    @ fall through to ${opcode}_store
+
+    /*
+     * r0 holds boolean result
+     * r9 holds BBBB
+     */
+.L${opcode}_store:
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r9)                    @ vBBBB<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * Trivial test succeeded, save and bail.
+     *  r9 holds BBBB
+     */
+.L${opcode}_trivial:
+    mov     r0, #1                      @ indicate success
+    @ could b ${opcode}_store, but copying is faster and cheaper
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r9)                    @ vBBBB<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  r3 holds AAAAAAAA
+     *  r9 holds BBBB
+     */
+
+.L${opcode}_resolve:
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [rSELF, #offThread_method]    @ r0<- self->method
+    mov     r1, r3                      @ r1<- AAAAAAAA
+    mov     r2, #1                      @ r2<- true
+    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
+    cmp     r0, #0                      @ got null?
+    beq     common_exceptionThrown      @ yes, handle exception
+    FETCH(r3, 4)                        @ r3<- vCCCC
+    mov     r1, r0                      @ r1<- class resolved from AAAAAAAA
+    GET_VREG(r0, r3)                    @ r0<- vCCCC (object)
+    ldr     r0, [r0, #offObject_clazz]  @ r0<- obj->clazz
+    b       .L${opcode}_resolved        @ pick up where we left off
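
All of the new jumbo handlers rebuild their 32-bit constant-pool index from two 16-bit code units with the "orr rD, rLo, rHi, lsl #16" pattern seen above. For reference, a minimal C sketch of that packing (the names here are illustrative, not taken from the VM sources):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for FETCH(_, 1) / FETCH(_, 2) followed by
     * "orr r1, r0, r1, lsl #16": low half from code unit 1,
     * high half from code unit 2. */
    static uint32_t jumboIndex(uint16_t aaaa_lo, uint16_t AAAA_hi)
    {
        return (uint32_t)aaaa_lo | ((uint32_t)AAAA_hi << 16);
    }

    int main(void)
    {
        /* code units 0x1234 (lo) and 0x0005 (hi) -> index 0x00051234 */
        printf("0x%08x\n", jumboIndex(0x1234, 0x0005));
        return 0;
    }
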
diff --git a/vm/mterp/armv5te/OP_INVOKE_DIRECT.S b/vm/mterp/armv5te/OP_INVOKE_DIRECT.S
index 14ba8f7..15b173f 100644
--- a/vm/mterp/armv5te/OP_INVOKE_DIRECT.S
+++ b/vm/mterp/armv5te/OP_INVOKE_DIRECT.S
@@ -13,7 +13,7 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
@@ -37,7 +37,7 @@
      *  r10 = "this" register
      */
 .L${opcode}_resolve:
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     mov     r2, #METHOD_DIRECT          @ resolver method type
     bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
diff --git a/vm/mterp/armv5te/OP_INVOKE_DIRECT_EMPTY.S b/vm/mterp/armv5te/OP_INVOKE_DIRECT_EMPTY.S
deleted file mode 100644
index 3c6b192..0000000
--- a/vm/mterp/armv5te/OP_INVOKE_DIRECT_EMPTY.S
+++ /dev/null
@@ -1,7 +0,0 @@
-%verify "executed"
-    /*
-     * invoke-direct-empty is a no-op in a "standard" interpreter.
-     */
-    FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
-    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
-    GOTO_OPCODE(ip)                     @ execute it
diff --git a/vm/mterp/armv5te/OP_INVOKE_DIRECT_JUMBO.S b/vm/mterp/armv5te/OP_INVOKE_DIRECT_JUMBO.S
new file mode 100644
index 0000000..5613fbb
--- /dev/null
+++ b/vm/mterp/armv5te/OP_INVOKE_DIRECT_JUMBO.S
@@ -0,0 +1,43 @@
+%verify "executed"
+%verify "unknown method"
+    /*
+     * Handle a direct method call.
+     *
+     * (We could defer the "is 'this' pointer null" test to the common
+     * method invocation code, and use a flag to indicate that static
+     * calls don't count.  If we do this as part of copying the arguments
+     * out we could avoid loading the first arg twice.)
+     *
+     */
+    /* invoke-direct/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    FETCH(r10, 4)                       @ r10<- CCCC
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ must export for invoke
+    GET_VREG(r2, r10)                   @ r2<- "this" ptr
+    beq     .L${opcode}_resolve         @ not resolved, do it now
+.L${opcode}_finish:
+    cmp     r2, #0                      @ null "this" ref?
+    bne     common_invokeMethodJumbo    @ no, continue on
+    b       common_errNullObject        @ yes, throw exception
+%break
+
+    /*
+     * On entry:
+     *  r1 = reference (CCCC)
+     *  r10 = "this" register
+     */
+.L${opcode}_resolve:
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    mov     r2, #METHOD_DIRECT          @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    GET_VREG(r2, r10)                   @ r2<- "this" ptr (reload)
+    bne     .L${opcode}_finish          @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
diff --git a/vm/mterp/armv5te/OP_INVOKE_INTERFACE.S b/vm/mterp/armv5te/OP_INVOKE_INTERFACE.S
index 7d52454..3149775 100644
--- a/vm/mterp/armv5te/OP_INVOKE_INTERFACE.S
+++ b/vm/mterp/armv5te/OP_INVOKE_INTERFACE.S
@@ -16,9 +16,9 @@
     .endif
     EXPORT_PC()                         @ must export for invoke
     GET_VREG(r0, r2)                    @ r0<- first arg ("this")
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- methodClassDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- methodClassDex
     cmp     r0, #0                      @ null obj?
-    ldr     r2, [rGLUE, #offGlue_method]  @ r2<- method
+    ldr     r2, [rSELF, #offThread_method]  @ r2<- method
     beq     common_errNullObject        @ yes, fail
     ldr     r0, [r0, #offObject_clazz]  @ r0<- thisPtr->clazz
     bl      dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
diff --git a/vm/mterp/armv5te/OP_INVOKE_INTERFACE_JUMBO.S b/vm/mterp/armv5te/OP_INVOKE_INTERFACE_JUMBO.S
new file mode 100644
index 0000000..930d7d5
--- /dev/null
+++ b/vm/mterp/armv5te/OP_INVOKE_INTERFACE_JUMBO.S
@@ -0,0 +1,22 @@
+%verify "executed"
+%verify "unknown method"
+%verify "null object"
+    /*
+     * Handle an interface method call.
+     */
+    /* invoke-interface/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    FETCH(r2, 4)                        @ r2<- CCCC
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    EXPORT_PC()                         @ must export for invoke
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    GET_VREG(r0, r2)                    @ r0<- first arg ("this")
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- methodClassDex
+    cmp     r0, #0                      @ null obj?
+    ldr     r2, [rSELF, #offThread_method]  @ r2<- method
+    beq     common_errNullObject        @ yes, fail
+    ldr     r0, [r0, #offObject_clazz]  @ r0<- thisPtr->clazz
+    bl      dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
+    cmp     r0, #0                      @ failed?
+    beq     common_exceptionThrown      @ yes, handle exception
+    b       common_invokeMethodJumbo    @ jump to common handler
diff --git a/vm/mterp/armv5te/OP_INVOKE_OBJECT_INIT_JUMBO.S b/vm/mterp/armv5te/OP_INVOKE_OBJECT_INIT_JUMBO.S
new file mode 100644
index 0000000..47e3a4a
--- /dev/null
+++ b/vm/mterp/armv5te/OP_INVOKE_OBJECT_INIT_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te/OP_INVOKE_OBJECT_INIT_RANGE.S" {"cccc":"4"}
diff --git a/vm/mterp/armv5te/OP_INVOKE_OBJECT_INIT_RANGE.S b/vm/mterp/armv5te/OP_INVOKE_OBJECT_INIT_RANGE.S
new file mode 100644
index 0000000..67a3cc7
--- /dev/null
+++ b/vm/mterp/armv5te/OP_INVOKE_OBJECT_INIT_RANGE.S
@@ -0,0 +1,20 @@
+%default { "cccc":"2" }
+%verify "executed"
+%verify "finalizable class"
+    /*
+     * Invoke Object.<init> on an object.  In practice we know that
+     * Object's nullary constructor doesn't do anything, so we just
+     * skip it (we know a debugger isn't active).
+     */
+    FETCH(r1, ${cccc})                  @ r1<- CCCC
+    GET_VREG(r0, r1)                    @ r0<- "this" ptr
+    cmp     r0, #0                      @ check for NULL
+    beq     common_errNullObject        @ export PC and throw NPE
+    ldr     r1, [r0, #offObject_clazz]  @ r1<- obj->clazz
+    ldr     r2, [r1, #offClassObject_accessFlags] @ r2<- clazz->accessFlags
+    tst     r2, #CLASS_ISFINALIZABLE    @ is this class finalizable?
+    beq     1f                          @ nope, done
+    bl      dvmSetFinalizable           @ call dvmSetFinalizable(obj)
+1:  FETCH_ADVANCE_INST(${cccc}+1)       @ advance to next instr, load rINST
+    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
+    GOTO_OPCODE(ip)                     @ execute it
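
Object.<init> itself is empty, so the handler above keeps only the null check and the finalizable bookkeeping. A rough C equivalent, assuming illustrative type and flag definitions (the real ones live in the VM headers):

    #include <stdint.h>
    #include <stdio.h>

    #define CLASS_ISFINALIZABLE (1u << 31)   /* assumed bit value, for illustration */

    struct ClassObject { uint32_t accessFlags; };
    struct Object      { struct ClassObject *clazz; };

    /* Stub standing in for dvmSetFinalizable(obj). */
    static void setFinalizable(struct Object *obj)
    {
        (void)obj;
        puts("marked finalizable");
    }

    /* The null case is handled by common_errNullObject in the assembly;
     * here the caller is assumed to have checked already. */
    static void invokeObjectInit(struct Object *thisPtr)
    {
        if (thisPtr->clazz->accessFlags & CLASS_ISFINALIZABLE)
            setFinalizable(thisPtr);
    }

    int main(void)
    {
        struct ClassObject cls = { CLASS_ISFINALIZABLE };
        struct Object obj = { &cls };
        invokeObjectInit(&obj);
        return 0;
    }
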
diff --git a/vm/mterp/armv5te/OP_INVOKE_STATIC.S b/vm/mterp/armv5te/OP_INVOKE_STATIC.S
index cb359e6..47718c6 100644
--- a/vm/mterp/armv5te/OP_INVOKE_STATIC.S
+++ b/vm/mterp/armv5te/OP_INVOKE_STATIC.S
@@ -8,14 +8,14 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
     ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
     cmp     r0, #0                      @ already resolved?
     EXPORT_PC()                         @ must export for invoke
     bne     common_invokeMethod${routine} @ yes, continue on
-0:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+0:  ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     mov     r2, #METHOD_STATIC          @ resolver method type
     bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
diff --git a/vm/mterp/armv5te/OP_INVOKE_STATIC_JUMBO.S b/vm/mterp/armv5te/OP_INVOKE_STATIC_JUMBO.S
new file mode 100644
index 0000000..3d04534
--- /dev/null
+++ b/vm/mterp/armv5te/OP_INVOKE_STATIC_JUMBO.S
@@ -0,0 +1,22 @@
+%verify "executed"
+%verify "unknown method"
+    /*
+     * Handle a static method call.
+     */
+    /* invoke-static/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ must export for invoke
+    bne     common_invokeMethodJumbo    @ yes, continue on
+0:  ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    mov     r2, #METHOD_STATIC          @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    bne     common_invokeMethodJumbo    @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
diff --git a/vm/mterp/armv5te/OP_INVOKE_SUPER.S b/vm/mterp/armv5te/OP_INVOKE_SUPER.S
index 6117947..bf3458c 100644
--- a/vm/mterp/armv5te/OP_INVOKE_SUPER.S
+++ b/vm/mterp/armv5te/OP_INVOKE_SUPER.S
@@ -9,7 +9,7 @@
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     .if     (!$isrange)
     and     r10, r10, #15               @ r10<- D (or stays CCCC)
     .endif
@@ -18,7 +18,7 @@
     GET_VREG(r2, r10)                   @ r2<- "this" ptr
     ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
     cmp     r2, #0                      @ null "this"?
-    ldr     r9, [rGLUE, #offGlue_method] @ r9<- current method
+    ldr     r9, [rSELF, #offThread_method] @ r9<- current method
     beq     common_errNullObject        @ null "this", throw exception
     cmp     r0, #0                      @ already resolved?
     ldr     r9, [r9, #offMethod_clazz]  @ r9<- method->clazz
diff --git a/vm/mterp/armv5te/OP_INVOKE_SUPER_JUMBO.S b/vm/mterp/armv5te/OP_INVOKE_SUPER_JUMBO.S
new file mode 100644
index 0000000..85327cc
--- /dev/null
+++ b/vm/mterp/armv5te/OP_INVOKE_SUPER_JUMBO.S
@@ -0,0 +1,55 @@
+%verify "executed"
+%verify "unknown method"
+    /*
+     * Handle a "super" method call.
+     */
+    /* invoke-super/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    FETCH(r10, 4)                       @ r10<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    GET_VREG(r2, r10)                   @ r2<- "this" ptr
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
+    cmp     r2, #0                      @ null "this"?
+    ldr     r9, [rSELF, #offThread_method] @ r9<- current method
+    beq     common_errNullObject        @ null "this", throw exception
+    cmp     r0, #0                      @ already resolved?
+    ldr     r9, [r9, #offMethod_clazz]  @ r9<- method->clazz
+    EXPORT_PC()                         @ must export for invoke
+    bne     .L${opcode}_continue        @ resolved, continue on
+    b       .L${opcode}_resolve         @ do resolve now
+%break
+
+    /*
+     * At this point:
+     *  r0 = resolved base method
+     *  r9 = method->clazz
+     */
+.L${opcode}_continue:
+    ldr     r1, [r9, #offClassObject_super]     @ r1<- method->clazz->super
+    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
+    ldr     r3, [r1, #offClassObject_vtableCount]   @ r3<- super->vtableCount
+    EXPORT_PC()                         @ must export for invoke
+    cmp     r2, r3                      @ compare (methodIndex, vtableCount)
+    bcs     .L${opcode}_nsm             @ method not present in superclass
+    ldr     r1, [r1, #offClassObject_vtable]    @ r1<- ...clazz->super->vtable
+    ldr     r0, [r1, r2, lsl #2]        @ r0<- vtable[methodIndex]
+    bl      common_invokeMethodJumbo    @ continue on
+
+.L${opcode}_resolve:
+    mov     r0, r9                      @ r0<- method->clazz
+    mov     r2, #METHOD_VIRTUAL         @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    bne     .L${opcode}_continue        @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
+    /*
+     * Throw a NoSuchMethodError with the method name as the message.
+     *  r0 = resolved base method
+     */
+.L${opcode}_nsm:
+    ldr     r1, [r0, #offMethod_name]   @ r1<- method name
+    b       common_errNoSuchMethod
diff --git a/vm/mterp/armv5te/OP_INVOKE_SUPER_QUICK.S b/vm/mterp/armv5te/OP_INVOKE_SUPER_QUICK.S
index bd07d06..e967b46 100644
--- a/vm/mterp/armv5te/OP_INVOKE_SUPER_QUICK.S
+++ b/vm/mterp/armv5te/OP_INVOKE_SUPER_QUICK.S
@@ -9,7 +9,7 @@
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     .if     (!$isrange)
     and     r10, r10, #15               @ r10<- D (or stays CCCC)
     .endif
diff --git a/vm/mterp/armv5te/OP_INVOKE_VIRTUAL.S b/vm/mterp/armv5te/OP_INVOKE_VIRTUAL.S
index d92c6a9..371006b 100644
--- a/vm/mterp/armv5te/OP_INVOKE_VIRTUAL.S
+++ b/vm/mterp/armv5te/OP_INVOKE_VIRTUAL.S
@@ -9,7 +9,7 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
@@ -20,7 +20,7 @@
     cmp     r0, #0                      @ already resolved?
     EXPORT_PC()                         @ must export for invoke
     bne     .L${opcode}_continue        @ yes, continue on
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     mov     r2, #METHOD_VIRTUAL         @ resolver method type
     bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
diff --git a/vm/mterp/armv5te/OP_INVOKE_VIRTUAL_JUMBO.S b/vm/mterp/armv5te/OP_INVOKE_VIRTUAL_JUMBO.S
new file mode 100644
index 0000000..3d77072
--- /dev/null
+++ b/vm/mterp/armv5te/OP_INVOKE_VIRTUAL_JUMBO.S
@@ -0,0 +1,39 @@
+%verify "executed"
+%verify "unknown method"
+%verify "null object"
+    /*
+     * Handle a virtual method call.
+     */
+    /* invoke-virtual/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ must export for invoke
+    bne     .L${opcode}_continue        @ yes, continue on
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    mov     r2, #METHOD_VIRTUAL         @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    bne     .L${opcode}_continue        @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+%break
+
+    /*
+     * At this point:
+     *  r0 = resolved base method
+     */
+.L${opcode}_continue:
+    FETCH(r10, 4)                       @ r10<- CCCC
+    GET_VREG(r1, r10)                   @ r1<- "this" ptr
+    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
+    cmp     r1, #0                      @ is "this" null?
+    beq     common_errNullObject        @ null "this", throw exception
+    ldr     r3, [r1, #offObject_clazz]  @ r3<- thisPtr->clazz
+    ldr     r3, [r3, #offClassObject_vtable]    @ r3<- thisPtr->clazz->vtable
+    ldr     r0, [r3, r2, lsl #2]        @ r0<- vtable[methodIndex]
+    bl      common_invokeMethodJumbo    @ continue on
diff --git a/vm/mterp/armv5te/OP_IPUT.S b/vm/mterp/armv5te/OP_IPUT.S
index 53f4b4e..13836fb 100644
--- a/vm/mterp/armv5te/OP_IPUT.S
+++ b/vm/mterp/armv5te/OP_IPUT.S
@@ -11,14 +11,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .L${opcode}_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
diff --git a/vm/mterp/armv5te/OP_IPUT_BOOLEAN_JUMBO.S b/vm/mterp/armv5te/OP_IPUT_BOOLEAN_JUMBO.S
new file mode 100644
index 0000000..9d89c9a
--- /dev/null
+++ b/vm/mterp/armv5te/OP_IPUT_BOOLEAN_JUMBO.S
@@ -0,0 +1,3 @@
+%verify "executed"
+@include "armv5te/OP_IPUT_JUMBO.S" { "store":"strb", "sqnum":"1" }
+%include "armv5te/OP_IPUT_JUMBO.S" { "store":"str", "sqnum":"1" }
diff --git a/vm/mterp/armv5te/OP_IPUT_BYTE_JUMBO.S b/vm/mterp/armv5te/OP_IPUT_BYTE_JUMBO.S
new file mode 100644
index 0000000..8378f49
--- /dev/null
+++ b/vm/mterp/armv5te/OP_IPUT_BYTE_JUMBO.S
@@ -0,0 +1,3 @@
+%verify "executed"
+@include "armv5te/OP_IPUT_JUMBO.S" { "store":"strb", "sqnum":"2" }
+%include "armv5te/OP_IPUT_JUMBO.S" { "store":"str", "sqnum":"2" }
diff --git a/vm/mterp/armv5te/OP_IPUT_CHAR_JUMBO.S b/vm/mterp/armv5te/OP_IPUT_CHAR_JUMBO.S
new file mode 100644
index 0000000..9d6a5b0
--- /dev/null
+++ b/vm/mterp/armv5te/OP_IPUT_CHAR_JUMBO.S
@@ -0,0 +1,3 @@
+%verify "executed"
+@include "armv5te/OP_IPUT_JUMBO.S" { "store":"strh", "sqnum":"3" }
+%include "armv5te/OP_IPUT_JUMBO.S" { "store":"str", "sqnum":"3" }
diff --git a/vm/mterp/armv5te/OP_IPUT_JUMBO.S b/vm/mterp/armv5te/OP_IPUT_JUMBO.S
new file mode 100644
index 0000000..2c11dac
--- /dev/null
+++ b/vm/mterp/armv5te/OP_IPUT_JUMBO.S
@@ -0,0 +1,57 @@
+%default { "store":"str", "barrier":"@ no-op ", "sqnum":"0" }
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .L${opcode}_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .L${opcode}_resolved        @ resolved, continue
+%break
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.L${opcode}_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to ${opcode}_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.L${opcode}_finish:
+    @bl      common_squeak${sqnum}
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    $barrier                            @ releasing store
+    $store  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
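
Once the InstField is resolved, the finish block above is just a null check followed by a store of fp[BBBB] at obj + byteOffset, sized by the $store template argument. A sketch of the 32-bit case, with an illustrative struct layout (not the VM's real definition):

    #include <stdint.h>
    #include <string.h>

    struct InstField { int byteOffset; };   /* illustrative layout only */

    /* 32-bit instance field put: the assembly's
     *   $store r0, [r9, r3]
     * with r9 = object, r3 = field->byteOffset, r0 = fp[BBBB]. */
    static void iputJumbo32(void *obj, const struct InstField *field, uint32_t value)
    {
        memcpy((char *)obj + field->byteOffset, &value, sizeof value);
    }
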
diff --git a/vm/mterp/armv5te/OP_IPUT_OBJECT.S b/vm/mterp/armv5te/OP_IPUT_OBJECT.S
index 079094e..7cab4c5 100644
--- a/vm/mterp/armv5te/OP_IPUT_OBJECT.S
+++ b/vm/mterp/armv5te/OP_IPUT_OBJECT.S
@@ -11,14 +11,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .L${opcode}_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -39,7 +39,7 @@
     and     r1, r1, #15                 @ r1<- A
     cmp     r9, #0                      @ check object for null
     GET_VREG(r0, r1)                    @ r0<- fp[A]
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     beq     common_errNullObject        @ object was null
     FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
diff --git a/vm/mterp/armv5te/OP_IPUT_OBJECT_JUMBO.S b/vm/mterp/armv5te/OP_IPUT_OBJECT_JUMBO.S
new file mode 100644
index 0000000..11b6d68
--- /dev/null
+++ b/vm/mterp/armv5te/OP_IPUT_OBJECT_JUMBO.S
@@ -0,0 +1,57 @@
+%default { "barrier":"@ no-op ", "sqnum":"0" }
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Jumbo 32-bit instance field put.
+     */
+    /* iput-object/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .L${opcode}_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .L${opcode}_resolved        @ resolved, continue
+%break
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.L${opcode}_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to ${opcode}_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.L${opcode}_finish:
+    @bl      common_squeak${sqnum}
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    $barrier                            @ releasing store
+    str     r0, [r9, r3]                @ obj.field (32 bits)<- r0
+    cmp     r0, #0                      @ stored a null reference?
+    strneb  r2, [r2, r9, lsr #GC_CARD_SHIFT]  @ mark card if not
+    GOTO_OPCODE(ip)                     @ jump to next instruction
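
The object variant adds a write barrier after the store: when a non-null reference was written, the strneb above dirties the card covering the object by writing the card-table base's low byte at base + (objAddr >> GC_CARD_SHIFT). A hedged C sketch (the shift value is assumed for illustration):

    #include <stddef.h>
    #include <stdint.h>

    #define GC_CARD_SHIFT 7   /* assumed; the real constant comes from the heap headers */

    /* Equivalent of "strneb r2, [r2, r9, lsr #GC_CARD_SHIFT]" with
     * r2 = card-table base and r9 = object address. */
    static void markCardIfRef(uint8_t *cardBase, uintptr_t objAddr, const void *storedRef)
    {
        if (storedRef != NULL)
            cardBase[objAddr >> GC_CARD_SHIFT] = (uint8_t)(uintptr_t)cardBase;
    }
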
diff --git a/vm/mterp/armv5te/OP_IPUT_OBJECT_QUICK.S b/vm/mterp/armv5te/OP_IPUT_OBJECT_QUICK.S
index 7e7144a..7bf9b21 100644
--- a/vm/mterp/armv5te/OP_IPUT_OBJECT_QUICK.S
+++ b/vm/mterp/armv5te/OP_IPUT_OBJECT_QUICK.S
@@ -10,7 +10,7 @@
     beq     common_errNullObject        @ object was null
     and     r2, r2, #15
     GET_VREG(r0, r2)                    @ r0<- fp[A]
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
     str     r0, [r3, r1]                @ obj.field (always 32 bits)<- r0
     cmp     r0, #0
diff --git a/vm/mterp/armv5te/OP_IPUT_OBJECT_VOLATILE_JUMBO.S b/vm/mterp/armv5te/OP_IPUT_OBJECT_VOLATILE_JUMBO.S
new file mode 100644
index 0000000..0d27fe5
--- /dev/null
+++ b/vm/mterp/armv5te/OP_IPUT_OBJECT_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te/OP_IPUT_OBJECT_JUMBO.S" {"barrier":"SMP_DMB"}
diff --git a/vm/mterp/armv5te/OP_IPUT_SHORT_JUMBO.S b/vm/mterp/armv5te/OP_IPUT_SHORT_JUMBO.S
new file mode 100644
index 0000000..889c723
--- /dev/null
+++ b/vm/mterp/armv5te/OP_IPUT_SHORT_JUMBO.S
@@ -0,0 +1,3 @@
+%verify "executed"
+@include "armv5te/OP_IPUT_JUMBO.S" { "store":"strh", "sqnum":"4" }
+%include "armv5te/OP_IPUT_JUMBO.S" { "store":"str", "sqnum":"4" }
diff --git a/vm/mterp/armv5te/OP_IPUT_VOLATILE_JUMBO.S b/vm/mterp/armv5te/OP_IPUT_VOLATILE_JUMBO.S
new file mode 100644
index 0000000..af7d9eb
--- /dev/null
+++ b/vm/mterp/armv5te/OP_IPUT_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te/OP_IPUT_JUMBO.S" {"barrier":"SMP_DMB"}
diff --git a/vm/mterp/armv5te/OP_IPUT_WIDE.S b/vm/mterp/armv5te/OP_IPUT_WIDE.S
index 8796cbb..d9a6fc3 100644
--- a/vm/mterp/armv5te/OP_IPUT_WIDE.S
+++ b/vm/mterp/armv5te/OP_IPUT_WIDE.S
@@ -6,14 +6,14 @@
 %verify "field cannot be resolved"
     /* iput-wide vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .L${opcode}_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
diff --git a/vm/mterp/armv5te/OP_IPUT_WIDE_JUMBO.S b/vm/mterp/armv5te/OP_IPUT_WIDE_JUMBO.S
new file mode 100644
index 0000000..a371f1c
--- /dev/null
+++ b/vm/mterp/armv5te/OP_IPUT_WIDE_JUMBO.S
@@ -0,0 +1,55 @@
+%default {"volatile":"0"}
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /* iput-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .L${opcode}_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .L${opcode}_resolved        @ resolved, continue
+%break
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.L${opcode}_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to ${opcode}_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.L${opcode}_finish:
+    cmp     r9, #0                      @ check object for null
+    FETCH(r2, 3)                        @ r2<- BBBB
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    ldmia   r2, {r0-r1}                 @ r0/r1<- fp[BBBB]
+    GET_INST_OPCODE(r10)                @ extract opcode from rINST
+    .if     $volatile
+    add     r2, r9, r3                  @ r2<- target address
+    bl      dvmQuasiAtomicSwap64        @ stores r0/r1 into addr r2
+    .else
+    strd    r0, [r9, r3]                @ obj.field (64 bits, aligned)<- r0/r1
+    .endif
+    GOTO_OPCODE(r10)                    @ jump to next instruction
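
For wide puts the template picks between a plain strd and a call to dvmQuasiAtomicSwap64 when the field is volatile, since the 64-bit value must be published in one indivisible step. A sketch of that distinction, using C11 atomics as a stand-in for the VM's own helper:

    #include <stdatomic.h>
    #include <stdint.h>

    /* Non-volatile case: equivalent of "strd r0, [r9, r3]". */
    static void storeWidePlain(int64_t *fieldAddr, int64_t value)
    {
        *fieldAddr = value;
    }

    /* Volatile case: stands in for dvmQuasiAtomicSwap64 -- the store must be
     * atomic as a whole even on CPUs without a native 64-bit store. */
    static void storeWideVolatile(_Atomic int64_t *fieldAddr, int64_t value)
    {
        atomic_store_explicit(fieldAddr, value, memory_order_seq_cst);
    }
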
diff --git a/vm/mterp/armv5te/OP_IPUT_WIDE_VOLATILE_JUMBO.S b/vm/mterp/armv5te/OP_IPUT_WIDE_VOLATILE_JUMBO.S
new file mode 100644
index 0000000..53b1030
--- /dev/null
+++ b/vm/mterp/armv5te/OP_IPUT_WIDE_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te/OP_IPUT_WIDE_JUMBO.S" {"volatile":"1"}
diff --git a/vm/mterp/armv5te/OP_MONITOR_ENTER.S b/vm/mterp/armv5te/OP_MONITOR_ENTER.S
index 36faabc..ba5a144 100644
--- a/vm/mterp/armv5te/OP_MONITOR_ENTER.S
+++ b/vm/mterp/armv5te/OP_MONITOR_ENTER.S
@@ -6,17 +6,11 @@
     /* monitor-enter vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     GET_VREG(r1, r2)                    @ r1<- vAA (object)
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    mov     r0, rSELF                   @ r0<- self
     cmp     r1, #0                      @ null object?
-    EXPORT_PC()                         @ need for precise GC, MONITOR_TRACKING
+    EXPORT_PC()                         @ need for precise GC
     beq     common_errNullObject        @ null object, throw an exception
     FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
     bl      dvmLockObject               @ call(self, obj)
-#ifdef WITH_DEADLOCK_PREDICTION /* implies WITH_MONITOR_TRACKING */
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
-    ldr     r1, [r0, #offThread_exception] @ check for exception
-    cmp     r1, #0
-    bne     common_exceptionThrown      @ exception raised, bail out
-#endif
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     GOTO_OPCODE(ip)                     @ jump to next instruction
diff --git a/vm/mterp/armv5te/OP_MONITOR_EXIT.S b/vm/mterp/armv5te/OP_MONITOR_EXIT.S
index 5c1b3c7..9f36f0e 100644
--- a/vm/mterp/armv5te/OP_MONITOR_EXIT.S
+++ b/vm/mterp/armv5te/OP_MONITOR_EXIT.S
@@ -14,7 +14,7 @@
     GET_VREG(r1, r2)                    @ r1<- vAA (object)
     cmp     r1, #0                      @ null object?
     beq     1f                          @ yes
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    mov     r0, rSELF                   @ r0<- self
     bl      dvmUnlockObject             @ r0<- success for unlock(self, obj)
     cmp     r0, #0                      @ failed?
     FETCH_ADVANCE_INST(1)               @ before throw: advance rPC, load rINST
diff --git a/vm/mterp/armv5te/OP_MOVE_EXCEPTION.S b/vm/mterp/armv5te/OP_MOVE_EXCEPTION.S
index f9e4cff..e2fc66f 100644
--- a/vm/mterp/armv5te/OP_MOVE_EXCEPTION.S
+++ b/vm/mterp/armv5te/OP_MOVE_EXCEPTION.S
@@ -1,11 +1,10 @@
 %verify "executed"
     /* move-exception vAA */
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
     mov     r2, rINST, lsr #8           @ r2<- AA
-    ldr     r3, [r0, #offThread_exception]  @ r3<- dvmGetException bypass
+    ldr     r3, [rSELF, #offThread_exception]  @ r3<- dvmGetException bypass
     mov     r1, #0                      @ r1<- 0
     FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
     SET_VREG(r3, r2)                    @ fp[AA]<- exception obj
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
-    str     r1, [r0, #offThread_exception]  @ dvmClearException bypass
+    str     r1, [rSELF, #offThread_exception]  @ dvmClearException bypass
     GOTO_OPCODE(ip)                     @ jump to next instruction
diff --git a/vm/mterp/armv5te/OP_MOVE_RESULT.S b/vm/mterp/armv5te/OP_MOVE_RESULT.S
index 9de8401..72377f8 100644
--- a/vm/mterp/armv5te/OP_MOVE_RESULT.S
+++ b/vm/mterp/armv5te/OP_MOVE_RESULT.S
@@ -3,7 +3,7 @@
     /* op vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
-    ldr     r0, [rGLUE, #offGlue_retval]    @ r0<- glue->retval.i
+    ldr     r0, [rSELF, #offThread_retval]    @ r0<- self->retval.i
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     SET_VREG(r0, r2)                    @ fp[AA]<- r0
     GOTO_OPCODE(ip)                     @ jump to next instruction
diff --git a/vm/mterp/armv5te/OP_MOVE_RESULT_WIDE.S b/vm/mterp/armv5te/OP_MOVE_RESULT_WIDE.S
index 92f7443..4eb0198 100644
--- a/vm/mterp/armv5te/OP_MOVE_RESULT_WIDE.S
+++ b/vm/mterp/armv5te/OP_MOVE_RESULT_WIDE.S
@@ -1,7 +1,7 @@
 %verify "executed"
     /* move-result-wide vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
-    add     r3, rGLUE, #offGlue_retval  @ r3<- &glue->retval
+    add     r3, rSELF, #offThread_retval  @ r3<- &self->retval
     add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
     ldmia   r3, {r0-r1}                 @ r0/r1<- retval.j
     FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
diff --git a/vm/mterp/armv5te/OP_NEW_ARRAY.S b/vm/mterp/armv5te/OP_NEW_ARRAY.S
index da93c45..eca1ac6 100644
--- a/vm/mterp/armv5te/OP_NEW_ARRAY.S
+++ b/vm/mterp/armv5te/OP_NEW_ARRAY.S
@@ -11,12 +11,12 @@
     /* new-array vA, vB, class@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
     FETCH(r2, 1)                        @ r2<- CCCC
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     GET_VREG(r1, r0)                    @ r1<- vB (array length)
     ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
     cmp     r1, #0                      @ check length
     ldr     r0, [r3, r2, lsl #2]        @ r0<- resolved class
-    bmi     common_errNegativeArraySize @ negative length, bail
+    bmi     common_errNegativeArraySize @ negative length, bail - len in r1
     cmp     r0, #0                      @ already resolved?
     EXPORT_PC()                         @ req'd for resolve, alloc
     bne     .L${opcode}_finish          @ resolved, continue
@@ -31,7 +31,7 @@
      *  r2 holds class ref CCCC
      */
 .L${opcode}_resolve:
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     mov     r9, r1                      @ r9<- length (save)
     mov     r1, r2                      @ r1<- CCCC
     mov     r2, #0                      @ r2<- false
diff --git a/vm/mterp/armv5te/OP_NEW_ARRAY_JUMBO.S b/vm/mterp/armv5te/OP_NEW_ARRAY_JUMBO.S
new file mode 100644
index 0000000..568afb4
--- /dev/null
+++ b/vm/mterp/armv5te/OP_NEW_ARRAY_JUMBO.S
@@ -0,0 +1,62 @@
+%verify "executed"
+%verify "negative array length"
+%verify "allocation fails"
+    /*
+     * Allocate an array of objects, specified with the array class
+     * and a count.
+     *
+     * The verifier guarantees that this is an array class, so we don't
+     * check for it here.
+     */
+    /* new-array/jumbo vBBBB, vCCCC, class@AAAAAAAA */
+    FETCH(r2, 1)                        @ r2<- aaaa (lo)
+    FETCH(r3, 2)                        @ r3<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- vCCCC
+    orr     r2, r2, r3, lsl #16         @ r2<- AAAAaaaa
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    GET_VREG(r1, r0)                    @ r1<- vCCCC (array length)
+    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
+    cmp     r1, #0                      @ check length
+    ldr     r0, [r3, r2, lsl #2]        @ r0<- resolved class
+    bmi     common_errNegativeArraySize @ negative length, bail - len in r1
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ req'd for resolve, alloc
+    bne     .L${opcode}_finish          @ resolved, continue
+    b       .L${opcode}_resolve         @ do resolve now
+%break
+
+
+    /*
+     * Resolve class.  (This is an uncommon case.)
+     *
+     *  r1 holds array length
+     *  r2 holds class ref AAAAAAAA
+     */
+.L${opcode}_resolve:
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    mov     r9, r1                      @ r9<- length (save)
+    mov     r1, r2                      @ r1<- AAAAAAAA
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- call(clazz, ref)
+    cmp     r0, #0                      @ got null?
+    mov     r1, r9                      @ r1<- length (restore)
+    beq     common_exceptionThrown      @ yes, handle exception
+    @ fall through to ${opcode}_finish
+
+    /*
+     * Finish allocation.
+     *
+     *  r0 holds class
+     *  r1 holds array length
+     */
+.L${opcode}_finish:
+    mov     r2, #ALLOC_DONT_TRACK       @ don't track in local refs table
+    bl      dvmAllocArrayByClass        @ r0<- call(clazz, length, flags)
+    cmp     r0, #0                      @ failed?
+    FETCH(r2, 3)                        @ r2<- vBBBB
+    beq     common_exceptionThrown      @ yes, handle the exception
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r2)                    @ vBBBB<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
diff --git a/vm/mterp/armv5te/OP_NEW_INSTANCE.S b/vm/mterp/armv5te/OP_NEW_INSTANCE.S
index 2687e55..ce1f0c8 100644
--- a/vm/mterp/armv5te/OP_NEW_INSTANCE.S
+++ b/vm/mterp/armv5te/OP_NEW_INSTANCE.S
@@ -10,7 +10,7 @@
      * Create a new instance of a class.
      */
     /* new-instance vAA, class@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
     ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
@@ -56,13 +56,10 @@
      *  r1 holds BBBB
      */
 .L${opcode}_resolve:
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     mov     r2, #0                      @ r2<- false
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
     cmp     r0, #0                      @ got null?
     bne     .L${opcode}_resolved        @ no, continue
     b       common_exceptionThrown      @ yes, handle exception
-
-.LstrInstantiationErrorPtr:
-    .word   .LstrInstantiationError
diff --git a/vm/mterp/armv5te/OP_NEW_INSTANCE_JUMBO.S b/vm/mterp/armv5te/OP_NEW_INSTANCE_JUMBO.S
new file mode 100644
index 0000000..fad14a8
--- /dev/null
+++ b/vm/mterp/armv5te/OP_NEW_INSTANCE_JUMBO.S
@@ -0,0 +1,67 @@
+%verify "executed"
+%verify "class not resolved"
+%verify "class cannot be resolved"
+%verify "class not initialized"
+%verify "class fails to initialize"
+%verify "class already resolved/initialized"
+%verify "class is abstract or interface"
+%verify "allocation fails"
+    /*
+     * Create a new instance of a class.
+     */
+    /* new-instance/jumbo vBBBB, class@AAAAAAAA */
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
+    EXPORT_PC()                         @ req'd for init, resolve, alloc
+    cmp     r0, #0                      @ already resolved?
+    beq     .L${opcode}_resolve         @ no, resolve it now
+.L${opcode}_resolved:   @ r0=class
+    ldrb    r1, [r0, #offClassObject_status]    @ r1<- ClassStatus enum
+    cmp     r1, #CLASS_INITIALIZED      @ has class been initialized?
+    bne     .L${opcode}_needinit        @ no, init class now
+.L${opcode}_initialized: @ r0=class
+    mov     r1, #ALLOC_DONT_TRACK       @ flags for alloc call
+    bl      dvmAllocObject              @ r0<- new object
+    b       .L${opcode}_finish          @ continue
+%break
+
+    .balign 32                          @ minimize cache lines
+.L${opcode}_finish: @ r0=new object
+    FETCH(r3, 3)                        @ r3<- BBBB
+    cmp     r0, #0                      @ failed?
+    beq     common_exceptionThrown      @ yes, handle the exception
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r3)                    @ vBBBB<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * Class initialization required.
+     *
+     *  r0 holds class object
+     */
+.L${opcode}_needinit:
+    mov     r9, r0                      @ save r0
+    bl      dvmInitClass                @ initialize class
+    cmp     r0, #0                      @ check boolean result
+    mov     r0, r9                      @ restore r0
+    bne     .L${opcode}_initialized     @ success, continue
+    b       common_exceptionThrown      @ failed, deal with init exception
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  r1 holds AAAAAAAA
+     */
+.L${opcode}_resolve:
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
+    cmp     r0, #0                      @ got null?
+    bne     .L${opcode}_resolved        @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
diff --git a/vm/mterp/armv5te/OP_RETURN.S b/vm/mterp/armv5te/OP_RETURN.S
index 8838182..5f7350a 100644
--- a/vm/mterp/armv5te/OP_RETURN.S
+++ b/vm/mterp/armv5te/OP_RETURN.S
@@ -1,6 +1,6 @@
 %verify "executed"
     /*
-     * Return a 32-bit value.  Copies the return value into the "glue"
+     * Return a 32-bit value.  Copies the return value into the "thread"
      * structure, then jumps to the return handler.
      *
      * for: return, return-object
@@ -8,5 +8,5 @@
     /* op vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     GET_VREG(r0, r2)                    @ r0<- vAA
-    str     r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA
+    str     r0, [rSELF, #offThread_retval] @ retval.i <- vAA
     b       common_returnFromMethod
diff --git a/vm/mterp/armv5te/OP_RETURN_WIDE.S b/vm/mterp/armv5te/OP_RETURN_WIDE.S
index 33880de..c185077 100644
--- a/vm/mterp/armv5te/OP_RETURN_WIDE.S
+++ b/vm/mterp/armv5te/OP_RETURN_WIDE.S
@@ -1,12 +1,12 @@
 %verify "executed"
     /*
-     * Return a 64-bit value.  Copies the return value into the "glue"
+     * Return a 64-bit value.  Copies the return value into the "thread"
      * structure, then jumps to the return handler.
      */
     /* return-wide vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
-    add     r3, rGLUE, #offGlue_retval  @ r3<- &glue->retval
+    add     r3, rSELF, #offThread_retval  @ r3<- &self->retval
     ldmia   r2, {r0-r1}                 @ r0/r1 <- vAA/vAA+1
     stmia   r3, {r0-r1}                 @ retval<- r0/r1
     b       common_returnFromMethod
diff --git a/vm/mterp/armv5te/OP_SGET.S b/vm/mterp/armv5te/OP_SGET.S
index c803d27..14fc63a 100644
--- a/vm/mterp/armv5te/OP_SGET.S
+++ b/vm/mterp/armv5te/OP_SGET.S
@@ -9,7 +9,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -30,7 +30,7 @@
      *  r1: BBBB field ref
      */
 .L${opcode}_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
diff --git a/vm/mterp/armv5te/OP_SGET_BOOLEAN_JUMBO.S b/vm/mterp/armv5te/OP_SGET_BOOLEAN_JUMBO.S
new file mode 100644
index 0000000..b38ce7c
--- /dev/null
+++ b/vm/mterp/armv5te/OP_SGET_BOOLEAN_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te/OP_SGET_JUMBO.S"
diff --git a/vm/mterp/armv5te/OP_SGET_BYTE_JUMBO.S b/vm/mterp/armv5te/OP_SGET_BYTE_JUMBO.S
new file mode 100644
index 0000000..b38ce7c
--- /dev/null
+++ b/vm/mterp/armv5te/OP_SGET_BYTE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te/OP_SGET_JUMBO.S"
diff --git a/vm/mterp/armv5te/OP_SGET_CHAR_JUMBO.S b/vm/mterp/armv5te/OP_SGET_CHAR_JUMBO.S
new file mode 100644
index 0000000..b38ce7c
--- /dev/null
+++ b/vm/mterp/armv5te/OP_SGET_CHAR_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te/OP_SGET_JUMBO.S"
diff --git a/vm/mterp/armv5te/OP_SGET_JUMBO.S b/vm/mterp/armv5te/OP_SGET_JUMBO.S
new file mode 100644
index 0000000..374b34c
--- /dev/null
+++ b/vm/mterp/armv5te/OP_SGET_JUMBO.S
@@ -0,0 +1,42 @@
+%default { "barrier":"@ no-op " }
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .L${opcode}_resolve         @ yes, do resolve
+.L${opcode}_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    $barrier                            @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+%break
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.L${opcode}_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .L${opcode}_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
diff --git a/vm/mterp/armv5te/OP_SGET_OBJECT_JUMBO.S b/vm/mterp/armv5te/OP_SGET_OBJECT_JUMBO.S
new file mode 100644
index 0000000..b38ce7c
--- /dev/null
+++ b/vm/mterp/armv5te/OP_SGET_OBJECT_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te/OP_SGET_JUMBO.S"
diff --git a/vm/mterp/armv5te/OP_SGET_OBJECT_VOLATILE_JUMBO.S b/vm/mterp/armv5te/OP_SGET_OBJECT_VOLATILE_JUMBO.S
new file mode 100644
index 0000000..bebc805
--- /dev/null
+++ b/vm/mterp/armv5te/OP_SGET_OBJECT_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te/OP_SGET_OBJECT_JUMBO.S" {"barrier":"SMP_DMB"}
diff --git a/vm/mterp/armv5te/OP_SGET_SHORT_JUMBO.S b/vm/mterp/armv5te/OP_SGET_SHORT_JUMBO.S
new file mode 100644
index 0000000..b38ce7c
--- /dev/null
+++ b/vm/mterp/armv5te/OP_SGET_SHORT_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te/OP_SGET_JUMBO.S"
diff --git a/vm/mterp/armv5te/OP_SGET_VOLATILE_JUMBO.S b/vm/mterp/armv5te/OP_SGET_VOLATILE_JUMBO.S
new file mode 100644
index 0000000..9075c28
--- /dev/null
+++ b/vm/mterp/armv5te/OP_SGET_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te/OP_SGET_JUMBO.S" {"barrier":"SMP_DMB"}
diff --git a/vm/mterp/armv5te/OP_SGET_WIDE.S b/vm/mterp/armv5te/OP_SGET_WIDE.S
index 768b9da..73105c6 100644
--- a/vm/mterp/armv5te/OP_SGET_WIDE.S
+++ b/vm/mterp/armv5te/OP_SGET_WIDE.S
@@ -7,7 +7,7 @@
      * 64-bit SGET handler.
      */
     /* sget-wide vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -35,7 +35,7 @@
      * Returns StaticField pointer in r0.
      */
 .L${opcode}_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
diff --git a/vm/mterp/armv5te/OP_SGET_WIDE_JUMBO.S b/vm/mterp/armv5te/OP_SGET_WIDE_JUMBO.S
new file mode 100644
index 0000000..c1f5cc7
--- /dev/null
+++ b/vm/mterp/armv5te/OP_SGET_WIDE_JUMBO.S
@@ -0,0 +1,46 @@
+%default {"volatile":"0"}
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Jumbo 64-bit SGET handler.
+     */
+    /* sget-wide/jumbo vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .L${opcode}_resolve         @ yes, do resolve
+.L${opcode}_finish:
+    FETCH(r9, 3)                        @ r9<- BBBB
+    .if $volatile
+    add     r0, r0, #offStaticField_value @ r0<- pointer to data
+    bl      dvmQuasiAtomicRead64        @ r0/r1<- contents of field
+    .else
+    ldrd    r0, [r0, #offStaticField_value] @ r0/r1<- field value (aligned)
+    .endif
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BBBB]
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    stmia   r9, {r0-r1}                 @ vBBBB/vBBBB+1<- r0/r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+%break
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     *
+     * Returns StaticField pointer in r0.
+     */
+.L${opcode}_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .L${opcode}_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
diff --git a/vm/mterp/armv5te/OP_SGET_WIDE_VOLATILE_JUMBO.S b/vm/mterp/armv5te/OP_SGET_WIDE_VOLATILE_JUMBO.S
new file mode 100644
index 0000000..1e4b2de
--- /dev/null
+++ b/vm/mterp/armv5te/OP_SGET_WIDE_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te/OP_SGET_WIDE_JUMBO.S" {"volatile":"1"}
diff --git a/vm/mterp/armv5te/OP_SPUT.S b/vm/mterp/armv5te/OP_SPUT.S
index e709b22..0208ccc 100644
--- a/vm/mterp/armv5te/OP_SPUT.S
+++ b/vm/mterp/armv5te/OP_SPUT.S
@@ -9,7 +9,7 @@
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -30,7 +30,7 @@
      *  r1: BBBB field ref
      */
 .L${opcode}_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
diff --git a/vm/mterp/armv5te/OP_SPUT_BOOLEAN_JUMBO.S b/vm/mterp/armv5te/OP_SPUT_BOOLEAN_JUMBO.S
new file mode 100644
index 0000000..e8a64be
--- /dev/null
+++ b/vm/mterp/armv5te/OP_SPUT_BOOLEAN_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te/OP_SPUT_JUMBO.S"
diff --git a/vm/mterp/armv5te/OP_SPUT_BYTE_JUMBO.S b/vm/mterp/armv5te/OP_SPUT_BYTE_JUMBO.S
new file mode 100644
index 0000000..e8a64be
--- /dev/null
+++ b/vm/mterp/armv5te/OP_SPUT_BYTE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te/OP_SPUT_JUMBO.S"
diff --git a/vm/mterp/armv5te/OP_SPUT_CHAR_JUMBO.S b/vm/mterp/armv5te/OP_SPUT_CHAR_JUMBO.S
new file mode 100644
index 0000000..e8a64be
--- /dev/null
+++ b/vm/mterp/armv5te/OP_SPUT_CHAR_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te/OP_SPUT_JUMBO.S"
diff --git a/vm/mterp/armv5te/OP_SPUT_JUMBO.S b/vm/mterp/armv5te/OP_SPUT_JUMBO.S
new file mode 100644
index 0000000..7011b67
--- /dev/null
+++ b/vm/mterp/armv5te/OP_SPUT_JUMBO.S
@@ -0,0 +1,42 @@
+%default { "barrier":"@ no-op " }
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .L${opcode}_resolve         @ yes, do resolve
+.L${opcode}_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    $barrier                            @ releasing store
+    str     r1, [r0, #offStaticField_value] @ field<- vBBBB
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+%break
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.L${opcode}_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .L${opcode}_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
diff --git a/vm/mterp/armv5te/OP_SPUT_OBJECT.S b/vm/mterp/armv5te/OP_SPUT_OBJECT.S
index fe9fa4c..8e0c16d 100644
--- a/vm/mterp/armv5te/OP_SPUT_OBJECT.S
+++ b/vm/mterp/armv5te/OP_SPUT_OBJECT.S
@@ -9,13 +9,13 @@
      * for: sput-object, sput-object-volatile
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .L${opcode}_finish          @ no, continue
-    ldr     r9, [rGLUE, #offGlue_method]    @ r9<- current method
+    ldr     r9, [rSELF, #offThread_method]    @ r9<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r9, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -28,7 +28,7 @@
     mov     r2, rINST, lsr #8           @ r2<- AA
     FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
     GET_VREG(r1, r2)                    @ r1<- fp[AA]
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     ldr     r9, [r0, #offField_clazz]   @ r9<- field->clazz
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     $barrier                            @ releasing store
diff --git a/vm/mterp/armv5te/OP_SPUT_OBJECT_JUMBO.S b/vm/mterp/armv5te/OP_SPUT_OBJECT_JUMBO.S
new file mode 100644
index 0000000..e10c793
--- /dev/null
+++ b/vm/mterp/armv5te/OP_SPUT_OBJECT_JUMBO.S
@@ -0,0 +1,38 @@
+%default { "barrier":"@ no-op " }
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Jumbo 32-bit SPUT handler for objects
+     */
+    /* sput-object/jumbo vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .L${opcode}_finish          @ no, continue
+    ldr     r9, [rSELF, #offThread_method]    @ r9<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r9, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .L${opcode}_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+%break
+
+.L${opcode}_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
+    ldr     r9, [r0, #offField_clazz]   @ r9<- field->clazz
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    $barrier                            @ releasing store
+    str     r1, [r0, #offStaticField_value]  @ field<- vBBBB
+    cmp     r1, #0                      @ stored a null object?
+    strneb  r2, [r2, r9, lsr #GC_CARD_SHIFT]  @ mark card based on obj head
+    GOTO_OPCODE(ip)                     @ jump to next instruction
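
The sput-object/jumbo finish block above also performs the GC write barrier: after storing a non-null reference it dirties the card for the field's declaring class, writing the low byte of the card-table base as the dirty value. A rough C equivalent of that step is sketched below; markCardIfNeeded is an illustrative placeholder and the GC_CARD_SHIFT value is an assumption, not taken from the patch.

#include <stdint.h>
#include <stddef.h>

#define GC_CARD_SHIFT 7    /* assumed card granularity (128-byte cards) */

static void markCardIfNeeded(uint8_t *cardTable,   /* self->cardTable, biased
                                                      so raw addresses index
                                                      it directly            */
                             const void *clazz,    /* field->clazz           */
                             const void *obj)      /* reference just stored  */
{
    if (obj != NULL) {
        /* mirrors "strneb r2, [r2, r9, lsr #GC_CARD_SHIFT]": the byte
         * written is the low byte of the card-table base pointer */
        cardTable[(uintptr_t)clazz >> GC_CARD_SHIFT] =
                (uint8_t)(uintptr_t)cardTable;
    }
}
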
diff --git a/vm/mterp/armv5te/OP_SPUT_OBJECT_VOLATILE_JUMBO.S b/vm/mterp/armv5te/OP_SPUT_OBJECT_VOLATILE_JUMBO.S
new file mode 100644
index 0000000..78fe07b
--- /dev/null
+++ b/vm/mterp/armv5te/OP_SPUT_OBJECT_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te/OP_SPUT_OBJECT_JUMBO.S" {"barrier":"SMP_DMB"}
diff --git a/vm/mterp/armv5te/OP_SPUT_SHORT_JUMBO.S b/vm/mterp/armv5te/OP_SPUT_SHORT_JUMBO.S
new file mode 100644
index 0000000..e8a64be
--- /dev/null
+++ b/vm/mterp/armv5te/OP_SPUT_SHORT_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te/OP_SPUT_JUMBO.S"
diff --git a/vm/mterp/armv5te/OP_SPUT_VOLATILE_JUMBO.S b/vm/mterp/armv5te/OP_SPUT_VOLATILE_JUMBO.S
new file mode 100644
index 0000000..7049fc6
--- /dev/null
+++ b/vm/mterp/armv5te/OP_SPUT_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te/OP_SPUT_JUMBO.S" {"barrier":"SMP_DMB"}
diff --git a/vm/mterp/armv5te/OP_SPUT_WIDE.S b/vm/mterp/armv5te/OP_SPUT_WIDE.S
index 330c72b..1f650f2 100644
--- a/vm/mterp/armv5te/OP_SPUT_WIDE.S
+++ b/vm/mterp/armv5te/OP_SPUT_WIDE.S
@@ -7,7 +7,7 @@
      * 64-bit SPUT handler.
      */
     /* sput-wide vAA, field@BBBB */
-    ldr     r0, [rGLUE, #offGlue_methodClassDex]  @ r0<- DvmDex
+    ldr     r0, [rSELF, #offThread_methodClassDex]  @ r0<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields
     mov     r9, rINST, lsr #8           @ r9<- AA
@@ -36,7 +36,7 @@
      * Returns StaticField pointer in r2.
      */
 .L${opcode}_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
diff --git a/vm/mterp/armv5te/OP_SPUT_WIDE_JUMBO.S b/vm/mterp/armv5te/OP_SPUT_WIDE_JUMBO.S
new file mode 100644
index 0000000..2bfeae1
--- /dev/null
+++ b/vm/mterp/armv5te/OP_SPUT_WIDE_JUMBO.S
@@ -0,0 +1,48 @@
+%default {"volatile":"0"}
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Jumbo 64-bit SPUT handler.
+     */
+    /* sput-wide/jumbo vBBBB, field@AAAAAAAA */
+    ldr     r0, [rSELF, #offThread_methodClassDex]  @ r0<- DvmDex
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    ldr     r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    FETCH(r9, 3)                        @ r9<- BBBB
+    ldr     r2, [r0, r1, lsl #2]        @ r2<- resolved StaticField ptr
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BBBB]
+    cmp     r2, #0                      @ is resolved entry null?
+    beq     .L${opcode}_resolve         @ yes, do resolve
+.L${opcode}_finish: @ field ptr in r2, BBBB in r9
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vBBBB/vBBBB+1
+    GET_INST_OPCODE(r10)                @ extract opcode from rINST
+    .if $volatile
+    add     r2, r2, #offStaticField_value @ r2<- pointer to data
+    bl      dvmQuasiAtomicSwap64        @ stores r0/r1 into addr r2
+    .else
+    strd    r0, [r2, #offStaticField_value] @ field<- vBBBB/vBBBB+1
+    .endif
+    GOTO_OPCODE(r10)                    @ jump to next instruction
+%break
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     *  r9: &fp[BBBB]
+     *
+     * Returns StaticField pointer in r2.
+     */
+.L${opcode}_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    mov     r2, r0                      @ copy to r2
+    bne     .L${opcode}_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
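
In the wide handler above, the $volatile template flag selects between a plain strd and a call to dvmQuasiAtomicSwap64, because a two-word store is not atomic on ARMv5 and a volatile 64-bit field must be written in one indivisible step. A rough C sketch of that choice follows; storeWideStaticField is an illustrative placeholder and the dvmQuasiAtomicSwap64 declaration is a simplified assumption about the VM's atomic helpers.

#include <stdint.h>

/* assumed to match the helper the assembly calls on the volatile path */
extern int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t *addr);

static void storeWideStaticField(volatile int64_t *fieldValueAddr,
                                 int64_t value, int isVolatile)
{
    if (isVolatile) {
        /* single atomic 64-bit exchange, like the bl dvmQuasiAtomicSwap64 */
        (void)dvmQuasiAtomicSwap64(value, fieldValueAddr);
    } else {
        /* plain aligned 64-bit store, like the strd on the fast path */
        *fieldValueAddr = value;
    }
}
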
diff --git a/vm/mterp/armv5te/OP_SPUT_WIDE_VOLATILE_JUMBO.S b/vm/mterp/armv5te/OP_SPUT_WIDE_VOLATILE_JUMBO.S
new file mode 100644
index 0000000..c1b0991
--- /dev/null
+++ b/vm/mterp/armv5te/OP_SPUT_WIDE_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te/OP_SPUT_WIDE_JUMBO.S" {"volatile":"1"}
diff --git a/vm/mterp/armv5te/OP_THROW.S b/vm/mterp/armv5te/OP_THROW.S
index dd0a0b8..6e157b4 100644
--- a/vm/mterp/armv5te/OP_THROW.S
+++ b/vm/mterp/armv5te/OP_THROW.S
@@ -6,10 +6,9 @@
     /* throw vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     GET_VREG(r1, r2)                    @ r1<- vAA (exception object)
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
     EXPORT_PC()                         @ exception handler can throw
     cmp     r1, #0                      @ null object?
     beq     common_errNullObject        @ yes, throw an NPE instead
     @ bypass dvmSetException, just store it
-    str     r1, [r0, #offThread_exception]  @ thread->exception<- obj
+    str     r1, [rSELF, #offThread_exception]  @ thread->exception<- obj
     b       common_exceptionThrown
diff --git a/vm/mterp/armv5te/OP_THROW_VERIFICATION_ERROR.S b/vm/mterp/armv5te/OP_THROW_VERIFICATION_ERROR.S
index 8bd4f35..afe9fd8 100644
--- a/vm/mterp/armv5te/OP_THROW_VERIFICATION_ERROR.S
+++ b/vm/mterp/armv5te/OP_THROW_VERIFICATION_ERROR.S
@@ -5,7 +5,7 @@
      * exception is indicated by AA, with some detail provided by BBBB.
      */
     /* op AA, ref@BBBB */
-    ldr     r0, [rGLUE, #offGlue_method]    @ r0<- glue->method
+    ldr     r0, [rSELF, #offThread_method]    @ r0<- self->method
     FETCH(r2, 1)                        @ r2<- BBBB
     EXPORT_PC()                         @ export the PC
     mov     r1, rINST, lsr #8           @ r1<- AA
diff --git a/vm/mterp/armv5te/OP_THROW_VERIFICATION_ERROR_JUMBO.S b/vm/mterp/armv5te/OP_THROW_VERIFICATION_ERROR_JUMBO.S
new file mode 100644
index 0000000..f42ba76
--- /dev/null
+++ b/vm/mterp/armv5te/OP_THROW_VERIFICATION_ERROR_JUMBO.S
@@ -0,0 +1,15 @@
+%verify executed
+    /*
+     * Handle a jumbo throw-verification-error instruction.  This throws an
+     * exception for an error discovered during verification.  The
+     * exception is indicated by BBBB, with some detail provided by AAAAAAAA.
+     */
+    /* exop BBBB, Class@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    ldr     r0, [rSELF, #offThread_method]    @ r0<- self->method
+    orr     r2, r1, r2, lsl #16         @ r2<- AAAAaaaa
+    EXPORT_PC()                         @ export the PC
+    FETCH(r1, 3)                        @ r1<- BBBB
+    bl      dvmThrowVerificationError   @ always throws
+    b       common_exceptionThrown      @ handle exception
diff --git a/vm/mterp/armv5te/OP_UNUSED_27FF.S b/vm/mterp/armv5te/OP_UNUSED_27FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_27FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_28FF.S b/vm/mterp/armv5te/OP_UNUSED_28FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_28FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_29FF.S b/vm/mterp/armv5te/OP_UNUSED_29FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_29FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_2AFF.S b/vm/mterp/armv5te/OP_UNUSED_2AFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_2AFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_2BFF.S b/vm/mterp/armv5te/OP_UNUSED_2BFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_2BFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_2CFF.S b/vm/mterp/armv5te/OP_UNUSED_2CFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_2CFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_2DFF.S b/vm/mterp/armv5te/OP_UNUSED_2DFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_2DFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_2EFF.S b/vm/mterp/armv5te/OP_UNUSED_2EFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_2EFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_2FFF.S b/vm/mterp/armv5te/OP_UNUSED_2FFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_2FFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_30FF.S b/vm/mterp/armv5te/OP_UNUSED_30FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_30FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_31FF.S b/vm/mterp/armv5te/OP_UNUSED_31FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_31FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_32FF.S b/vm/mterp/armv5te/OP_UNUSED_32FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_32FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_33FF.S b/vm/mterp/armv5te/OP_UNUSED_33FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_33FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_34FF.S b/vm/mterp/armv5te/OP_UNUSED_34FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_34FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_35FF.S b/vm/mterp/armv5te/OP_UNUSED_35FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_35FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_36FF.S b/vm/mterp/armv5te/OP_UNUSED_36FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_36FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_37FF.S b/vm/mterp/armv5te/OP_UNUSED_37FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_37FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_38FF.S b/vm/mterp/armv5te/OP_UNUSED_38FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_38FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_39FF.S b/vm/mterp/armv5te/OP_UNUSED_39FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_39FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_3AFF.S b/vm/mterp/armv5te/OP_UNUSED_3AFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_3AFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_3BFF.S b/vm/mterp/armv5te/OP_UNUSED_3BFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_3BFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_3CFF.S b/vm/mterp/armv5te/OP_UNUSED_3CFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_3CFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_3DFF.S b/vm/mterp/armv5te/OP_UNUSED_3DFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_3DFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_3EFF.S b/vm/mterp/armv5te/OP_UNUSED_3EFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_3EFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_3FFF.S b/vm/mterp/armv5te/OP_UNUSED_3FFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_3FFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_40FF.S b/vm/mterp/armv5te/OP_UNUSED_40FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_40FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_41FF.S b/vm/mterp/armv5te/OP_UNUSED_41FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_41FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_42FF.S b/vm/mterp/armv5te/OP_UNUSED_42FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_42FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_43FF.S b/vm/mterp/armv5te/OP_UNUSED_43FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_43FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_44FF.S b/vm/mterp/armv5te/OP_UNUSED_44FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_44FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_45FF.S b/vm/mterp/armv5te/OP_UNUSED_45FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_45FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_46FF.S b/vm/mterp/armv5te/OP_UNUSED_46FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_46FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_47FF.S b/vm/mterp/armv5te/OP_UNUSED_47FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_47FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_48FF.S b/vm/mterp/armv5te/OP_UNUSED_48FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_48FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_49FF.S b/vm/mterp/armv5te/OP_UNUSED_49FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_49FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_4AFF.S b/vm/mterp/armv5te/OP_UNUSED_4AFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_4AFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_4BFF.S b/vm/mterp/armv5te/OP_UNUSED_4BFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_4BFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_4CFF.S b/vm/mterp/armv5te/OP_UNUSED_4CFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_4CFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_4DFF.S b/vm/mterp/armv5te/OP_UNUSED_4DFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_4DFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_4EFF.S b/vm/mterp/armv5te/OP_UNUSED_4EFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_4EFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_4FFF.S b/vm/mterp/armv5te/OP_UNUSED_4FFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_4FFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_50FF.S b/vm/mterp/armv5te/OP_UNUSED_50FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_50FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_51FF.S b/vm/mterp/armv5te/OP_UNUSED_51FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_51FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_52FF.S b/vm/mterp/armv5te/OP_UNUSED_52FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_52FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_53FF.S b/vm/mterp/armv5te/OP_UNUSED_53FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_53FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_54FF.S b/vm/mterp/armv5te/OP_UNUSED_54FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_54FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_55FF.S b/vm/mterp/armv5te/OP_UNUSED_55FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_55FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_56FF.S b/vm/mterp/armv5te/OP_UNUSED_56FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_56FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_57FF.S b/vm/mterp/armv5te/OP_UNUSED_57FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_57FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_58FF.S b/vm/mterp/armv5te/OP_UNUSED_58FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_58FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_59FF.S b/vm/mterp/armv5te/OP_UNUSED_59FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_59FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_5AFF.S b/vm/mterp/armv5te/OP_UNUSED_5AFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_5AFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_5BFF.S b/vm/mterp/armv5te/OP_UNUSED_5BFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_5BFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_5CFF.S b/vm/mterp/armv5te/OP_UNUSED_5CFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_5CFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_5DFF.S b/vm/mterp/armv5te/OP_UNUSED_5DFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_5DFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_5EFF.S b/vm/mterp/armv5te/OP_UNUSED_5EFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_5EFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_5FFF.S b/vm/mterp/armv5te/OP_UNUSED_5FFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_5FFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_60FF.S b/vm/mterp/armv5te/OP_UNUSED_60FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_60FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_61FF.S b/vm/mterp/armv5te/OP_UNUSED_61FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_61FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_62FF.S b/vm/mterp/armv5te/OP_UNUSED_62FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_62FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_63FF.S b/vm/mterp/armv5te/OP_UNUSED_63FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_63FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_64FF.S b/vm/mterp/armv5te/OP_UNUSED_64FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_64FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_65FF.S b/vm/mterp/armv5te/OP_UNUSED_65FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_65FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_66FF.S b/vm/mterp/armv5te/OP_UNUSED_66FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_66FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_67FF.S b/vm/mterp/armv5te/OP_UNUSED_67FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_67FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_68FF.S b/vm/mterp/armv5te/OP_UNUSED_68FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_68FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_69FF.S b/vm/mterp/armv5te/OP_UNUSED_69FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_69FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_6AFF.S b/vm/mterp/armv5te/OP_UNUSED_6AFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_6AFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_6BFF.S b/vm/mterp/armv5te/OP_UNUSED_6BFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_6BFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_6CFF.S b/vm/mterp/armv5te/OP_UNUSED_6CFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_6CFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_6DFF.S b/vm/mterp/armv5te/OP_UNUSED_6DFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_6DFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_6EFF.S b/vm/mterp/armv5te/OP_UNUSED_6EFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_6EFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_6FFF.S b/vm/mterp/armv5te/OP_UNUSED_6FFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_6FFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_70FF.S b/vm/mterp/armv5te/OP_UNUSED_70FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_70FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_71FF.S b/vm/mterp/armv5te/OP_UNUSED_71FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_71FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_72FF.S b/vm/mterp/armv5te/OP_UNUSED_72FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_72FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_73FF.S b/vm/mterp/armv5te/OP_UNUSED_73FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_73FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_74FF.S b/vm/mterp/armv5te/OP_UNUSED_74FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_74FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_75FF.S b/vm/mterp/armv5te/OP_UNUSED_75FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_75FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_76FF.S b/vm/mterp/armv5te/OP_UNUSED_76FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_76FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_77FF.S b/vm/mterp/armv5te/OP_UNUSED_77FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_77FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_78FF.S b/vm/mterp/armv5te/OP_UNUSED_78FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_78FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_79FF.S b/vm/mterp/armv5te/OP_UNUSED_79FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_79FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_7AFF.S b/vm/mterp/armv5te/OP_UNUSED_7AFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_7AFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_7BFF.S b/vm/mterp/armv5te/OP_UNUSED_7BFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_7BFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_7CFF.S b/vm/mterp/armv5te/OP_UNUSED_7CFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_7CFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_7DFF.S b/vm/mterp/armv5te/OP_UNUSED_7DFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_7DFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_7EFF.S b/vm/mterp/armv5te/OP_UNUSED_7EFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_7EFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_7FFF.S b/vm/mterp/armv5te/OP_UNUSED_7FFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_7FFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_80FF.S b/vm/mterp/armv5te/OP_UNUSED_80FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_80FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_81FF.S b/vm/mterp/armv5te/OP_UNUSED_81FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_81FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_82FF.S b/vm/mterp/armv5te/OP_UNUSED_82FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_82FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_83FF.S b/vm/mterp/armv5te/OP_UNUSED_83FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_83FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_84FF.S b/vm/mterp/armv5te/OP_UNUSED_84FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_84FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_85FF.S b/vm/mterp/armv5te/OP_UNUSED_85FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_85FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_86FF.S b/vm/mterp/armv5te/OP_UNUSED_86FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_86FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_87FF.S b/vm/mterp/armv5te/OP_UNUSED_87FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_87FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_88FF.S b/vm/mterp/armv5te/OP_UNUSED_88FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_88FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_89FF.S b/vm/mterp/armv5te/OP_UNUSED_89FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_89FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_8AFF.S b/vm/mterp/armv5te/OP_UNUSED_8AFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_8AFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_8BFF.S b/vm/mterp/armv5te/OP_UNUSED_8BFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_8BFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_8CFF.S b/vm/mterp/armv5te/OP_UNUSED_8CFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_8CFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_8DFF.S b/vm/mterp/armv5te/OP_UNUSED_8DFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_8DFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_8EFF.S b/vm/mterp/armv5te/OP_UNUSED_8EFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_8EFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_8FFF.S b/vm/mterp/armv5te/OP_UNUSED_8FFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_8FFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_90FF.S b/vm/mterp/armv5te/OP_UNUSED_90FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_90FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_91FF.S b/vm/mterp/armv5te/OP_UNUSED_91FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_91FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_92FF.S b/vm/mterp/armv5te/OP_UNUSED_92FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_92FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_93FF.S b/vm/mterp/armv5te/OP_UNUSED_93FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_93FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_94FF.S b/vm/mterp/armv5te/OP_UNUSED_94FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_94FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_95FF.S b/vm/mterp/armv5te/OP_UNUSED_95FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_95FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_96FF.S b/vm/mterp/armv5te/OP_UNUSED_96FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_96FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_97FF.S b/vm/mterp/armv5te/OP_UNUSED_97FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_97FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_98FF.S b/vm/mterp/armv5te/OP_UNUSED_98FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_98FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_99FF.S b/vm/mterp/armv5te/OP_UNUSED_99FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_99FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_9AFF.S b/vm/mterp/armv5te/OP_UNUSED_9AFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_9AFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_9BFF.S b/vm/mterp/armv5te/OP_UNUSED_9BFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_9BFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_9CFF.S b/vm/mterp/armv5te/OP_UNUSED_9CFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_9CFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_9DFF.S b/vm/mterp/armv5te/OP_UNUSED_9DFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_9DFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_9EFF.S b/vm/mterp/armv5te/OP_UNUSED_9EFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_9EFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_9FFF.S b/vm/mterp/armv5te/OP_UNUSED_9FFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_9FFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_A0FF.S b/vm/mterp/armv5te/OP_UNUSED_A0FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_A0FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_A1FF.S b/vm/mterp/armv5te/OP_UNUSED_A1FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_A1FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_A2FF.S b/vm/mterp/armv5te/OP_UNUSED_A2FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_A2FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_A3FF.S b/vm/mterp/armv5te/OP_UNUSED_A3FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_A3FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_A4FF.S b/vm/mterp/armv5te/OP_UNUSED_A4FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_A4FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_A5FF.S b/vm/mterp/armv5te/OP_UNUSED_A5FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_A5FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_A6FF.S b/vm/mterp/armv5te/OP_UNUSED_A6FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_A6FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_A7FF.S b/vm/mterp/armv5te/OP_UNUSED_A7FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_A7FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_A8FF.S b/vm/mterp/armv5te/OP_UNUSED_A8FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_A8FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_A9FF.S b/vm/mterp/armv5te/OP_UNUSED_A9FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_A9FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_AAFF.S b/vm/mterp/armv5te/OP_UNUSED_AAFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_AAFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_ABFF.S b/vm/mterp/armv5te/OP_UNUSED_ABFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_ABFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_ACFF.S b/vm/mterp/armv5te/OP_UNUSED_ACFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_ACFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_ADFF.S b/vm/mterp/armv5te/OP_UNUSED_ADFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_ADFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_AEFF.S b/vm/mterp/armv5te/OP_UNUSED_AEFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_AEFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_AFFF.S b/vm/mterp/armv5te/OP_UNUSED_AFFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_AFFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_B0FF.S b/vm/mterp/armv5te/OP_UNUSED_B0FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_B0FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_B1FF.S b/vm/mterp/armv5te/OP_UNUSED_B1FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_B1FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_B2FF.S b/vm/mterp/armv5te/OP_UNUSED_B2FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_B2FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_B3FF.S b/vm/mterp/armv5te/OP_UNUSED_B3FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_B3FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_B4FF.S b/vm/mterp/armv5te/OP_UNUSED_B4FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_B4FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_B5FF.S b/vm/mterp/armv5te/OP_UNUSED_B5FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_B5FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_B6FF.S b/vm/mterp/armv5te/OP_UNUSED_B6FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_B6FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_B7FF.S b/vm/mterp/armv5te/OP_UNUSED_B7FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_B7FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_B8FF.S b/vm/mterp/armv5te/OP_UNUSED_B8FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_B8FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_B9FF.S b/vm/mterp/armv5te/OP_UNUSED_B9FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_B9FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_BAFF.S b/vm/mterp/armv5te/OP_UNUSED_BAFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_BAFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_BBFF.S b/vm/mterp/armv5te/OP_UNUSED_BBFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_BBFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_BCFF.S b/vm/mterp/armv5te/OP_UNUSED_BCFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_BCFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_BDFF.S b/vm/mterp/armv5te/OP_UNUSED_BDFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_BDFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_BEFF.S b/vm/mterp/armv5te/OP_UNUSED_BEFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_BEFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_BFFF.S b/vm/mterp/armv5te/OP_UNUSED_BFFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_BFFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_C0FF.S b/vm/mterp/armv5te/OP_UNUSED_C0FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_C0FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_C1FF.S b/vm/mterp/armv5te/OP_UNUSED_C1FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_C1FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_C2FF.S b/vm/mterp/armv5te/OP_UNUSED_C2FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_C2FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_C3FF.S b/vm/mterp/armv5te/OP_UNUSED_C3FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_C3FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_C4FF.S b/vm/mterp/armv5te/OP_UNUSED_C4FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_C4FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_C5FF.S b/vm/mterp/armv5te/OP_UNUSED_C5FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_C5FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_C6FF.S b/vm/mterp/armv5te/OP_UNUSED_C6FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_C6FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_C7FF.S b/vm/mterp/armv5te/OP_UNUSED_C7FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_C7FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_C8FF.S b/vm/mterp/armv5te/OP_UNUSED_C8FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_C8FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_C9FF.S b/vm/mterp/armv5te/OP_UNUSED_C9FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_C9FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_CAFF.S b/vm/mterp/armv5te/OP_UNUSED_CAFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_CAFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_CBFF.S b/vm/mterp/armv5te/OP_UNUSED_CBFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_CBFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_CCFF.S b/vm/mterp/armv5te/OP_UNUSED_CCFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_CCFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_CDFF.S b/vm/mterp/armv5te/OP_UNUSED_CDFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_CDFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_CEFF.S b/vm/mterp/armv5te/OP_UNUSED_CEFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_CEFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_CFFF.S b/vm/mterp/armv5te/OP_UNUSED_CFFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_CFFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_D0FF.S b/vm/mterp/armv5te/OP_UNUSED_D0FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_D0FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_D1FF.S b/vm/mterp/armv5te/OP_UNUSED_D1FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_D1FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_D2FF.S b/vm/mterp/armv5te/OP_UNUSED_D2FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_D2FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_D3FF.S b/vm/mterp/armv5te/OP_UNUSED_D3FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_D3FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_D4FF.S b/vm/mterp/armv5te/OP_UNUSED_D4FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_D4FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_D5FF.S b/vm/mterp/armv5te/OP_UNUSED_D5FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_D5FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_D6FF.S b/vm/mterp/armv5te/OP_UNUSED_D6FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_D6FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_D7FF.S b/vm/mterp/armv5te/OP_UNUSED_D7FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_D7FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_D8FF.S b/vm/mterp/armv5te/OP_UNUSED_D8FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_D8FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_D9FF.S b/vm/mterp/armv5te/OP_UNUSED_D9FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_D9FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_DAFF.S b/vm/mterp/armv5te/OP_UNUSED_DAFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_DAFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_DBFF.S b/vm/mterp/armv5te/OP_UNUSED_DBFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_DBFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_DCFF.S b/vm/mterp/armv5te/OP_UNUSED_DCFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_DCFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_DDFF.S b/vm/mterp/armv5te/OP_UNUSED_DDFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_DDFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_DEFF.S b/vm/mterp/armv5te/OP_UNUSED_DEFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_DEFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_DFFF.S b/vm/mterp/armv5te/OP_UNUSED_DFFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_DFFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_E0FF.S b/vm/mterp/armv5te/OP_UNUSED_E0FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_E0FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_E1FF.S b/vm/mterp/armv5te/OP_UNUSED_E1FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_E1FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_E2FF.S b/vm/mterp/armv5te/OP_UNUSED_E2FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_E2FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_E3FF.S b/vm/mterp/armv5te/OP_UNUSED_E3FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_E3FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_E4FF.S b/vm/mterp/armv5te/OP_UNUSED_E4FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_E4FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_E5FF.S b/vm/mterp/armv5te/OP_UNUSED_E5FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_E5FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_E6FF.S b/vm/mterp/armv5te/OP_UNUSED_E6FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_E6FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_E7FF.S b/vm/mterp/armv5te/OP_UNUSED_E7FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_E7FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_E8FF.S b/vm/mterp/armv5te/OP_UNUSED_E8FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_E8FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_E9FF.S b/vm/mterp/armv5te/OP_UNUSED_E9FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_E9FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_EAFF.S b/vm/mterp/armv5te/OP_UNUSED_EAFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_EAFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_EBFF.S b/vm/mterp/armv5te/OP_UNUSED_EBFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_EBFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_ECFF.S b/vm/mterp/armv5te/OP_UNUSED_ECFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_ECFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_EDFF.S b/vm/mterp/armv5te/OP_UNUSED_EDFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_EDFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_EEFF.S b/vm/mterp/armv5te/OP_UNUSED_EEFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_EEFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_EFFF.S b/vm/mterp/armv5te/OP_UNUSED_EFFF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_EFFF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_F0FF.S b/vm/mterp/armv5te/OP_UNUSED_F0FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_F0FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/OP_UNUSED_F1FF.S b/vm/mterp/armv5te/OP_UNUSED_F1FF.S
new file mode 100644
index 0000000..faa7246
--- /dev/null
+++ b/vm/mterp/armv5te/OP_UNUSED_F1FF.S
@@ -0,0 +1 @@
+%include "armv5te/unused.S"
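
The OP_UNUSED_*FF stubs above all reduce to the same one-liner: each reserved jumbo opcode (the 0x..FF values above) gets a fragment that simply pulls in the shared armv5te/unused.S handler, so any unimplemented jumbo slot reached at runtime funnels into one place. A rough C analogue of that table-filling idea is sketched below; Handler, kNumPackedOpcodes and handleUnused are illustrative names, not symbols from this patch.

#include <stdio.h>
#include <stdlib.h>

typedef void (*Handler)(unsigned opcode);

enum { kNumPackedOpcodes = 512 };         /* assumed: 256 core + 256 jumbo slots */

/* shared "unused" handler: every reserved slot points here */
static void handleUnused(unsigned opcode)
{
    fprintf(stderr, "unimplemented opcode 0x%03x\n", opcode);
    abort();
}

static Handler dispatchTable[kNumPackedOpcodes];

static void initDispatchTable(void)
{
    /* point every slot at the shared handler first; real handlers then
     * overwrite the slots that are actually implemented */
    for (unsigned i = 0; i < kNumPackedOpcodes; i++)
        dispatchTable[i] = handleUnused;
}

int main(void)
{
    initDispatchTable();
    dispatchTable[0x1ff](0x1ff);          /* hitting an unimplemented jumbo slot */
    return 0;
}

In the interpreter these slots are assembled handler fragments rather than C function pointers, but the effect is the same: every unimplemented opcode shares one failure path.
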
diff --git a/vm/mterp/armv5te/alt_stub.S b/vm/mterp/armv5te/alt_stub.S
new file mode 100644
index 0000000..fe92076
--- /dev/null
+++ b/vm/mterp/armv5te/alt_stub.S
@@ -0,0 +1,9 @@
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (${opnum} * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
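
alt_stub.S is the per-opcode body of an alternate handler table: it preloads lr with the real handler's address (dvmAsmInstructionStart plus opnum times the fixed 64-byte handler stride) and then tail-calls dvmCheckInst with the Dalvik PC and the Thread pointer, so the checker returns straight into the normal handler. A minimal C sketch of the same check-then-dispatch shape follows; Thread, checkBeforeInsn and the handler-table contents are stand-ins, and the lr/tail-call trick is flattened into two ordinary calls.

#include <stdio.h>

typedef struct { int id; } Thread;                       /* stand-in */
typedef void (*InsnHandler)(const unsigned short *pc, Thread *self);

static void handleNop(const unsigned short *pc, Thread *self)
{ (void)pc; (void)self; printf("nop\n"); }

static void handleMove(const unsigned short *pc, Thread *self)
{ (void)pc; (void)self; printf("move\n"); }

/* normal handler table, playing the role of dvmAsmInstructionStart */
static InsnHandler mainTable[] = { handleNop, handleMove };

/* per-instruction hook, playing the role of dvmCheckInst(dPC, self) */
static void checkBeforeInsn(const unsigned short *pc, Thread *self)
{ (void)pc; (void)self; /* debugger/profiler/suspend work would go here */ }

/* one alternate stub: run the check, then the real handler for this opcode */
static void altStub(unsigned opnum, const unsigned short *pc, Thread *self)
{
    checkBeforeInsn(pc, self);
    mainTable[opnum](pc, self);
}

int main(void)
{
    Thread t = { 0 };
    unsigned short insns[] = { 0x0000, 0x0001 };
    altStub(0, &insns[0], &t);
    altStub(1, &insns[1], &t);
    return 0;
}
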
diff --git a/vm/mterp/armv5te/debug.c b/vm/mterp/armv5te/debug.c
index 9f893fe..1b54618 100644
--- a/vm/mterp/armv5te/debug.c
+++ b/vm/mterp/armv5te/debug.c
@@ -12,7 +12,7 @@
 {
     register uint32_t rPC       asm("r4");
     register uint32_t rFP       asm("r5");
-    register uint32_t rGLUE     asm("r6");
+    register uint32_t rSELF     asm("r6");
     register uint32_t rINST     asm("r7");
     register uint32_t rIBASE    asm("r8");
     register uint32_t r9        asm("r9");
@@ -21,12 +21,12 @@
     //extern char dvmAsmInstructionStart[];
 
     printf("REGS: r0=%08x r1=%08x r2=%08x r3=%08x\n", r0, r1, r2, r3);
-    printf("    : rPC=%08x rFP=%08x rGLUE=%08x rINST=%08x\n",
-        rPC, rFP, rGLUE, rINST);
+    printf("    : rPC=%08x rFP=%08x rSELF=%08x rINST=%08x\n",
+        rPC, rFP, rSELF, rINST);
     printf("    : rIBASE=%08x r9=%08x r10=%08x\n", rIBASE, r9, r10);
 
-    //MterpGlue* glue = (MterpGlue*) rGLUE;
-    //const Method* method = glue->method;
+    //Thread* self = (Thread*) rSELF;
+    //const Method* method = self->method;
     printf("    + self is %p\n", dvmThreadSelf());
     //printf("    + currently in %s.%s %s\n",
     //    method->clazz->descriptor, method->name, method->shorty);
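
The only change to the debug helper is the r6 rename: the register now holds a Thread pointer (rSELF) rather than the old MterpGlue pointer, which is why assembly accesses throughout the rest of this patch become loads and stores at offThread_* byte offsets off rSELF. Those immediates correspond to offsets into the C Thread struct (the build keeps them in sync with the real layout); the sketch below shows the idea with offsetof on an assumed, much-reduced field set, not the real Thread definition.

#include <stddef.h>
#include <stdio.h>

typedef struct Thread {
    const unsigned short *pc;    /* saved Dalvik PC            */
    unsigned int         *fp;    /* saved Dalvik frame pointer */
    int                   suspendCount;
    void                 *curHandlerTable;
} Thread;

#define offThread_pc              offsetof(Thread, pc)
#define offThread_fp              offsetof(Thread, fp)
#define offThread_suspendCount    offsetof(Thread, suspendCount)
#define offThread_curHandlerTable offsetof(Thread, curHandlerTable)

int main(void)
{
    /* the real constants are fixed assembler immediates; printing them here
     * just shows they are ordinary struct offsets */
    printf("offThread_pc=%zu offThread_fp=%zu\n",
           offThread_pc, offThread_fp);
    printf("offThread_suspendCount=%zu offThread_curHandlerTable=%zu\n",
           offThread_suspendCount, offThread_curHandlerTable);
    return 0;
}
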
diff --git a/vm/mterp/armv5te/entry.S b/vm/mterp/armv5te/entry.S
index 445d580..c472e15 100644
--- a/vm/mterp/armv5te/entry.S
+++ b/vm/mterp/armv5te/entry.S
@@ -38,7 +38,7 @@
 
 /*
  * On entry:
- *  r0  MterpGlue* glue
+ *  r0  Thread* self
  *
  * This function returns a boolean "changeInterp" value.  The return comes
  * via a call to dvmMterpStdBail().
@@ -56,29 +56,28 @@
     MTERP_ENTRY2
 
     /* save stack pointer, add magic word for debuggerd */
-    str     sp, [r0, #offGlue_bailPtr]  @ save SP for eventual return
+    str     sp, [r0, #offThread_bailPtr]  @ save SP for eventual return
 
     /* set up "named" registers, figure out entry point */
-    mov     rGLUE, r0                   @ set rGLUE
-    ldr     r1, [r0, #offGlue_entryPoint]   @ enum is 4 bytes in aapcs-EABI
-    LOAD_PC_FP_FROM_GLUE()              @ load rPC and rFP from "glue"
-    adr     rIBASE, dvmAsmInstructionStart  @ set rIBASE
+    mov     rSELF, r0                   @ set rSELF
+    ldr     r1, [r0, #offThread_entryPoint]   @ enum is 4 bytes in aapcs-EABI
+    LOAD_PC_FP_FROM_SELF()              @ load rPC and rFP from "thread"
+    ldr     rIBASE, [rSELF, #offThread_curHandlerTable] @ set rIBASE
     cmp     r1, #kInterpEntryInstr      @ usual case?
     bne     .Lnot_instr                 @ no, handle it
 
 #if defined(WITH_JIT)
 .LentryInstr:
-    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
     /* Entry is always a possible trace start */
     GET_JIT_PROF_TABLE(r0)
     FETCH_INST()
     mov     r1, #0                      @ prepare the value for the new state
-    str     r1, [r10, #offThread_inJitCodeCache] @ back to the interp land
+    str     r1, [rSELF, #offThread_inJitCodeCache] @ back to the interp land
     cmp     r0,#0                       @ is profiling disabled?
 #if !defined(WITH_SELF_VERIFICATION)
     bne     common_updateProfile        @ profiling is enabled
 #else
-    ldr     r2, [r10, #offThread_shadowSpace]   @ to find out the jit exit state
+    ldr     r2, [rSELF, #offThread_shadowSpace] @ to find out the jit exit state
     beq     1f                          @ profiling is disabled
     ldr     r3, [r2, #offShadowSpace_jitExitState]  @ jit exit state
     cmp     r3, #kSVSTraceSelect        @ hot trace following?
@@ -108,20 +107,20 @@
 
 #if defined(WITH_JIT)
 .Lnot_throw:
-    ldr     r10,[rGLUE, #offGlue_jitResumeNPC]
-    ldr     r2,[rGLUE, #offGlue_jitResumeDPC]
+    ldr     r10,[rSELF, #offThread_jitResumeNPC]
+    ldr     r2,[rSELF, #offThread_jitResumeDPC]
     cmp     r1, #kInterpEntryResume     @ resuming after Jit single-step?
     bne     .Lbad_arg
     cmp     rPC,r2
     bne     .LentryInstr                @ must have branched, don't resume
 #if defined(WITH_SELF_VERIFICATION)
-    @ glue->entryPoint will be set in dvmSelfVerificationSaveState
+    @ self->entryPoint will be set in dvmSelfVerificationSaveState
     b       jitSVShadowRunStart         @ re-enter the translation after the
                                         @ single-stepped instruction
     @noreturn
 #endif
     mov     r1, #kInterpEntryInstr
-    str     r1, [rGLUE, #offGlue_entryPoint]
+    str     r1, [rSELF, #offThread_entryPoint]
     bx      r10                         @ re-enter the translation
 #endif
 
@@ -131,6 +130,7 @@
     bl      printf
     bl      dvmAbort
     .fnend
+    .size   dvmMterpStdRun, .-dvmMterpStdRun
 
 
     .global dvmMterpStdBail
@@ -146,11 +146,11 @@
  * LR to PC.
  *
  * On entry:
- *  r0  MterpGlue* glue
+ *  r0  Thread* self
  *  r1  bool changeInterp
  */
 dvmMterpStdBail:
-    ldr     sp, [r0, #offGlue_bailPtr]      @ sp<- saved SP
+    ldr     sp, [r0, #offThread_bailPtr]      @ sp<- saved SP
     mov     r0, r1                          @ return the changeInterp value
     add     sp, sp, #4                      @ un-align 64
     ldmfd   sp!, {r4-r10,fp,pc}             @ restore 9 regs and return
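
dvmMterpStdRun now takes the Thread pointer directly in r0, stashes the entry stack pointer at offThread_bailPtr, and loads rIBASE from the thread's current handler table instead of a fixed dvmAsmInstructionStart address. dvmMterpStdBail later reloads that saved sp and pops the callee-saved registers, so common_gotoBail can unwind out of arbitrarily deep handler code and return the changeInterp flag; the commented-out _longjmp path kept in the source hints at the same idea. Below is a setjmp/longjmp analogy of that run/bail pair with made-up names; it illustrates the saved-SP trick, it is not the actual mechanism.

#include <setjmp.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    jmp_buf bailPoint;          /* plays the role of offThread_bailPtr */
} Thread;

static void interpretLoop(Thread *self)
{
    /* ... dispatch instructions ... until some event forces a bail: */
    bool changeInterp = true;
    longjmp(self->bailPoint, changeInterp ? 2 : 1);   /* like common_gotoBail */
}

static bool mterpStdRun(Thread *self)
{
    int rc = setjmp(self->bailPoint);   /* like saving sp into bailPtr */
    if (rc != 0)
        return rc == 2;                 /* changeInterp value handed back by the bail */
    interpretLoop(self);
    return false;                       /* not reached in this sketch */
}

int main(void)
{
    Thread t;
    printf("changeInterp=%d\n", mterpStdRun(&t));
    return 0;
}
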
diff --git a/vm/mterp/armv5te/footer.S b/vm/mterp/armv5te/footer.S
index 3de35ef..5c5c5ce 100644
--- a/vm/mterp/armv5te/footer.S
+++ b/vm/mterp/armv5te/footer.S
@@ -14,71 +14,64 @@
 #if defined(WITH_SELF_VERIFICATION)
     .global dvmJitToInterpPunt
 dvmJitToInterpPunt:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r2,#kSVSPunt                 @ r2<- interpreter entry point
     mov    r3, #0
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpSingleStep
 dvmJitToInterpSingleStep:
-    str    lr,[rGLUE,#offGlue_jitResumeNPC]
-    str    r1,[rGLUE,#offGlue_jitResumeDPC]
+    str    lr,[rSELF,#offThread_jitResumeNPC]
+    str    r1,[rSELF,#offThread_jitResumeDPC]
     mov    r2,#kSVSSingleStep           @ r2<- interpreter entry point
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpNoChainNoProfile
 dvmJitToInterpNoChainNoProfile:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC                       @ pass our target PC
     mov    r2,#kSVSNoProfile            @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpTraceSelectNoChain
 dvmJitToInterpTraceSelectNoChain:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC                       @ pass our target PC
     mov    r2,#kSVSTraceSelect          @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpTraceSelect
 dvmJitToInterpTraceSelect:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     ldr    r0,[lr, #-1]                 @ pass our target PC
     mov    r2,#kSVSTraceSelect          @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpBackwardBranch
 dvmJitToInterpBackwardBranch:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     ldr    r0,[lr, #-1]                 @ pass our target PC
     mov    r2,#kSVSBackwardBranch       @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpNormal
 dvmJitToInterpNormal:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     ldr    r0,[lr, #-1]                 @ pass our target PC
     mov    r2,#kSVSNormal               @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpNoChain
 dvmJitToInterpNoChain:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC                       @ pass our target PC
     mov    r2,#kSVSNoChain              @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 #else
 /*
@@ -90,7 +83,6 @@
  */
     .global dvmJitToInterpPunt
 dvmJitToInterpPunt:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    rPC, r0
 #if defined(WITH_JIT_TUNING)
     mov    r0,lr
@@ -98,8 +90,8 @@
 #endif
     EXPORT_PC()
     mov    r0, #0
-    str    r0, [r10, #offThread_inJitCodeCache] @ Back to the interp land
-    adrl   rIBASE, dvmAsmInstructionStart
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     FETCH_INST()
     GET_INST_OPCODE(ip)
     GOTO_OPCODE(ip)
@@ -113,17 +105,17 @@
  */
     .global dvmJitToInterpSingleStep
 dvmJitToInterpSingleStep:
-    str    lr,[rGLUE,#offGlue_jitResumeNPC]
-    str    r1,[rGLUE,#offGlue_jitResumeDPC]
+    str    lr,[rSELF,#offThread_jitResumeNPC]
+    str    r1,[rSELF,#offThread_jitResumeDPC]
     mov    r1,#kInterpEntryInstr
     @ enum is 4 byte in aapcs-EABI
-    str    r1, [rGLUE, #offGlue_entryPoint]
+    str    r1, [rSELF, #offThread_entryPoint]
     mov    rPC,r0
     EXPORT_PC()
 
-    adrl   rIBASE, dvmAsmInstructionStart
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     mov    r2,#kJitSingleStep     @ Ask for single step and then revert
-    str    r2,[rGLUE,#offGlue_jitState]
+    str    r2,[rSELF,#offThread_jitState]
     mov    r1,#1                  @ set changeInterp to bail to debug interp
     b      common_gotoBail
 
@@ -136,10 +128,9 @@
 #if defined(WITH_JIT_TUNING)
     bl     dvmBumpNoChain
 #endif
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC
-    bl     dvmJitGetCodeAddr        @ Is there a translation?
-    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl     dvmJitGetTraceAddr       @ Is there a translation?
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     mov    r1, rPC                  @ arg1 of translation may need this
     mov    lr, #0                   @  in case target is HANDLER_INTERPRET
     cmp    r0,#0                    @ !0 means translation exists
@@ -154,12 +145,11 @@
     .global dvmJitToInterpTraceSelect
 dvmJitToInterpTraceSelect:
     ldr    rPC,[lr, #-1]           @ get our target PC
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     add    rINST,lr,#-5            @ save start of chain branch
     add    rINST, #-4              @  .. which is 9 bytes back
     mov    r0,rPC
-    bl     dvmJitGetCodeAddr       @ Is there a translation?
-    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl     dvmJitGetTraceAddr      @ Is there a translation?
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     cmp    r0,#0
     beq    2f
     mov    r1,rINST
@@ -172,7 +162,7 @@
 
 /* No translation, so request one if profiling isn't disabled*/
 2:
-    adrl   rIBASE, dvmAsmInstructionStart
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     GET_JIT_PROF_TABLE(r0)
     FETCH_INST()
     cmp    r0, #0
@@ -198,15 +188,14 @@
     .global dvmJitToInterpNormal
 dvmJitToInterpNormal:
     ldr    rPC,[lr, #-1]           @ get our target PC
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     add    rINST,lr,#-5            @ save start of chain branch
     add    rINST,#-4               @ .. which is 9 bytes back
 #if defined(WITH_JIT_TUNING)
     bl     dvmBumpNormal
 #endif
     mov    r0,rPC
-    bl     dvmJitGetCodeAddr        @ Is there a translation?
-    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl     dvmJitGetTraceAddr      @ Is there a translation?
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     cmp    r0,#0
     beq    toInterpreter            @ go if not, otherwise do chain
     mov    r1,rINST
@@ -226,16 +215,15 @@
 #if defined(WITH_JIT_TUNING)
     bl     dvmBumpNoChain
 #endif
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC
-    bl     dvmJitGetCodeAddr        @ Is there a translation?
-    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl     dvmJitGetTraceAddr       @ Is there a translation?
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     mov    r1, rPC                  @ arg1 of translation may need this
     mov    lr, #0                   @  in case target is HANDLER_INTERPRET
     cmp    r0,#0
     bxne   r0                       @ continue native execution if so
     EXPORT_PC()
-    adrl   rIBASE, dvmAsmInstructionStart
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     FETCH_INST()
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     GOTO_OPCODE(ip)                     @ jump to next instruction
@@ -249,10 +237,9 @@
 #if defined(WITH_JIT_TUNING)
     bl     dvmBumpNoChain
 #endif
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC
-    bl     dvmJitGetCodeAddr        @ Is there a translation?
-    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl     dvmJitGetTraceAddr       @ Is there a translation?
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     mov    r1, rPC                  @ arg1 of translation may need this
     mov    lr, #0                   @  in case target is HANDLER_INTERPRET
     cmp    r0,#0
@@ -261,13 +248,13 @@
 
 /*
  * No translation, restore interpreter regs and start interpreting.
- * rGLUE & rFP were preserved in the translated code, and rPC has
+ * rSELF & rFP were preserved in the translated code, and rPC has
  * already been restored by the time we get here.  We'll need to set
  * up rIBASE & rINST, and load the address of the JitTable into r0.
  */
 toInterpreter:
     EXPORT_PC()
-    adrl   rIBASE, dvmAsmInstructionStart
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     FETCH_INST()
     GET_JIT_PROF_TABLE(r0)
     @ NOTE: intended fallthrough
@@ -299,13 +286,13 @@
  * is already a native translation in place (and, if so,
  * jump to it now).
  */
+
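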
     GET_JIT_THRESHOLD(r1)
-    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
     strb    r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ reset counter
     EXPORT_PC()
     mov     r0,rPC
-    bl      dvmJitGetCodeAddr           @ r0<- dvmJitGetCodeAddr(rPC)
-    str     r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl      dvmJitGetTraceAddr          @ r0<- dvmJitGetTraceAddr(rPC)
+    str     r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     mov     r1, rPC                     @ arg1 of translation may need this
     mov     lr, #0                      @  in case target is HANDLER_INTERPRET
     cmp     r0,#0
@@ -326,9 +313,8 @@
     cmp     r0, r10                     @ special case?
     bne     jitSVShadowRunStart         @ set up self verification shadow space
     @ Need to clear the inJitCodeCache flag
-    ldr    r10, [rGLUE, #offGlue_self]  @ r10 <- glue->self
     mov    r3, #0                       @ 0 means not in the JIT code cache
-    str    r3, [r10, #offThread_inJitCodeCache] @ back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ back to the interp land
     GET_INST_OPCODE(ip)
     GOTO_OPCODE(ip)
     /* no return */
@@ -339,9 +325,10 @@
  *  r2 is jit state, e.g. kJitTSelectRequest or kJitTSelectRequestHot
  */
 common_selectTrace:
-    str     r2,[rGLUE,#offGlue_jitState]
+
+    str     r2,[rSELF,#offThread_jitState]
     mov     r2,#kInterpEntryInstr       @ normal entry reason
-    str     r2,[rGLUE,#offGlue_entryPoint]
+    str     r2,[rSELF,#offThread_entryPoint]
     mov     r1,#1                       @ set changeInterp
     b       common_gotoBail
 
@@ -350,42 +337,41 @@
  * Save PC and registers to shadow memory for self verification mode
  * before jumping to native translation.
  * On entry:
- *    rPC, rFP, rGLUE: the values that they should contain
+ *    rPC, rFP, rSELF: the values that they should contain
  *    r10: the address of the target translation.
  */
 jitSVShadowRunStart:
     mov     r0,rPC                      @ r0<- program counter
     mov     r1,rFP                      @ r1<- frame pointer
-    mov     r2,rGLUE                    @ r2<- InterpState pointer
+    mov     r2,rSELF                    @ r2<- self (Thread) pointer
     mov     r3,r10                      @ r3<- target translation
     bl      dvmSelfVerificationSaveState @ save registers to shadow space
     ldr     rFP,[r0,#offShadowSpace_shadowFP] @ rFP<- fp in shadow space
-    add     rGLUE,r0,#offShadowSpace_interpState @ rGLUE<- rGLUE in shadow space
     bx      r10                         @ jump to the translation
 
 /*
- * Restore PC, registers, and interpState to original values
+ * Restore PC, registers, and interpreter state to original values
  * before jumping back to the interpreter.
  */
 jitSVShadowRunEnd:
     mov    r1,rFP                        @ pass ending fp
+    mov    r3,rSELF                      @ pass self ptr for convenience
     bl     dvmSelfVerificationRestoreState @ restore pc and fp values
-    ldr    rPC,[r0,#offShadowSpace_startPC] @ restore PC
-    ldr    rFP,[r0,#offShadowSpace_fp]   @ restore FP
-    ldr    rGLUE,[r0,#offShadowSpace_glue] @ restore InterpState
+    ldr    rPC,[rSELF,#offThread_pc]     @ restore PC
+    ldr    rFP,[rSELF,#offThread_fp]     @ restore FP
     ldr    r1,[r0,#offShadowSpace_svState] @ get self verification state
     cmp    r1,#0                         @ check for punt condition
     beq    1f
     mov    r2,#kJitSelfVerification      @ ask for self verification
-    str    r2,[rGLUE,#offGlue_jitState]
+    str    r2,[rSELF,#offThread_jitState]
     mov    r2,#kInterpEntryInstr         @ normal entry reason
-    str    r2,[rGLUE,#offGlue_entryPoint]
+    str    r2,[rSELF,#offThread_entryPoint]
     mov    r1,#1                         @ set changeInterp
     b      common_gotoBail
 
 1:                                       @ exit to interpreter without check
     EXPORT_PC()
-    adrl   rIBASE, dvmAsmInstructionStart
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     FETCH_INST()
     GET_INST_OPCODE(ip)
     GOTO_OPCODE(ip)
@@ -440,48 +426,20 @@
  *  r9 is trampoline PC adjustment *in bytes*
  */
 common_periodicChecks:
-    ldr     r3, [rGLUE, #offGlue_pSelfSuspendCount] @ r3<- &suspendCount
-
-    ldr     r1, [rGLUE, #offGlue_pDebuggerActive]   @ r1<- &debuggerActive
-    ldr     r2, [rGLUE, #offGlue_pActiveProfilers]  @ r2<- &activeProfilers
-
-    ldr     ip, [r3]                    @ ip<- suspendCount (int)
-
-    cmp     r1, #0                      @ debugger enabled?
-#if defined(WORKAROUND_CORTEX_A9_745320)
-    /* Don't use conditional loads if the HW defect exists */
-    beq     101f
-    ldrb    r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
-101:
-#else
-    ldrneb  r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
-#endif
-    ldr     r2, [r2]                    @ r2<- activeProfilers (int)
-    orrnes  ip, ip, r1                  @ ip<- suspendCount | debuggerActive
-    /*
-     * Don't switch the interpreter in the libdvm_traceview build even if the
-     * profiler is active.
-     * The code here is opted for less intrusion instead of performance.
-     * That is, *pActiveProfilers is still loaded into r2 even though it is not
-     * used when WITH_INLINE_PROFILING is defined.
-     */
-#if !defined(WITH_INLINE_PROFILING)
-    orrs    ip, ip, r2                  @ ip<- suspend|debugger|profiler; set Z
-#endif
-
-
-    bxeq    lr                          @ all zero, return
-
+/* TUNING - make this a direct load when interpBreak moved to Thread */
+    ldr     r1, [rSELF, #offThread_pInterpBreak] @ r1<- &interpBreak
+    /* speculatively load the thread-specific suspend count */
+    ldr     ip, [rSELF, #offThread_suspendCount]
+    ldr     r1, [r1]                                @ r1<- interpBreak
+    cmp     r1, #0                                  @ anything unusual?
+    bxeq    lr                                      @ return if not
     /*
      * One or more interesting events have happened.  Figure out what.
      *
-     * If debugging or profiling are compiled in, we need to disambiguate.
-     *
      * r0 still holds the reentry type.
      */
-    ldr     ip, [r3]                    @ ip<- suspendCount (int)
     cmp     ip, #0                      @ want suspend?
-    beq     1f                          @ no, must be debugger/profiler
+    beq     3f                          @ no, must be something else
 
     stmfd   sp!, {r0, lr}               @ preserve r0 and lr
 #if defined(WITH_JIT)
@@ -489,77 +447,86 @@
      * Refresh the Jit's cached copy of profile table pointer.  This pointer
      * doubles as the Jit's on/off switch.
      */
-    ldr     r3, [rGLUE, #offGlue_ppJitProfTable] @ r3<-&gDvmJit.pJitProfTable
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    ldr     r3, [rSELF, #offThread_ppJitProfTable] @ r3<-&gDvmJit.pJitProfTable
+    mov     r0, rSELF                  @ r0<- self
     ldr     r3, [r3] @ r3 <- pJitProfTable
     EXPORT_PC()                         @ need for precise GC
-    str     r3, [rGLUE, #offGlue_pJitProfTable] @ refresh Jit's on/off switch
+    str     r3, [rSELF, #offThread_pJitProfTable] @ refresh Jit's on/off switch
 #else
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    mov     r0, rSELF                   @ r0<- self
     EXPORT_PC()                         @ need for precise GC
 #endif
     bl      dvmCheckSuspendPending      @ do full check, suspend if necessary
     ldmfd   sp!, {r0, lr}               @ restore r0 and lr
 
     /*
-     * Reload the debugger/profiler enable flags.  We're checking to see
-     * if either of these got set while we were suspended.
-     *
-     * If WITH_INLINE_PROFILING is configured, don't check whether the profiler
-     * is enabled or not as the profiling will be done inline.
+     * Reload the interpBreak flags - they may have changed while we
+     * were suspended.
      */
-    ldr     r1, [rGLUE, #offGlue_pDebuggerActive]   @ r1<- &debuggerActive
-    cmp     r1, #0                      @ debugger enabled?
-#if defined(WORKAROUND_CORTEX_A9_745320)
-    /* Don't use conditional loads if the HW defect exists */
-    beq     101f
-    ldrb    r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
-101:
-#else
-    ldrneb  r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
-#endif
+/* TUNING - direct load when InterpBreak moved to Thread */
+    ldr     r1, [rSELF, #offThread_pInterpBreak]   @ r1<- &interpBreak
+    ldr     r1, [r1]                    @ r1<- interpBreak
+3:
+    /*
+     * TODO: this code is too fragile.  Need a general mechanism
+     * to identify what actions to take by submode.  Some profiling modes
+     * (instruction count) need to single-step, while method tracing
+     * may not.  Debugging with breakpoints can run unfettered, but
+     * source-level single-stepping requires Dalvik singlestepping.
+     * GC may require a one-shot action and then full-speed resumption.
+     */
+    ands    r1, #(kSubModeDebuggerActive | kSubModeEmulatorTrace | kSubModeInstCounting)
+    bxeq    lr                          @ nothing to do, return
 
-#if !defined(WITH_INLINE_PROFILING)
-    ldr     r2, [rGLUE, #offGlue_pActiveProfilers]  @ r2<- &activeProfilers
-    ldr     r2, [r2]                    @ r2<- activeProfilers (int)
-    orrs    r1, r1, r2
-#else
-    cmp     r1, #0                      @ only consult the debuggerActive flag
-#endif
-
-    beq     2f
-
-1:  @ debugger/profiler enabled, bail out; glue->entryPoint was set above
-    str     r0, [rGLUE, #offGlue_entryPoint]    @ store r0, need for debug/prof
+    @ debugger/profiler enabled, bail out; self->entryPoint was set above
+    str     r0, [rSELF, #offThread_entryPoint]  @ store r0, need for debug/prof
     add     rPC, rPC, r9                @ update rPC
     mov     r1, #1                      @ "want switch" = true
     b       common_gotoBail             @ side exit
 
-2:
-    bx      lr                          @ nothing to do, return
-
 
 /*
  * The equivalent of "goto bail", this calls through the "bail handler".
  *
- * State registers will be saved to the "glue" area before bailing.
+ * State registers will be saved to the "thread" area before bailing.
  *
  * On entry:
  *  r1 is "bool changeInterp", indicating if we want to switch to the
  *     other interpreter or just bail all the way out
  */
 common_gotoBail:
-    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
-    mov     r0, rGLUE                   @ r0<- glue ptr
-    b       dvmMterpStdBail             @ call(glue, changeInterp)
+    SAVE_PC_FP_TO_SELF()                @ export state to "thread"
+    mov     r0, rSELF                   @ r0<- self ptr
+    b       dvmMterpStdBail             @ call(self, changeInterp)
 
     @add     r1, r1, #1                  @ using (boolean+1)
-    @add     r0, rGLUE, #offGlue_jmpBuf  @ r0<- &glue->jmpBuf
+    @add     r0, rSELF, #offThread_jmpBuf @ r0<- &self->jmpBuf
     @bl      _longjmp                    @ does not return
     @bl      common_abort
 
 
 /*
+ * Common code for jumbo method invocation.
+ * NOTE: this adjusts rPC to account for the difference in instruction width.
+ * As a result, the savedPc in the stack frame will not be wholly accurate. So
+ * long as that is only used for source file line number calculations, we're
+ * okay.
+ *
+ * On entry:
+ *  r0 is "Method* methodToCall", the method we're trying to call
+ */
+common_invokeMethodJumbo:
+.LinvokeNewJumbo:
+    @ prepare to copy args to "outs" area of current frame
+    add     rPC, rPC, #4                @ adjust pc to make return consistent
+    FETCH(r2, 1)                        @ r2<- BBBB (arg count)
+    SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
+    cmp     r2, #0                      @ no args?
+    beq     .LinvokeArgsDone            @ if no args, skip the rest
+    FETCH(r1, 2)                        @ r1<- CCCC
+    b       .LinvokeRangeArgs           @ handle args like invoke range
+
+/*
  * Common code for method invocation with range.
  *
  * On entry:
@@ -573,16 +540,15 @@
     beq     .LinvokeArgsDone            @ if no args, skip the rest
     FETCH(r1, 2)                        @ r1<- CCCC
 
+.LinvokeRangeArgs:
     @ r0=methodToCall, r1=CCCC, r2=count, r10=outs
     @ (very few methods have > 10 args; could unroll for common cases)
     add     r3, rFP, r1, lsl #2         @ r3<- &fp[CCCC]
     sub     r10, r10, r2, lsl #2        @ r10<- "outs" area, for call args
-    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
 1:  ldr     r1, [r3], #4                @ val = *fp++
     subs    r2, r2, #1                  @ count--
     str     r1, [r10], #4               @ *outs++ = val
     bne     1b                          @ ...while count != 0
-    ldrh    r3, [r0, #offMethod_outsSize]   @ r3<- methodToCall->outsSize
     b       .LinvokeArgsDone
 
 /*
@@ -597,11 +563,9 @@
     movs    r2, rINST, lsr #12          @ r2<- B (arg count) -- test for zero
     SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
     FETCH(r1, 2)                        @ r1<- GFED (load here to hide latency)
-    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
-    ldrh    r3, [r0, #offMethod_outsSize]  @ r3<- methodToCall->outsSize
     beq     .LinvokeArgsDone
 
-    @ r0=methodToCall, r1=GFED, r3=outSize, r2=count, r9=regSize, r10=outs
+    @ r0=methodToCall, r1=GFED, r2=count, r10=outs
 .LinvokeNonRange:
     rsb     r2, r2, #5                  @ r2<- 5-r2
     add     pc, pc, r2, lsl #4          @ computed goto, 4 instrs each
@@ -628,7 +592,9 @@
     str     r2, [r10, #-4]!             @ *--outs = vD
 0:  @ fall through to .LinvokeArgsDone
 
-.LinvokeArgsDone: @ r0=methodToCall, r3=outSize, r9=regSize
+.LinvokeArgsDone: @ r0=methodToCall
+    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
+    ldrh    r3, [r0, #offMethod_outsSize]  @ r3<- methodToCall->outsSize
     ldr     r2, [r0, #offMethod_insns]  @ r2<- method->insns
     ldr     rINST, [r0, #offMethod_clazz]  @ rINST<- method->clazz
     @ find space for the new stack frame, check for overflow
@@ -636,13 +602,15 @@
     sub     r1, r1, r9, lsl #2          @ r1<- newFp (old savearea - regsSize)
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- newSaveArea
 @    bl      common_dumpRegs
-    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
     sub     r3, r10, r3, lsl #2         @ r3<- bottom (newsave - outsSize)
     cmp     r3, r9                      @ bottom < interpStackEnd?
+    ldr     lr, [rSELF, #offThread_pInterpBreak]
     ldr     r3, [r0, #offMethod_accessFlags] @ r3<- methodToCall->accessFlags
     blo     .LstackOverflow             @ yes, this frame will overflow stack
 
     @ set up newSaveArea
+    ldr     lr, [lr]                    @ lr<- active submodes
 #ifdef EASY_GDB
     SAVEAREA_FROM_FP(ip, rFP)           @ ip<- stack save area
     str     ip, [r10, #offStackSaveArea_prevSave]
@@ -653,13 +621,14 @@
     mov     r9, #0
     str     r9, [r10, #offStackSaveArea_returnAddr]
 #endif
-#if defined(WITH_INLINE_PROFILING)
+    ands    lr, #kSubModeMethodTrace    @ method tracing?
+    beq     1f                          @ skip if not
     stmfd   sp!, {r0-r3}                @ preserve r0-r3
-    mov     r1, r6
-    @ r0=methodToCall, r1=rGlue
+    mov     r1, rSELF
+    @ r0=methodToCall, r1=rSELF
     bl      dvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
-#endif
+1:
     str     r0, [r10, #offStackSaveArea_method]
     tst     r3, #ACC_NATIVE
     bne     .LinvokeNative
@@ -682,18 +651,17 @@
     ldrh    r9, [r2]                        @ r9 <- load INST from new PC
     ldr     r3, [rINST, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
     mov     rPC, r2                         @ publish new rPC
-    ldr     r2, [rGLUE, #offGlue_self]      @ r2<- glue->self
 
-    @ Update "glue" values for the new method
-    @ r0=methodToCall, r1=newFp, r2=self, r3=newMethodClass, r9=newINST
-    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
-    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
+    @ Update state values for the new method
+    @ r0=methodToCall, r1=newFp, r3=newMethodClass, r9=newINST
+    str     r0, [rSELF, #offThread_method]    @ self->method = methodToCall
+    str     r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
 #if defined(WITH_JIT)
     GET_JIT_PROF_TABLE(r0)
     mov     rFP, r1                         @ fp = newFp
     GET_PREFETCHED_OPCODE(ip, r9)           @ extract prefetched opcode from r9
     mov     rINST, r9                       @ publish new rINST
-    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
     cmp     r0,#0
     bne     common_updateProfile
     GOTO_OPCODE(ip)                         @ jump to next instruction
@@ -701,22 +669,23 @@
     mov     rFP, r1                         @ fp = newFp
     GET_PREFETCHED_OPCODE(ip, r9)           @ extract prefetched opcode from r9
     mov     rINST, r9                       @ publish new rINST
-    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
     GOTO_OPCODE(ip)                         @ jump to next instruction
 #endif
 
 .LinvokeNative:
     @ Prep for the native call
     @ r0=methodToCall, r1=newFp, r10=newSaveArea
-    ldr     r3, [rGLUE, #offGlue_self]      @ r3<- glue->self
-    ldr     r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
-    str     r1, [r3, #offThread_curFrame]   @ self->curFrame = newFp
+    ldr     lr, [rSELF, #offThread_pInterpBreak]
+    ldr     r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
     str     r9, [r10, #offStackSaveArea_localRefCookie] @newFp->localRefCookie=top
-    mov     r9, r3                      @ r9<- glue->self (preserve)
+    ldr     lr, [lr]                    @ lr<- active submodes
 
     mov     r2, r0                      @ r2<- methodToCall
     mov     r0, r1                      @ r0<- newFp (points to args)
-    add     r1, rGLUE, #offGlue_retval  @ r1<- &retval
+    add     r1, rSELF, #offThread_retval  @ r1<- &retval
+    mov     r3, rSELF                   @ arg3<- self
 
 #ifdef ASSIST_DEBUGGER
     /* insert fake function header to help gdb find the stack frame */
@@ -729,36 +698,27 @@
 .Lskip:
 #endif
 
-#if defined(WITH_INLINE_PROFILING)
-    @ r2=JNIMethod, r6=rGLUE
-    stmfd   sp!, {r2,r6}
-#endif
-
+    ands    lr, #kSubModeMethodTrace    @ method tracing?
+    bne     330f                        @ hop if so
     mov     lr, pc                      @ set return addr
     ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
-
-#if defined(WITH_INLINE_PROFILING)
-    @ r0=JNIMethod, r1=rGLUE
-    ldmfd   sp!, {r0-r1}
-    bl      dvmFastNativeMethodTraceExit
-#endif
-
+220:
 #if defined(WITH_JIT)
-    ldr     r3, [rGLUE, #offGlue_ppJitProfTable] @ Refresh Jit's on/off status
+    ldr     r3, [rSELF, #offThread_ppJitProfTable] @ Refresh Jit's on/off status
 #endif
 
-    @ native return; r9=self, r10=newSaveArea
+    @ native return; r10=newSaveArea
     @ equivalent to dvmPopJniLocals
     ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved top
-    ldr     r1, [r9, #offThread_exception] @ check for exception
+    ldr     r1, [rSELF, #offThread_exception] @ check for exception
 #if defined(WITH_JIT)
     ldr     r3, [r3]                    @ r3 <- gDvmJit.pProfTable
 #endif
-    str     rFP, [r9, #offThread_curFrame]  @ self->curFrame = fp
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = fp
     cmp     r1, #0                      @ null?
-    str     r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top
+    str     r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
 #if defined(WITH_JIT)
-    str     r3, [rGLUE, #offGlue_pJitProfTable] @ refresh cached on/off switch
+    str     r3, [rSELF, #offThread_pJitProfTable] @ refresh cached on/off switch
 #endif
     bne     common_exceptionThrown      @ no, handle exception
 
@@ -766,13 +726,26 @@
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     GOTO_OPCODE(ip)                     @ jump to next instruction
 
+330:
+    @ r2=JNIMethod, r6=rSELF
+    stmfd   sp!, {r2,r6}
+
+    mov     lr, pc                      @ set return addr
+    ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+
+    @ r0=JNIMethod, r1=rSELF
+    ldmfd   sp!, {r0-r1}
+    bl      dvmFastNativeMethodTraceExit
+    b       220b
+
 .LstackOverflow:    @ r0=methodToCall
     mov     r1, r0                      @ r1<- methodToCall
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- self
+    mov     r0, rSELF                   @ r0<- self
     bl      dvmHandleStackOverflow
     b       common_exceptionThrown
 #ifdef ASSIST_DEBUGGER
     .fnend
+    .size   dalvik_mterp, .-dalvik_mterp
 #endif
 
 
@@ -792,8 +765,8 @@
     sub     sp, sp, #8                  @ space for args + pad
     FETCH(ip, 2)                        @ ip<- FEDC or CCCC
     mov     r2, r0                      @ A2<- methodToCall
-    mov     r0, rGLUE                   @ A0<- glue
-    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
+    mov     r0, rSELF                   @ A0<- self
+    SAVE_PC_FP_TO_SELF()                @ export state to "self"
     mov     r1, r9                      @ A1<- methodCallRange
     mov     r3, rINST, lsr #8           @ A3<- AA
     str     ip, [sp, #0]                @ A4<- ip
@@ -815,19 +788,21 @@
     mov     r9, #0
     bl      common_periodicChecks
 
-#if defined(WITH_INLINE_PROFILING)
+    ldr     lr, [rSELF, #offThread_pInterpBreak]
+    SAVEAREA_FROM_FP(r0, rFP)
+    ldr     lr, [lr]                    @ lr<- active submodes
+    ldr     r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc
+    ands    lr, #kSubModeMethodTrace    @ method tracing?
+    beq     333f
     stmfd   sp!, {r0-r3}                @ preserve r0-r3
-    mov     r0, r6
-    @ r0=rGlue
+    mov     r0, rSELF
+    @ r0=rSELF
     bl      dvmFastJavaMethodTraceExit
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
-#endif
-    SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
+333:
     ldr     rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame
-    ldr     r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc
     ldr     r2, [rFP, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                         @ r2<- method we're returning to
-    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
     cmp     r2, #0                      @ is this a break frame?
 #if defined(WORKAROUND_CORTEX_A9_745320)
     /* Don't use conditional loads if the HW defect exists */
@@ -841,14 +816,14 @@
     beq     common_gotoBail             @ break frame, bail out completely
 
     PREFETCH_ADVANCE_INST(rINST, r9, 3) @ advance r9, update new rINST
-    str     r2, [rGLUE, #offGlue_method]@ glue->method = newSave->method
+    str     r2, [rSELF, #offThread_method]@ self->method = newSave->method
     ldr     r1, [r10, #offClassObject_pDvmDex]   @ r1<- method->clazz->pDvmDex
-    str     rFP, [r3, #offThread_curFrame]  @ self->curFrame = fp
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = fp
 #if defined(WITH_JIT)
     ldr     r10, [r0, #offStackSaveArea_returnAddr] @ r10 = saveArea->returnAddr
     mov     rPC, r9                     @ publish new rPC
-    str     r1, [rGLUE, #offGlue_methodClassDex]
-    str     r10, [r3, #offThread_inJitCodeCache]  @ may return to JIT'ed land
+    str     r1, [rSELF, #offThread_methodClassDex]
+    str     r10, [rSELF, #offThread_inJitCodeCache]  @ may return to JIT'ed land
     cmp     r10, #0                      @ caller is compiled code
     blxne   r10
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
@@ -856,7 +831,7 @@
 #else
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     mov     rPC, r9                     @ publish new rPC
-    str     r1, [rGLUE, #offGlue_methodClassDex]
+    str     r1, [rSELF, #offThread_methodClassDex]
     GOTO_OPCODE(ip)                     @ jump to next instruction
 #endif
 
@@ -865,8 +840,8 @@
      */
      .if    0
 .LreturnOld:
-    SAVE_PC_FP_TO_GLUE()                @ export state
-    mov     r0, rGLUE                   @ arg to function
+    SAVE_PC_FP_TO_SELF()                @ export state
+    mov     r0, rSELF                   @ arg to function
     bl      dvmMterp_returnFromMethod
     b       common_resumeAfterGlueCall
     .endif
@@ -889,13 +864,12 @@
     mov     r9, #0
     bl      common_periodicChecks
 
-    ldr     r10, [rGLUE, #offGlue_self] @ r10<- glue->self
-    ldr     r9, [r10, #offThread_exception] @ r9<- self->exception
-    mov     r1, r10                     @ r1<- self
+    ldr     r9, [rSELF, #offThread_exception] @ r9<- self->exception
+    mov     r1, rSELF                   @ r1<- self
     mov     r0, r9                      @ r0<- exception
     bl      dvmAddTrackedAlloc          @ don't let the exception be GCed
     mov     r3, #0                      @ r3<- NULL
-    str     r3, [r10, #offThread_exception] @ self->exception = NULL
+    str     r3, [rSELF, #offThread_exception] @ self->exception = NULL
 
     /* set up args and a local for "&fp" */
     /* (str sp, [sp, #-4]!  would be perfect here, but is discouraged) */
@@ -903,8 +877,8 @@
     mov     ip, sp                      @ ip<- &fp
     mov     r3, #0                      @ r3<- false
     str     ip, [sp, #-4]!              @ *--sp = &fp
-    ldr     r1, [rGLUE, #offGlue_method] @ r1<- glue->method
-    mov     r0, r10                     @ r0<- self
+    ldr     r1, [rSELF, #offThread_method] @ r1<- self->method
+    mov     r0, rSELF                   @ r0<- self
     ldr     r1, [r1, #offMethod_insns]  @ r1<- method->insns
     mov     r2, r9                      @ r2<- exception
     sub     r1, rPC, r1                 @ r1<- pc - method->insns
@@ -914,11 +888,11 @@
     bl      dvmFindCatchBlock           @ call(self, relPc, exc, scan?, &fp)
 
     /* fix earlier stack overflow if necessary; may trash rFP */
-    ldrb    r1, [r10, #offThread_stackOverflowed]
+    ldrb    r1, [rSELF, #offThread_stackOverflowed]
     cmp     r1, #0                      @ did we overflow earlier?
     beq     1f                          @ no, skip ahead
     mov     rFP, r0                     @ save relPc result in rFP
-    mov     r0, r10                     @ r0<- self
+    mov     r0, rSELF                   @ r0<- self
     mov     r1, r9                      @ r1<- exception
     bl      dvmCleanupStackOverflow     @ call(self)
     mov     r0, rFP                     @ restore result
@@ -933,30 +907,30 @@
     /* adjust locals to match self->curFrame and updated PC */
     SAVEAREA_FROM_FP(r1, rFP)           @ r1<- new save area
     ldr     r1, [r1, #offStackSaveArea_method] @ r1<- new method
-    str     r1, [rGLUE, #offGlue_method]    @ glue->method = new method
+    str     r1, [rSELF, #offThread_method]  @ self->method = new method
     ldr     r2, [r1, #offMethod_clazz]      @ r2<- method->clazz
     ldr     r3, [r1, #offMethod_insns]      @ r3<- method->insns
     ldr     r2, [r2, #offClassObject_pDvmDex] @ r2<- method->clazz->pDvmDex
     add     rPC, r3, r0, asl #1             @ rPC<- method->insns + catchRelPc
-    str     r2, [rGLUE, #offGlue_methodClassDex] @ glue->pDvmDex = meth...
+    str     r2, [rSELF, #offThread_methodClassDex] @ self->pDvmDex = meth...
 
     /* release the tracked alloc on the exception */
     mov     r0, r9                      @ r0<- exception
-    mov     r1, r10                     @ r1<- self
+    mov     r1, rSELF                   @ r1<- self
     bl      dvmReleaseTrackedAlloc      @ release the exception
 
     /* restore the exception if the handler wants it */
     FETCH_INST()                        @ load rINST from rPC
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     cmp     ip, #OP_MOVE_EXCEPTION      @ is it "move-exception"?
-    streq   r9, [r10, #offThread_exception] @ yes, restore the exception
+    streq   r9, [rSELF, #offThread_exception] @ yes, restore the exception
     GOTO_OPCODE(ip)                     @ jump to next instruction
 
-.LnotCaughtLocally: @ r9=exception, r10=self
+.LnotCaughtLocally: @ r9=exception
     /* fix stack overflow if necessary */
-    ldrb    r1, [r10, #offThread_stackOverflowed]
+    ldrb    r1, [rSELF, #offThread_stackOverflowed]
     cmp     r1, #0                      @ did we overflow earlier?
-    movne   r0, r10                     @ if yes: r0<- self
+    movne   r0, rSELF                   @ if yes: r0<- self
     movne   r1, r9                      @ if yes: r1<- exception
     blne    dvmCleanupStackOverflow     @ if yes: call(self)
 
@@ -965,14 +939,14 @@
     /* call __android_log_print(prio, tag, format, ...) */
     /* "Exception %s from %s:%d not caught locally" */
     @ dvmLineNumFromPC(method, pc - method->insns)
-    ldr     r0, [rGLUE, #offGlue_method]
+    ldr     r0, [rSELF, #offThread_method]
     ldr     r1, [r0, #offMethod_insns]
     sub     r1, rPC, r1
     asr     r1, r1, #1
     bl      dvmLineNumFromPC
     str     r0, [sp, #-4]!
     @ dvmGetMethodSourceFile(method)
-    ldr     r0, [rGLUE, #offGlue_method]
+    ldr     r0, [rSELF, #offThread_method]
     bl      dvmGetMethodSourceFile
     str     r0, [sp, #-4]!
     @ exception->clazz->descriptor
@@ -984,9 +958,9 @@
     mov     r0, #3                      @ LOG_DEBUG
     bl      __android_log_print
 #endif
-    str     r9, [r10, #offThread_exception] @ restore exception
+    str     r9, [rSELF, #offThread_exception] @ restore exception
     mov     r0, r9                      @ r0<- exception
-    mov     r1, r10                     @ r1<- self
+    mov     r1, rSELF                   @ r1<- self
     bl      dvmReleaseTrackedAlloc      @ release the exception
     mov     r1, #0                      @ "want switch" = false
     b       common_gotoBail             @ bail out
@@ -997,8 +971,8 @@
      */
     .if     0
 .LexceptionOld:
-    SAVE_PC_FP_TO_GLUE()                @ export state
-    mov     r0, rGLUE                   @ arg to function
+    SAVE_PC_FP_TO_SELF()                @ export state
+    mov     r0, rSELF                   @ arg to function
     bl      dvmMterp_exceptionThrown
     b       common_resumeAfterGlueCall
     .endif
@@ -1009,7 +983,7 @@
  * values and start executing at the next instruction.
  */
 common_resumeAfterGlueCall:
-    LOAD_PC_FP_FROM_GLUE()              @ pull rPC and rFP out of glue
+    LOAD_PC_FP_FROM_SELF()              @ pull rPC and rFP out of thread
     FETCH_INST()                        @ load rINST from rPC
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     GOTO_OPCODE(ip)                     @ jump to next instruction
@@ -1017,15 +991,14 @@
 /*
  * Invalid array index. Note that our calling convention is strange; we use r1
  * and r3 because those just happen to be the registers all our callers are
- * using. We shuffle them here before calling the C function.
+ * using. We move r3 before calling the C function, but r1 happens to match.
  * r1: index
  * r3: size
  */
 common_errArrayIndex:
     EXPORT_PC()
-    mov     r0, r1
-    mov     r1, r3
-    bl      dvmThrowAIOOBE
+    mov     r0, r3
+    bl      dvmThrowArrayIndexOutOfBoundsException
     b       common_exceptionThrown
 
 /*
@@ -1033,29 +1006,28 @@
  */
 common_errDivideByZero:
     EXPORT_PC()
-    ldr     r0, strArithmeticException
-    ldr     r1, strDivideByZero
-    bl      dvmThrowException
+    ldr     r0, strDivideByZero
+    bl      dvmThrowArithmeticException
     b       common_exceptionThrown
 
 /*
  * Attempt to allocate an array with a negative size.
+ * On entry: length in r1
  */
 common_errNegativeArraySize:
     EXPORT_PC()
-    ldr     r0, strNegativeArraySizeException
-    mov     r1, #0
-    bl      dvmThrowException
+    mov     r0, r1                                @ arg0 <- len
+    bl      dvmThrowNegativeArraySizeException    @ (len)
     b       common_exceptionThrown
 
 /*
  * Invocation of a non-existent method.
+ * On entry: method name in r1
  */
 common_errNoSuchMethod:
     EXPORT_PC()
-    ldr     r0, strNoSuchMethodError
-    mov     r1, #0
-    bl      dvmThrowException
+    mov     r0, r1
+    bl      dvmThrowNoSuchMethodError
     b       common_exceptionThrown
 
 /*
@@ -1065,9 +1037,8 @@
  */
 common_errNullObject:
     EXPORT_PC()
-    ldr     r0, strNullPointerException
-    mov     r1, #0
-    bl      dvmThrowException
+    mov     r0, #0
+    bl      dvmThrowNullPointerException
     b       common_exceptionThrown
 
 /*
@@ -1203,17 +1174,8 @@
  * String references, must be close to the code that uses them.
  */
     .align  2
-strArithmeticException:
-    .word   .LstrArithmeticException
 strDivideByZero:
     .word   .LstrDivideByZero
-strNegativeArraySizeException:
-    .word   .LstrNegativeArraySizeException
-strNoSuchMethodError:
-    .word   .LstrNoSuchMethodError
-strNullPointerException:
-    .word   .LstrNullPointerException
-
 strLogTag:
     .word   .LstrLogTag
 strExceptionNotCaughtLocally:
@@ -1241,23 +1203,10 @@
 
 .LstrBadEntryPoint:
     .asciz  "Bad entry point %d\n"
-.LstrArithmeticException:
-    .asciz  "Ljava/lang/ArithmeticException;"
-.LstrDivideByZero:
-    .asciz  "divide by zero"
 .LstrFilledNewArrayNotImpl:
     .asciz  "filled-new-array only implemented for objects and 'int'"
-.LstrInternalError:
-    .asciz  "Ljava/lang/InternalError;"
-.LstrInstantiationError:
-    .asciz  "Ljava/lang/InstantiationError;"
-.LstrNegativeArraySizeException:
-    .asciz  "Ljava/lang/NegativeArraySizeException;"
-.LstrNoSuchMethodError:
-    .asciz  "Ljava/lang/NoSuchMethodError;"
-.LstrNullPointerException:
-    .asciz  "Ljava/lang/NullPointerException;"
-
+.LstrDivideByZero:
+    .asciz  "divide by zero"
 .LstrLogTag:
     .asciz  "mterp"
 .LstrExceptionNotCaughtLocally:
diff --git a/vm/mterp/armv5te/header.S b/vm/mterp/armv5te/header.S
index bd6b7ee..9d379d9 100644
--- a/vm/mterp/armv5te/header.S
+++ b/vm/mterp/armv5te/header.S
@@ -56,7 +56,7 @@
   reg nick      purpose
   r4  rPC       interpreted program counter, used for fetching instructions
   r5  rFP       interpreted frame pointer, used for accessing locals and args
-  r6  rGLUE     MterpGlue pointer
+  r6  rSELF     self (Thread) pointer
   r7  rINST     first 16-bit code unit of current instruction
   r8  rIBASE    interpreted instruction base pointer, used for computed goto
 
@@ -68,21 +68,21 @@
 /* single-purpose registers, given names for clarity */
 #define rPC     r4
 #define rFP     r5
-#define rGLUE   r6
+#define rSELF   r6
 #define rINST   r7
 #define rIBASE  r8
 
-/* save/restore the PC and/or FP from the glue struct */
-#define LOAD_PC_FROM_GLUE()     ldr     rPC, [rGLUE, #offGlue_pc]
-#define SAVE_PC_TO_GLUE()       str     rPC, [rGLUE, #offGlue_pc]
-#define LOAD_FP_FROM_GLUE()     ldr     rFP, [rGLUE, #offGlue_fp]
-#define SAVE_FP_TO_GLUE()       str     rFP, [rGLUE, #offGlue_fp]
-#define LOAD_PC_FP_FROM_GLUE()  ldmia   rGLUE, {rPC, rFP}
-#define SAVE_PC_FP_TO_GLUE()    stmia   rGLUE, {rPC, rFP}
+/* save/restore the PC and/or FP from the thread struct */
+#define LOAD_PC_FROM_SELF()     ldr     rPC, [rSELF, #offThread_pc]
+#define SAVE_PC_TO_SELF()       str     rPC, [rSELF, #offThread_pc]
+#define LOAD_FP_FROM_SELF()     ldr     rFP, [rSELF, #offThread_fp]
+#define SAVE_FP_TO_SELF()       str     rFP, [rSELF, #offThread_fp]
+#define LOAD_PC_FP_FROM_SELF()  ldmia   rSELF, {rPC, rFP}
+#define SAVE_PC_FP_TO_SELF()    stmia   rSELF, {rPC, rFP}
 
 /*
  * "export" the PC to the stack frame, f/b/o future exception objects.  Must
- * be done *before* something calls dvmThrowException.
+ * be done *before* something throws.
  *
  * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e.
  * fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc)
@@ -117,14 +117,14 @@
  * exception catch may miss.  (This also implies that it must come after
  * EXPORT_PC().)
  */
-#define FETCH_ADVANCE_INST(_count) ldrh    rINST, [rPC, #(_count*2)]!
+#define FETCH_ADVANCE_INST(_count) ldrh    rINST, [rPC, #((_count)*2)]!
 
 /*
  * The operation performed here is similar to FETCH_ADVANCE_INST, except the
  * src and dest registers are parameterized (not hard-wired to rPC and rINST).
  */
 #define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
-        ldrh    _dreg, [_sreg, #(_count*2)]!
+        ldrh    _dreg, [_sreg, #((_count)*2)]!
 
 /*
  * Fetch the next instruction from an offset specified by _reg.  Updates
@@ -144,15 +144,15 @@
  *
  * The "_S" variant works the same but treats the value as signed.
  */
-#define FETCH(_reg, _count)     ldrh    _reg, [rPC, #(_count*2)]
-#define FETCH_S(_reg, _count)   ldrsh   _reg, [rPC, #(_count*2)]
+#define FETCH(_reg, _count)     ldrh    _reg, [rPC, #((_count)*2)]
+#define FETCH_S(_reg, _count)   ldrsh   _reg, [rPC, #((_count)*2)]
 
 /*
  * Fetch one byte from an offset past the current PC.  Pass in the same
  * "_count" as you would for FETCH, and an additional 0/1 indicating which
  * byte of the halfword you want (lo/hi).
  */
-#define FETCH_B(_reg, _count, _byte) ldrb     _reg, [rPC, #(_count*2+_byte)]
+#define FETCH_B(_reg, _count, _byte) ldrb     _reg, [rPC, #((_count)*2+(_byte))]
 
 /*
  * Put the instruction's opcode field into the specified register.
@@ -179,8 +179,8 @@
 #define SET_VREG(_reg, _vreg)   str     _reg, [rFP, _vreg, lsl #2]
 
 #if defined(WITH_JIT)
-#define GET_JIT_PROF_TABLE(_reg)    ldr     _reg,[rGLUE,#offGlue_pJitProfTable]
-#define GET_JIT_THRESHOLD(_reg)     ldr     _reg,[rGLUE,#offGlue_jitThreshold]
+#define GET_JIT_PROF_TABLE(_reg)    ldr _reg,[rSELF,#offThread_pJitProfTable]
+#define GET_JIT_THRESHOLD(_reg)     ldr _reg,[rSELF,#offThread_jitThreshold]
 #endif
 
 /*
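
The added parentheses around _count and _byte in the FETCH macros above matter whenever a macro argument is itself an expression rather than a plain constant. A minimal C sketch of the failure mode the change guards against (the macro names here are illustrative, not the real mterp macros):

    #include <stdio.h>

    /* Unparenthesized argument: '*' binds only to the last term of the argument. */
    #define BYTE_OFFSET_BAD(_count)   (_count * 2)
    /* Parenthesized argument: the whole argument expression is scaled. */
    #define BYTE_OFFSET_GOOD(_count)  ((_count) * 2)

    int main(void)
    {
        /* With the argument "1 + 1" the two expansions differ:
         *   BAD:  1 + 1 * 2   == 3
         *   GOOD: (1 + 1) * 2 == 4
         */
        printf("%d %d\n", BYTE_OFFSET_BAD(1 + 1), BYTE_OFFSET_GOOD(1 + 1));
        return 0;
    }
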
diff --git a/vm/mterp/armv5te/stub.S b/vm/mterp/armv5te/stub.S
index 54f0778..767427b 100644
--- a/vm/mterp/armv5te/stub.S
+++ b/vm/mterp/armv5te/stub.S
@@ -1,8 +1,8 @@
     /* (stub) */
-    SAVE_PC_FP_TO_GLUE()            @ only need to export these two
-    mov     r0, rGLUE               @ glue is first arg to function
+    SAVE_PC_FP_TO_SELF()            @ only need to export these two
+    mov     r0, rSELF               @ self is first arg to function
     bl      dvmMterp_${opcode}      @ call
-    LOAD_PC_FP_FROM_GLUE()          @ retrieve updated values
+    LOAD_PC_FP_FROM_SELF()          @ retrieve updated values
     FETCH_INST()                    @ load next instruction from rPC
     GET_INST_OPCODE(ip)             @ ...trim down to just the opcode
     GOTO_OPCODE(ip)                 @ ...and jump to the handler
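
The stub relies on the same layout property as the LOAD_PC_FP_FROM_SELF()/SAVE_PC_FP_TO_SELF() macros above: a single ldmia/stmia on rSELF can only move rPC and rFP together if the saved pc and fp are the first two words of the thread structure. A hypothetical layout sketch of that assumption (field names and offsets are illustrative, not the actual Thread definition):

    typedef unsigned short u2;
    typedef unsigned int   u4;

    /* Illustrative subset only; the real struct Thread carries much more state. */
    struct ThreadSketch {
        const u2* pc;   /* assumed offThread_pc == 0: first word of "ldmia rSELF, {rPC, rFP}" */
        u4*       fp;   /* assumed offThread_fp == 4: second word of the same ldmia/stmia */
        /* ... remaining interpreter, JIT, and thread state ... */
    };
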
diff --git a/vm/mterp/armv6t2/OP_IGET.S b/vm/mterp/armv6t2/OP_IGET.S
index f5a21eb..14ddf44 100644
--- a/vm/mterp/armv6t2/OP_IGET.S
+++ b/vm/mterp/armv6t2/OP_IGET.S
@@ -11,14 +11,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .L${opcode}_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
diff --git a/vm/mterp/armv6t2/OP_IGET_WIDE.S b/vm/mterp/armv6t2/OP_IGET_WIDE.S
index 92cd1a6..3e826dd 100644
--- a/vm/mterp/armv6t2/OP_IGET_WIDE.S
+++ b/vm/mterp/armv6t2/OP_IGET_WIDE.S
@@ -8,14 +8,14 @@
      */
     /* iget-wide vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .L${opcode}_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
diff --git a/vm/mterp/armv6t2/OP_IPUT.S b/vm/mterp/armv6t2/OP_IPUT.S
index b69443b..4bc8e1b 100644
--- a/vm/mterp/armv6t2/OP_IPUT.S
+++ b/vm/mterp/armv6t2/OP_IPUT.S
@@ -11,14 +11,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .L${opcode}_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
diff --git a/vm/mterp/armv6t2/OP_IPUT_WIDE.S b/vm/mterp/armv6t2/OP_IPUT_WIDE.S
index 334e352..f4ddb43 100644
--- a/vm/mterp/armv6t2/OP_IPUT_WIDE.S
+++ b/vm/mterp/armv6t2/OP_IPUT_WIDE.S
@@ -5,14 +5,14 @@
 %verify "field cannot be resolved"
     /* iput-wide vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .L${opcode}_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
diff --git a/vm/mterp/c/OP_APUT_OBJECT.c b/vm/mterp/c/OP_APUT_OBJECT.c
index 136d0aa..950f18c 100644
--- a/vm/mterp/c/OP_APUT_OBJECT.c
+++ b/vm/mterp/c/OP_APUT_OBJECT.c
@@ -13,7 +13,8 @@
         if (!checkForNull((Object*) arrayObj))
             GOTO_exceptionThrown();
         if (GET_REGISTER(vsrc2) >= arrayObj->length) {
-            dvmThrowAIOOBE(GET_REGISTER(vsrc2), arrayObj->length);
+            dvmThrowArrayIndexOutOfBoundsException(
+                arrayObj->length, GET_REGISTER(vsrc2));
             GOTO_exceptionThrown();
         }
         obj = (Object*) GET_REGISTER(vdst);
diff --git a/vm/mterp/c/OP_CHECK_CAST_JUMBO.c b/vm/mterp/c/OP_CHECK_CAST_JUMBO.c
new file mode 100644
index 0000000..75c314b
--- /dev/null
+++ b/vm/mterp/c/OP_CHECK_CAST_JUMBO.c
@@ -0,0 +1,31 @@
+HANDLE_OPCODE(OP_CHECK_CAST_JUMBO /*vBBBB, class@AAAAAAAA*/)
+    {
+        ClassObject* clazz;
+        Object* obj;
+
+        EXPORT_PC();
+
+        ref = FETCH(1) | (u4)FETCH(2) << 16;     /* class to check against */
+        vsrc1 = FETCH(3);
+        ILOGV("|check-cast/jumbo v%d,class@0x%08x", vsrc1, ref);
+
+        obj = (Object*)GET_REGISTER(vsrc1);
+        if (obj != NULL) {
+#if defined(WITH_EXTRA_OBJECT_VALIDATION)
+            if (!checkForNull(obj))
+                GOTO_exceptionThrown();
+#endif
+            clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+            if (clazz == NULL) {
+                clazz = dvmResolveClass(curMethod->clazz, ref, false);
+                if (clazz == NULL)
+                    GOTO_exceptionThrown();
+            }
+            if (!dvmInstanceof(obj->clazz, clazz)) {
+                dvmThrowClassCastException(obj->clazz, clazz);
+                GOTO_exceptionThrown();
+            }
+        }
+    }
+    FINISH(4);
+OP_END
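
Each of the /jumbo handlers added below reconstructs its 32-bit pool index exactly as the FETCH(1) | (u4)FETCH(2) << 16 line above does: the first extra code unit supplies the low 16 bits and the second supplies the high 16 bits. A small standalone C sketch of that decoding, with an illustrative instruction stream (the opcode word value and register number are made up):

    #include <stdio.h>

    typedef unsigned short u2;
    typedef unsigned int   u4;

    int main(void)
    {
        /* check-cast/jumbo layout: opcode word, AAAA(lo), AAAA(hi), BBBB -- four code units. */
        const u2 insns[] = { 0x00ff, 0x5678, 0x1234, 0x000a };

        u4 ref   = insns[1] | (u4)insns[2] << 16;   /* class index 0x12345678 */
        u2 vsrc1 = insns[3];                        /* register holding the object: v10 */

        printf("class@0x%08x checked against v%d\n", ref, vsrc1);
        return 0;
    }
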
diff --git a/vm/mterp/c/OP_CONST_CLASS_JUMBO.c b/vm/mterp/c/OP_CONST_CLASS_JUMBO.c
new file mode 100644
index 0000000..4fb1431
--- /dev/null
+++ b/vm/mterp/c/OP_CONST_CLASS_JUMBO.c
@@ -0,0 +1,18 @@
+HANDLE_OPCODE(OP_CONST_CLASS_JUMBO /*vBBBB, class@AAAAAAAA*/)
+    {
+        ClassObject* clazz;
+
+        ref = FETCH(1) | (u4)FETCH(2) << 16;
+        vdst = FETCH(3);
+        ILOGV("|const-class/jumbo v%d class@0x%08x", vdst, ref);
+        clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+        if (clazz == NULL) {
+            EXPORT_PC();
+            clazz = dvmResolveClass(curMethod->clazz, ref, true);
+            if (clazz == NULL)
+                GOTO_exceptionThrown();
+        }
+        SET_REGISTER(vdst, (u4) clazz);
+    }
+    FINISH(4);
+OP_END
diff --git a/vm/mterp/c/OP_DISPATCH_FF.c b/vm/mterp/c/OP_DISPATCH_FF.c
index a058b99..53f50c5 100644
--- a/vm/mterp/c/OP_DISPATCH_FF.c
+++ b/vm/mterp/c/OP_DISPATCH_FF.c
@@ -1,8 +1,6 @@
 HANDLE_OPCODE(OP_DISPATCH_FF)
     /*
-     * In portable interp, most unused opcodes will fall through to here.
+     * Indicates extended opcode.  Use next 8 bits to choose where to branch.
      */
-    LOGE("unknown opcode 0x%02x\n", INST_INST(inst));
-    dvmAbort();
-    FINISH(1);
+    DISPATCH_EXTENDED(INST_AA(inst));
 OP_END
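
The low byte 0xFF marks the instruction as extended, and the high byte (AA) of the same 16-bit code unit selects which of the 256 jumbo handlers runs. A hedged sketch of that two-level decode in C (the print statements stand in for the real DISPATCH_EXTENDED and per-opcode handlers):

    #include <stdio.h>

    typedef unsigned short u2;

    static void dispatch(u2 inst)
    {
        unsigned op = inst & 0xffu;     /* INST_INST(inst): low byte is the opcode */
        if (op == 0xffu) {
            unsigned aa = inst >> 8;    /* INST_AA(inst): high byte picks the extended handler */
            printf("extended opcode, table index 0x%02x\n", aa);
        } else {
            printf("regular opcode 0x%02x\n", op);
        }
    }

    int main(void)
    {
        dispatch(0x0052);   /* ordinary opcode word */
        dispatch(0x27ff);   /* jumbo opcode word: AA == 0x27 */
        return 0;
    }
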
diff --git a/vm/mterp/c/OP_FILLED_NEW_ARRAY.c b/vm/mterp/c/OP_FILLED_NEW_ARRAY.c
index fad7dbb..281318d 100644
--- a/vm/mterp/c/OP_FILLED_NEW_ARRAY.c
+++ b/vm/mterp/c/OP_FILLED_NEW_ARRAY.c
@@ -1,3 +1,3 @@
 HANDLE_OPCODE(OP_FILLED_NEW_ARRAY /*vB, {vD, vE, vF, vG, vA}, class@CCCC*/)
-    GOTO_invoke(filledNewArray, false);
+    GOTO_invoke(filledNewArray, false, false);
 OP_END
diff --git a/vm/mterp/c/OP_FILLED_NEW_ARRAY_JUMBO.c b/vm/mterp/c/OP_FILLED_NEW_ARRAY_JUMBO.c
new file mode 100644
index 0000000..dfbc31b
--- /dev/null
+++ b/vm/mterp/c/OP_FILLED_NEW_ARRAY_JUMBO.c
@@ -0,0 +1,3 @@
+HANDLE_OPCODE(OP_FILLED_NEW_ARRAY_JUMBO /*{vCCCC..v(CCCC+BBBB-1)}, class@AAAAAAAA*/)
+    GOTO_invoke(filledNewArray, true, true);
+OP_END
diff --git a/vm/mterp/c/OP_FILLED_NEW_ARRAY_RANGE.c b/vm/mterp/c/OP_FILLED_NEW_ARRAY_RANGE.c
index 06c3a79..48bdf26 100644
--- a/vm/mterp/c/OP_FILLED_NEW_ARRAY_RANGE.c
+++ b/vm/mterp/c/OP_FILLED_NEW_ARRAY_RANGE.c
@@ -1,3 +1,3 @@
 HANDLE_OPCODE(OP_FILLED_NEW_ARRAY_RANGE /*{vCCCC..v(CCCC+AA-1)}, class@BBBB*/)
-    GOTO_invoke(filledNewArray, true);
+    GOTO_invoke(filledNewArray, true, false);
 OP_END
diff --git a/vm/mterp/c/OP_FILL_ARRAY_DATA.c b/vm/mterp/c/OP_FILL_ARRAY_DATA.c
index 095b465..678bb32 100644
--- a/vm/mterp/c/OP_FILL_ARRAY_DATA.c
+++ b/vm/mterp/c/OP_FILL_ARRAY_DATA.c
@@ -14,8 +14,7 @@
             arrayData >= curMethod->insns + dvmGetMethodInsnsSize(curMethod))
         {
             /* should have been caught in verifier */
-            dvmThrowException("Ljava/lang/InternalError;",
-                              "bad fill array data");
+            dvmThrowInternalError("bad fill array data");
             GOTO_exceptionThrown();
         }
 #endif
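
dvmThrowInternalError is one of a family of typed throw helpers that replace the string-based dvmThrowException(descriptor, message) calls removed throughout this change: the exception class is resolved once and cached rather than looked up by descriptor at every throw site. A hypothetical sketch of that shape (the names suffixed "Sketch" and the throwByClass primitive are assumptions for illustration, not the real Dalvik exception API):

    #include <stdio.h>

    typedef struct ClassObject ClassObject;

    typedef struct {
        ClassObject* exInternalError;   /* assumed: java.lang.InternalError, cached at VM startup */
    } DvmGlobalsSketch;

    static DvmGlobalsSketch gDvmSketch;

    /* assumed primitive: throw a pre-resolved exception class with a message */
    static void throwByClass(ClassObject* clazz, const char* msg)
    {
        printf("throw %p: %s\n", (void*) clazz, msg);
    }

    /* Same effect as the removed dvmThrowException("Ljava/lang/InternalError;", msg),
     * minus the by-name class lookup. */
    static void dvmThrowInternalErrorSketch(const char* msg)
    {
        throwByClass(gDvmSketch.exInternalError, msg);
    }

    int main(void)
    {
        dvmThrowInternalErrorSketch("bad fill array data");
        return 0;
    }
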
diff --git a/vm/mterp/c/OP_IGET_BOOLEAN_JUMBO.c b/vm/mterp/c/OP_IGET_BOOLEAN_JUMBO.c
new file mode 100644
index 0000000..712ae91
--- /dev/null
+++ b/vm/mterp/c/OP_IGET_BOOLEAN_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_IGET_X_JUMBO(OP_IGET_BOOLEAN_JUMBO,  "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_IGET_BYTE_JUMBO.c b/vm/mterp/c/OP_IGET_BYTE_JUMBO.c
new file mode 100644
index 0000000..ade7eb9
--- /dev/null
+++ b/vm/mterp/c/OP_IGET_BYTE_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_IGET_X_JUMBO(OP_IGET_BYTE_JUMBO,     "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_IGET_CHAR_JUMBO.c b/vm/mterp/c/OP_IGET_CHAR_JUMBO.c
new file mode 100644
index 0000000..a674059
--- /dev/null
+++ b/vm/mterp/c/OP_IGET_CHAR_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_IGET_X_JUMBO(OP_IGET_CHAR_JUMBO,     "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_IGET_JUMBO.c b/vm/mterp/c/OP_IGET_JUMBO.c
new file mode 100644
index 0000000..32eefc8
--- /dev/null
+++ b/vm/mterp/c/OP_IGET_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_IGET_X_JUMBO(OP_IGET_JUMBO,          "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_IGET_OBJECT_JUMBO.c b/vm/mterp/c/OP_IGET_OBJECT_JUMBO.c
new file mode 100644
index 0000000..2b25dae
--- /dev/null
+++ b/vm/mterp/c/OP_IGET_OBJECT_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_IGET_X_JUMBO(OP_IGET_OBJECT_JUMBO,   "-object", Object, _AS_OBJECT)
+OP_END
diff --git a/vm/mterp/c/OP_IGET_OBJECT_VOLATILE_JUMBO.c b/vm/mterp/c/OP_IGET_OBJECT_VOLATILE_JUMBO.c
new file mode 100644
index 0000000..705aefd
--- /dev/null
+++ b/vm/mterp/c/OP_IGET_OBJECT_VOLATILE_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_IGET_X_JUMBO(OP_IGET_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT)
+OP_END
diff --git a/vm/mterp/c/OP_IGET_SHORT_JUMBO.c b/vm/mterp/c/OP_IGET_SHORT_JUMBO.c
new file mode 100644
index 0000000..30b3ff1
--- /dev/null
+++ b/vm/mterp/c/OP_IGET_SHORT_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_IGET_X_JUMBO(OP_IGET_SHORT_JUMBO,    "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_IGET_VOLATILE_JUMBO.c b/vm/mterp/c/OP_IGET_VOLATILE_JUMBO.c
new file mode 100644
index 0000000..462279a
--- /dev/null
+++ b/vm/mterp/c/OP_IGET_VOLATILE_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_IGET_X_JUMBO(OP_IGET_VOLATILE_JUMBO, "-volatile/jumbo", IntVolatile, )
+OP_END
diff --git a/vm/mterp/c/OP_IGET_WIDE_JUMBO.c b/vm/mterp/c/OP_IGET_WIDE_JUMBO.c
new file mode 100644
index 0000000..f607a77
--- /dev/null
+++ b/vm/mterp/c/OP_IGET_WIDE_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_IGET_X_JUMBO(OP_IGET_WIDE_JUMBO,     "-wide", Long, _WIDE)
+OP_END
diff --git a/vm/mterp/c/OP_IGET_WIDE_VOLATILE_JUMBO.c b/vm/mterp/c/OP_IGET_WIDE_VOLATILE_JUMBO.c
new file mode 100644
index 0000000..5285a31
--- /dev/null
+++ b/vm/mterp/c/OP_IGET_WIDE_VOLATILE_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_IGET_X_JUMBO(OP_IGET_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE)
+OP_END
diff --git a/vm/mterp/c/OP_INSTANCE_OF_JUMBO.c b/vm/mterp/c/OP_INSTANCE_OF_JUMBO.c
new file mode 100644
index 0000000..0249d96
--- /dev/null
+++ b/vm/mterp/c/OP_INSTANCE_OF_JUMBO.c
@@ -0,0 +1,30 @@
+HANDLE_OPCODE(OP_INSTANCE_OF_JUMBO /*vBBBB, vCCCC, class@AAAAAAAA*/)
+    {
+        ClassObject* clazz;
+        Object* obj;
+
+        ref = FETCH(1) | (u4)FETCH(2) << 16;     /* class to check against */
+        vdst = FETCH(3);
+        vsrc1 = FETCH(4);   /* object to check */
+        ILOGV("|instance-of/jumbo v%d,v%d,class@0x%08x", vdst, vsrc1, ref);
+
+        obj = (Object*)GET_REGISTER(vsrc1);
+        if (obj == NULL) {
+            SET_REGISTER(vdst, 0);
+        } else {
+#if defined(WITH_EXTRA_OBJECT_VALIDATION)
+            if (!checkForNullExportPC(obj, fp, pc))
+                GOTO_exceptionThrown();
+#endif
+            clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+            if (clazz == NULL) {
+                EXPORT_PC();
+                clazz = dvmResolveClass(curMethod->clazz, ref, true);
+                if (clazz == NULL)
+                    GOTO_exceptionThrown();
+            }
+            SET_REGISTER(vdst, dvmInstanceof(obj->clazz, clazz));
+        }
+    }
+    FINISH(5);
+OP_END
diff --git a/vm/mterp/c/OP_INVOKE_DIRECT.c b/vm/mterp/c/OP_INVOKE_DIRECT.c
index 58cfe5b..11a2c81 100644
--- a/vm/mterp/c/OP_INVOKE_DIRECT.c
+++ b/vm/mterp/c/OP_INVOKE_DIRECT.c
@@ -1,3 +1,3 @@
 HANDLE_OPCODE(OP_INVOKE_DIRECT /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-    GOTO_invoke(invokeDirect, false);
+    GOTO_invoke(invokeDirect, false, false);
 OP_END
diff --git a/vm/mterp/c/OP_INVOKE_DIRECT_EMPTY.c b/vm/mterp/c/OP_INVOKE_DIRECT_EMPTY.c
deleted file mode 100644
index d649252..0000000
--- a/vm/mterp/c/OP_INVOKE_DIRECT_EMPTY.c
+++ /dev/null
@@ -1,15 +0,0 @@
-HANDLE_OPCODE(OP_INVOKE_DIRECT_EMPTY /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-#if INTERP_TYPE != INTERP_DBG
-    //LOGI("Ignoring empty\n");
-    FINISH(3);
-#else
-    if (!gDvm.debuggerActive) {
-        //LOGI("Skipping empty\n");
-        FINISH(3);      // don't want it to show up in profiler output
-    } else {
-        //LOGI("Running empty\n");
-        /* fall through to OP_INVOKE_DIRECT */
-        GOTO_invoke(invokeDirect, false);
-    }
-#endif
-OP_END
diff --git a/vm/mterp/c/OP_INVOKE_DIRECT_JUMBO.c b/vm/mterp/c/OP_INVOKE_DIRECT_JUMBO.c
new file mode 100644
index 0000000..e31e584
--- /dev/null
+++ b/vm/mterp/c/OP_INVOKE_DIRECT_JUMBO.c
@@ -0,0 +1,3 @@
+HANDLE_OPCODE(OP_INVOKE_DIRECT_JUMBO /*{vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA*/)
+    GOTO_invoke(invokeDirect, true, true);
+OP_END
diff --git a/vm/mterp/c/OP_INVOKE_DIRECT_RANGE.c b/vm/mterp/c/OP_INVOKE_DIRECT_RANGE.c
index 9877bbe..6de06ee 100644
--- a/vm/mterp/c/OP_INVOKE_DIRECT_RANGE.c
+++ b/vm/mterp/c/OP_INVOKE_DIRECT_RANGE.c
@@ -1,3 +1,3 @@
 HANDLE_OPCODE(OP_INVOKE_DIRECT_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
-    GOTO_invoke(invokeDirect, true);
+    GOTO_invoke(invokeDirect, true, false);
 OP_END
diff --git a/vm/mterp/c/OP_INVOKE_INTERFACE.c b/vm/mterp/c/OP_INVOKE_INTERFACE.c
index 9c639d5..1de99d1 100644
--- a/vm/mterp/c/OP_INVOKE_INTERFACE.c
+++ b/vm/mterp/c/OP_INVOKE_INTERFACE.c
@@ -1,3 +1,3 @@
 HANDLE_OPCODE(OP_INVOKE_INTERFACE /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-    GOTO_invoke(invokeInterface, false);
+    GOTO_invoke(invokeInterface, false, false);
 OP_END
diff --git a/vm/mterp/c/OP_INVOKE_INTERFACE_JUMBO.c b/vm/mterp/c/OP_INVOKE_INTERFACE_JUMBO.c
new file mode 100644
index 0000000..720a9bf
--- /dev/null
+++ b/vm/mterp/c/OP_INVOKE_INTERFACE_JUMBO.c
@@ -0,0 +1,3 @@
+HANDLE_OPCODE(OP_INVOKE_INTERFACE_JUMBO /*{vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA*/)
+    GOTO_invoke(invokeInterface, true, true);
+OP_END
diff --git a/vm/mterp/c/OP_INVOKE_INTERFACE_RANGE.c b/vm/mterp/c/OP_INVOKE_INTERFACE_RANGE.c
index 6244c9e..5cabdfb 100644
--- a/vm/mterp/c/OP_INVOKE_INTERFACE_RANGE.c
+++ b/vm/mterp/c/OP_INVOKE_INTERFACE_RANGE.c
@@ -1,3 +1,3 @@
 HANDLE_OPCODE(OP_INVOKE_INTERFACE_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
-    GOTO_invoke(invokeInterface, true);
+    GOTO_invoke(invokeInterface, true, false);
 OP_END
diff --git a/vm/mterp/c/OP_INVOKE_OBJECT_INIT_JUMBO.c b/vm/mterp/c/OP_INVOKE_OBJECT_INIT_JUMBO.c
new file mode 100644
index 0000000..3f9d052
--- /dev/null
+++ b/vm/mterp/c/OP_INVOKE_OBJECT_INIT_JUMBO.c
@@ -0,0 +1,28 @@
+HANDLE_OPCODE(OP_INVOKE_OBJECT_INIT_JUMBO /*{vCCCC..vNNNN}, meth@AAAAAAAA*/)
+    {
+        Object* obj;
+
+        vsrc1 = FETCH(4);               /* reg number of "this" pointer */
+        obj = GET_REGISTER_AS_OBJECT(vsrc1);
+
+        if (!checkForNullExportPC(obj, fp, pc))
+            GOTO_exceptionThrown();
+
+        /*
+         * The object should be marked "finalizable" when Object.<init>
+         * completes normally.  We're going to assume it does complete
+         * (by virtue of being nothing but a return-void) and set it now.
+         */
+        if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE)) {
+            dvmSetFinalizable(obj);
+        }
+
+#if INTERP_TYPE == INTERP_DBG
+        if (DEBUGGER_ACTIVE) {
+            /* behave like OP_INVOKE_DIRECT_RANGE */
+            GOTO_invoke(invokeDirect, true, true);
+        }
+#endif
+        FINISH(5);
+    }
+OP_END
diff --git a/vm/mterp/c/OP_INVOKE_OBJECT_INIT_RANGE.c b/vm/mterp/c/OP_INVOKE_OBJECT_INIT_RANGE.c
new file mode 100644
index 0000000..da0d762
--- /dev/null
+++ b/vm/mterp/c/OP_INVOKE_OBJECT_INIT_RANGE.c
@@ -0,0 +1,28 @@
+HANDLE_OPCODE(OP_INVOKE_OBJECT_INIT_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+    {
+        Object* obj;
+
+        vsrc1 = FETCH(2);               /* reg number of "this" pointer */
+        obj = GET_REGISTER_AS_OBJECT(vsrc1);
+
+        if (!checkForNullExportPC(obj, fp, pc))
+            GOTO_exceptionThrown();
+
+        /*
+         * The object should be marked "finalizable" when Object.<init>
+         * completes normally.  We're going to assume it does complete
+         * (by virtue of being nothing but a return-void) and set it now.
+         */
+        if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE)) {
+            dvmSetFinalizable(obj);
+        }
+
+#if INTERP_TYPE == INTERP_DBG
+        if (DEBUGGER_ACTIVE) {
+            /* behave like OP_INVOKE_DIRECT_RANGE */
+            GOTO_invoke(invokeDirect, true, false);
+        }
+#endif
+        FINISH(3);
+    }
+OP_END
diff --git a/vm/mterp/c/OP_INVOKE_STATIC.c b/vm/mterp/c/OP_INVOKE_STATIC.c
index 81f3d62..a162e0b 100644
--- a/vm/mterp/c/OP_INVOKE_STATIC.c
+++ b/vm/mterp/c/OP_INVOKE_STATIC.c
@@ -1,3 +1,3 @@
 HANDLE_OPCODE(OP_INVOKE_STATIC /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-    GOTO_invoke(invokeStatic, false);
+    GOTO_invoke(invokeStatic, false, false);
 OP_END
diff --git a/vm/mterp/c/OP_INVOKE_STATIC_JUMBO.c b/vm/mterp/c/OP_INVOKE_STATIC_JUMBO.c
new file mode 100644
index 0000000..29066e9
--- /dev/null
+++ b/vm/mterp/c/OP_INVOKE_STATIC_JUMBO.c
@@ -0,0 +1,3 @@
+HANDLE_OPCODE(OP_INVOKE_STATIC_JUMBO /*{vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA*/)
+    GOTO_invoke(invokeStatic, true, true);
+OP_END
diff --git a/vm/mterp/c/OP_INVOKE_STATIC_RANGE.c b/vm/mterp/c/OP_INVOKE_STATIC_RANGE.c
index 3fc4c35..103f745 100644
--- a/vm/mterp/c/OP_INVOKE_STATIC_RANGE.c
+++ b/vm/mterp/c/OP_INVOKE_STATIC_RANGE.c
@@ -1,3 +1,3 @@
 HANDLE_OPCODE(OP_INVOKE_STATIC_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
-    GOTO_invoke(invokeStatic, true);
+    GOTO_invoke(invokeStatic, true, false);
 OP_END
diff --git a/vm/mterp/c/OP_INVOKE_SUPER.c b/vm/mterp/c/OP_INVOKE_SUPER.c
index e7baea4..e70e8ed 100644
--- a/vm/mterp/c/OP_INVOKE_SUPER.c
+++ b/vm/mterp/c/OP_INVOKE_SUPER.c
@@ -1,3 +1,3 @@
 HANDLE_OPCODE(OP_INVOKE_SUPER /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-    GOTO_invoke(invokeSuper, false);
+    GOTO_invoke(invokeSuper, false, false);
 OP_END
diff --git a/vm/mterp/c/OP_INVOKE_SUPER_JUMBO.c b/vm/mterp/c/OP_INVOKE_SUPER_JUMBO.c
new file mode 100644
index 0000000..e1e75c1
--- /dev/null
+++ b/vm/mterp/c/OP_INVOKE_SUPER_JUMBO.c
@@ -0,0 +1,3 @@
+HANDLE_OPCODE(OP_INVOKE_SUPER_JUMBO /*{vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA*/)
+    GOTO_invoke(invokeSuper, true, true);
+OP_END
diff --git a/vm/mterp/c/OP_INVOKE_SUPER_QUICK.c b/vm/mterp/c/OP_INVOKE_SUPER_QUICK.c
index b66e033..1c9b16c 100644
--- a/vm/mterp/c/OP_INVOKE_SUPER_QUICK.c
+++ b/vm/mterp/c/OP_INVOKE_SUPER_QUICK.c
@@ -1,3 +1,3 @@
 HANDLE_OPCODE(OP_INVOKE_SUPER_QUICK /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-    GOTO_invoke(invokeSuperQuick, false);
+    GOTO_invoke(invokeSuperQuick, false, false);
 OP_END
diff --git a/vm/mterp/c/OP_INVOKE_SUPER_QUICK_RANGE.c b/vm/mterp/c/OP_INVOKE_SUPER_QUICK_RANGE.c
index 879497b..4b11ccc 100644
--- a/vm/mterp/c/OP_INVOKE_SUPER_QUICK_RANGE.c
+++ b/vm/mterp/c/OP_INVOKE_SUPER_QUICK_RANGE.c
@@ -1,3 +1,3 @@
 HANDLE_OPCODE(OP_INVOKE_SUPER_QUICK_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
-    GOTO_invoke(invokeSuperQuick, true);
+    GOTO_invoke(invokeSuperQuick, true, false);
 OP_END
diff --git a/vm/mterp/c/OP_INVOKE_SUPER_RANGE.c b/vm/mterp/c/OP_INVOKE_SUPER_RANGE.c
index 724e3a0..fca6b3e 100644
--- a/vm/mterp/c/OP_INVOKE_SUPER_RANGE.c
+++ b/vm/mterp/c/OP_INVOKE_SUPER_RANGE.c
@@ -1,3 +1,3 @@
 HANDLE_OPCODE(OP_INVOKE_SUPER_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
-    GOTO_invoke(invokeSuper, true);
+    GOTO_invoke(invokeSuper, true, false);
 OP_END
diff --git a/vm/mterp/c/OP_INVOKE_VIRTUAL.c b/vm/mterp/c/OP_INVOKE_VIRTUAL.c
index 29a4560..894ad46 100644
--- a/vm/mterp/c/OP_INVOKE_VIRTUAL.c
+++ b/vm/mterp/c/OP_INVOKE_VIRTUAL.c
@@ -1,3 +1,3 @@
 HANDLE_OPCODE(OP_INVOKE_VIRTUAL /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-    GOTO_invoke(invokeVirtual, false);
+    GOTO_invoke(invokeVirtual, false, false);
 OP_END
diff --git a/vm/mterp/c/OP_INVOKE_VIRTUAL_JUMBO.c b/vm/mterp/c/OP_INVOKE_VIRTUAL_JUMBO.c
new file mode 100644
index 0000000..9fa61ec
--- /dev/null
+++ b/vm/mterp/c/OP_INVOKE_VIRTUAL_JUMBO.c
@@ -0,0 +1,3 @@
+HANDLE_OPCODE(OP_INVOKE_VIRTUAL_JUMBO /*{vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA*/)
+    GOTO_invoke(invokeVirtual, true, true);
+OP_END
diff --git a/vm/mterp/c/OP_INVOKE_VIRTUAL_QUICK.c b/vm/mterp/c/OP_INVOKE_VIRTUAL_QUICK.c
index 244fed4..7a6d540 100644
--- a/vm/mterp/c/OP_INVOKE_VIRTUAL_QUICK.c
+++ b/vm/mterp/c/OP_INVOKE_VIRTUAL_QUICK.c
@@ -1,3 +1,3 @@
 HANDLE_OPCODE(OP_INVOKE_VIRTUAL_QUICK /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-    GOTO_invoke(invokeVirtualQuick, false);
+    GOTO_invoke(invokeVirtualQuick, false, false);
 OP_END
diff --git a/vm/mterp/c/OP_INVOKE_VIRTUAL_QUICK_RANGE.c b/vm/mterp/c/OP_INVOKE_VIRTUAL_QUICK_RANGE.c
index 9adb4ad..e70446c 100644
--- a/vm/mterp/c/OP_INVOKE_VIRTUAL_QUICK_RANGE.c
+++ b/vm/mterp/c/OP_INVOKE_VIRTUAL_QUICK_RANGE.c
@@ -1,3 +1,3 @@
 HANDLE_OPCODE(OP_INVOKE_VIRTUAL_QUICK_RANGE/*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
-    GOTO_invoke(invokeVirtualQuick, true);
+    GOTO_invoke(invokeVirtualQuick, true, false);
 OP_END
diff --git a/vm/mterp/c/OP_INVOKE_VIRTUAL_RANGE.c b/vm/mterp/c/OP_INVOKE_VIRTUAL_RANGE.c
index 94671ae..4c66d56 100644
--- a/vm/mterp/c/OP_INVOKE_VIRTUAL_RANGE.c
+++ b/vm/mterp/c/OP_INVOKE_VIRTUAL_RANGE.c
@@ -1,3 +1,3 @@
 HANDLE_OPCODE(OP_INVOKE_VIRTUAL_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
-    GOTO_invoke(invokeVirtual, true);
+    GOTO_invoke(invokeVirtual, true, false);
 OP_END
diff --git a/vm/mterp/c/OP_IPUT_BOOLEAN_JUMBO.c b/vm/mterp/c/OP_IPUT_BOOLEAN_JUMBO.c
new file mode 100644
index 0000000..405ee9d
--- /dev/null
+++ b/vm/mterp/c/OP_IPUT_BOOLEAN_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_IPUT_X_JUMBO(OP_IPUT_BOOLEAN_JUMBO,  "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_IPUT_BYTE_JUMBO.c b/vm/mterp/c/OP_IPUT_BYTE_JUMBO.c
new file mode 100644
index 0000000..40a9969
--- /dev/null
+++ b/vm/mterp/c/OP_IPUT_BYTE_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_IPUT_X_JUMBO(OP_IPUT_BYTE_JUMBO,     "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_IPUT_CHAR_JUMBO.c b/vm/mterp/c/OP_IPUT_CHAR_JUMBO.c
new file mode 100644
index 0000000..170a353
--- /dev/null
+++ b/vm/mterp/c/OP_IPUT_CHAR_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_IPUT_X_JUMBO(OP_IPUT_CHAR_JUMBO,     "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_IPUT_JUMBO.c b/vm/mterp/c/OP_IPUT_JUMBO.c
new file mode 100644
index 0000000..2419bf2
--- /dev/null
+++ b/vm/mterp/c/OP_IPUT_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_IPUT_X_JUMBO(OP_IPUT_JUMBO,          "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_IPUT_OBJECT_JUMBO.c b/vm/mterp/c/OP_IPUT_OBJECT_JUMBO.c
new file mode 100644
index 0000000..47a0576
--- /dev/null
+++ b/vm/mterp/c/OP_IPUT_OBJECT_JUMBO.c
@@ -0,0 +1,13 @@
+/*
+ * The VM spec says we should verify that the reference being stored into
+ * the field is assignment compatible.  In practice, many popular VMs don't
+ * do this because it slows down a very common operation.  It's not so bad
+ * for us, since "dexopt" quickens it whenever possible, but it's still an
+ * issue.
+ *
+ * To make this spec-compliant, we'd need to add a ClassObject pointer to
+ * the Field struct, resolve the field's type descriptor at link or class
+ * init time, and then verify the type here.
+ */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_OBJECT_JUMBO,   "-object", Object, _AS_OBJECT)
+OP_END
diff --git a/vm/mterp/c/OP_IPUT_OBJECT_VOLATILE_JUMBO.c b/vm/mterp/c/OP_IPUT_OBJECT_VOLATILE_JUMBO.c
new file mode 100644
index 0000000..0af17e3
--- /dev/null
+++ b/vm/mterp/c/OP_IPUT_OBJECT_VOLATILE_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_IPUT_X_JUMBO(OP_IPUT_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT)
+OP_END
diff --git a/vm/mterp/c/OP_IPUT_SHORT_JUMBO.c b/vm/mterp/c/OP_IPUT_SHORT_JUMBO.c
new file mode 100644
index 0000000..41e0c44
--- /dev/null
+++ b/vm/mterp/c/OP_IPUT_SHORT_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_IPUT_X_JUMBO(OP_IPUT_SHORT_JUMBO,    "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_IPUT_VOLATILE_JUMBO.c b/vm/mterp/c/OP_IPUT_VOLATILE_JUMBO.c
new file mode 100644
index 0000000..82216c6
--- /dev/null
+++ b/vm/mterp/c/OP_IPUT_VOLATILE_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_IPUT_X_JUMBO(OP_IPUT_VOLATILE_JUMBO, "-volatile/jumbo", IntVolatile, )
+OP_END
diff --git a/vm/mterp/c/OP_IPUT_WIDE_JUMBO.c b/vm/mterp/c/OP_IPUT_WIDE_JUMBO.c
new file mode 100644
index 0000000..72a4082
--- /dev/null
+++ b/vm/mterp/c/OP_IPUT_WIDE_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_IPUT_X_JUMBO(OP_IPUT_WIDE_JUMBO,     "-wide", Long, _WIDE)
+OP_END
diff --git a/vm/mterp/c/OP_IPUT_WIDE_VOLATILE_JUMBO.c b/vm/mterp/c/OP_IPUT_WIDE_VOLATILE_JUMBO.c
new file mode 100644
index 0000000..f4a2140
--- /dev/null
+++ b/vm/mterp/c/OP_IPUT_WIDE_VOLATILE_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_IPUT_X_JUMBO(OP_IPUT_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE)
+OP_END
diff --git a/vm/mterp/c/OP_MONITOR_ENTER.c b/vm/mterp/c/OP_MONITOR_ENTER.c
index c9d8999..de33483 100644
--- a/vm/mterp/c/OP_MONITOR_ENTER.c
+++ b/vm/mterp/c/OP_MONITOR_ENTER.c
@@ -9,12 +9,8 @@
         if (!checkForNullExportPC(obj, fp, pc))
             GOTO_exceptionThrown();
         ILOGV("+ locking %p %s\n", obj, obj->clazz->descriptor);
-        EXPORT_PC();    /* need for precise GC, also WITH_MONITOR_TRACKING */
+        EXPORT_PC();    /* need for precise GC */
         dvmLockObject(self, obj);
-#ifdef WITH_DEADLOCK_PREDICTION
-        if (dvmCheckException(self))
-            GOTO_exceptionThrown();
-#endif
     }
     FINISH(1);
 OP_END
diff --git a/vm/mterp/c/OP_NEW_ARRAY.c b/vm/mterp/c/OP_NEW_ARRAY.c
index 525c43b..6d6771a 100644
--- a/vm/mterp/c/OP_NEW_ARRAY.c
+++ b/vm/mterp/c/OP_NEW_ARRAY.c
@@ -13,7 +13,7 @@
             vdst, vsrc1, ref, (s4) GET_REGISTER(vsrc1));
         length = (s4) GET_REGISTER(vsrc1);
         if (length < 0) {
-            dvmThrowException("Ljava/lang/NegativeArraySizeException;", NULL);
+            dvmThrowNegativeArraySizeException(length);
             GOTO_exceptionThrown();
         }
         arrayClass = dvmDexGetResolvedClass(methodClassDex, ref);
diff --git a/vm/mterp/c/OP_NEW_ARRAY_JUMBO.c b/vm/mterp/c/OP_NEW_ARRAY_JUMBO.c
new file mode 100644
index 0000000..7c0d551
--- /dev/null
+++ b/vm/mterp/c/OP_NEW_ARRAY_JUMBO.c
@@ -0,0 +1,35 @@
+HANDLE_OPCODE(OP_NEW_ARRAY_JUMBO /*vBBBB, vCCCC, class@AAAAAAAA*/)
+    {
+        ClassObject* arrayClass;
+        ArrayObject* newArray;
+        s4 length;
+
+        EXPORT_PC();
+
+        ref = FETCH(1) | (u4)FETCH(2) << 16;
+        vdst = FETCH(3);
+        vsrc1 = FETCH(4);       /* length reg */
+        ILOGV("|new-array/jumbo v%d,v%d,class@0x%08x  (%d elements)",
+            vdst, vsrc1, ref, (s4) GET_REGISTER(vsrc1));
+        length = (s4) GET_REGISTER(vsrc1);
+        if (length < 0) {
+            dvmThrowNegativeArraySizeException(length);
+            GOTO_exceptionThrown();
+        }
+        arrayClass = dvmDexGetResolvedClass(methodClassDex, ref);
+        if (arrayClass == NULL) {
+            arrayClass = dvmResolveClass(curMethod->clazz, ref, false);
+            if (arrayClass == NULL)
+                GOTO_exceptionThrown();
+        }
+        /* verifier guarantees this is an array class */
+        assert(dvmIsArrayClass(arrayClass));
+        assert(dvmIsClassInitialized(arrayClass));
+
+        newArray = dvmAllocArrayByClass(arrayClass, length, ALLOC_DONT_TRACK);
+        if (newArray == NULL)
+            GOTO_exceptionThrown();
+        SET_REGISTER(vdst, (u4) newArray);
+    }
+    FINISH(5);
+OP_END
diff --git a/vm/mterp/c/OP_NEW_INSTANCE.c b/vm/mterp/c/OP_NEW_INSTANCE.c
index f7d4c64..155434f 100644
--- a/vm/mterp/c/OP_NEW_INSTANCE.c
+++ b/vm/mterp/c/OP_NEW_INSTANCE.c
@@ -24,15 +24,15 @@
          * check is not needed for mterp.
          */
         if (!dvmDexGetResolvedClass(methodClassDex, ref)) {
-            /* Class initialization is still ongoing - abandon the trace */
-            ABORT_JIT_TSELECT();
+            /* Class initialization is still ongoing - end the trace */
+            END_JIT_TSELECT();
         }
 
         /*
          * Verifier now tests for interface/abstract class.
          */
         //if (dvmIsInterfaceClass(clazz) || dvmIsAbstractClass(clazz)) {
-        //    dvmThrowExceptionWithClassMessage("Ljava/lang/InstantiationError;",
+        //    dvmThrowExceptionWithClassMessage(gDvm.exInstantiationError,
         //        clazz->descriptor);
         //    GOTO_exceptionThrown();
         //}
diff --git a/vm/mterp/c/OP_NEW_INSTANCE_JUMBO.c b/vm/mterp/c/OP_NEW_INSTANCE_JUMBO.c
new file mode 100644
index 0000000..2464b69
--- /dev/null
+++ b/vm/mterp/c/OP_NEW_INSTANCE_JUMBO.c
@@ -0,0 +1,45 @@
+HANDLE_OPCODE(OP_NEW_INSTANCE_JUMBO /*vBBBB, class@AAAAAAAA*/)
+    {
+        ClassObject* clazz;
+        Object* newObj;
+
+        EXPORT_PC();
+
+        ref = FETCH(1) | (u4)FETCH(2) << 16;
+        vdst = FETCH(3);
+        ILOGV("|new-instance/jumbo v%d,class@0x%08x", vdst, ref);
+        clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+        if (clazz == NULL) {
+            clazz = dvmResolveClass(curMethod->clazz, ref, false);
+            if (clazz == NULL)
+                GOTO_exceptionThrown();
+        }
+
+        if (!dvmIsClassInitialized(clazz) && !dvmInitClass(clazz))
+            GOTO_exceptionThrown();
+
+        /*
+         * The JIT needs dvmDexGetResolvedClass() to return non-null.
+         * Since we use the portable interpreter to build the trace, this extra
+         * check is not needed for mterp.
+         */
+        if (!dvmDexGetResolvedClass(methodClassDex, ref)) {
+            /* Class initialization is still ongoing - end the trace */
+            END_JIT_TSELECT();
+        }
+
+        /*
+         * Verifier now tests for interface/abstract class.
+         */
+        //if (dvmIsInterfaceClass(clazz) || dvmIsAbstractClass(clazz)) {
+        //    dvmThrowExceptionWithClassMessage(gDvm.exInstantiationError,
+        //        clazz->descriptor);
+        //    GOTO_exceptionThrown();
+        //}
+        newObj = dvmAllocObject(clazz, ALLOC_DONT_TRACK);
+        if (newObj == NULL)
+            GOTO_exceptionThrown();
+        SET_REGISTER(vdst, (u4) newObj);
+    }
+    FINISH(4);
+OP_END
diff --git a/vm/mterp/c/OP_PACKED_SWITCH.c b/vm/mterp/c/OP_PACKED_SWITCH.c
index d0986dc..cca9a7c 100644
--- a/vm/mterp/c/OP_PACKED_SWITCH.c
+++ b/vm/mterp/c/OP_PACKED_SWITCH.c
@@ -14,7 +14,7 @@
         {
             /* should have been caught in verifier */
             EXPORT_PC();
-            dvmThrowException("Ljava/lang/InternalError;", "bad packed switch");
+            dvmThrowInternalError("bad packed switch");
             GOTO_exceptionThrown();
         }
 #endif
diff --git a/vm/mterp/c/OP_SGET_BOOLEAN_JUMBO.c b/vm/mterp/c/OP_SGET_BOOLEAN_JUMBO.c
new file mode 100644
index 0000000..b0a7525
--- /dev/null
+++ b/vm/mterp/c/OP_SGET_BOOLEAN_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_SGET_X_JUMBO(OP_SGET_BOOLEAN_JUMBO,  "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_SGET_BYTE_JUMBO.c b/vm/mterp/c/OP_SGET_BYTE_JUMBO.c
new file mode 100644
index 0000000..421cac4
--- /dev/null
+++ b/vm/mterp/c/OP_SGET_BYTE_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_SGET_X_JUMBO(OP_SGET_BYTE_JUMBO,     "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_SGET_CHAR_JUMBO.c b/vm/mterp/c/OP_SGET_CHAR_JUMBO.c
new file mode 100644
index 0000000..71663f0
--- /dev/null
+++ b/vm/mterp/c/OP_SGET_CHAR_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_SGET_X_JUMBO(OP_SGET_CHAR_JUMBO,     "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_SGET_JUMBO.c b/vm/mterp/c/OP_SGET_JUMBO.c
new file mode 100644
index 0000000..460f06a
--- /dev/null
+++ b/vm/mterp/c/OP_SGET_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_SGET_X_JUMBO(OP_SGET_JUMBO,          "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_SGET_OBJECT_JUMBO.c b/vm/mterp/c/OP_SGET_OBJECT_JUMBO.c
new file mode 100644
index 0000000..0531c44
--- /dev/null
+++ b/vm/mterp/c/OP_SGET_OBJECT_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_SGET_X_JUMBO(OP_SGET_OBJECT_JUMBO,   "-object", Object, _AS_OBJECT)
+OP_END
diff --git a/vm/mterp/c/OP_SGET_OBJECT_VOLATILE_JUMBO.c b/vm/mterp/c/OP_SGET_OBJECT_VOLATILE_JUMBO.c
new file mode 100644
index 0000000..b96ef5d
--- /dev/null
+++ b/vm/mterp/c/OP_SGET_OBJECT_VOLATILE_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_SGET_X_JUMBO(OP_SGET_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT)
+OP_END
diff --git a/vm/mterp/c/OP_SGET_SHORT_JUMBO.c b/vm/mterp/c/OP_SGET_SHORT_JUMBO.c
new file mode 100644
index 0000000..fdcc727
--- /dev/null
+++ b/vm/mterp/c/OP_SGET_SHORT_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_SGET_X_JUMBO(OP_SGET_SHORT_JUMBO,    "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_SGET_VOLATILE_JUMBO.c b/vm/mterp/c/OP_SGET_VOLATILE_JUMBO.c
new file mode 100644
index 0000000..5cf8975
--- /dev/null
+++ b/vm/mterp/c/OP_SGET_VOLATILE_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_SGET_X_JUMBO(OP_SGET_VOLATILE_JUMBO, "-volatile/jumbo", IntVolatile, )
+OP_END
diff --git a/vm/mterp/c/OP_SGET_WIDE_JUMBO.c b/vm/mterp/c/OP_SGET_WIDE_JUMBO.c
new file mode 100644
index 0000000..213b00f
--- /dev/null
+++ b/vm/mterp/c/OP_SGET_WIDE_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_SGET_X_JUMBO(OP_SGET_WIDE_JUMBO,     "-wide", Long, _WIDE)
+OP_END
diff --git a/vm/mterp/c/OP_SGET_WIDE_VOLATILE_JUMBO.c b/vm/mterp/c/OP_SGET_WIDE_VOLATILE_JUMBO.c
new file mode 100644
index 0000000..4b75c75
--- /dev/null
+++ b/vm/mterp/c/OP_SGET_WIDE_VOLATILE_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_SGET_X_JUMBO(OP_SGET_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE)
+OP_END
diff --git a/vm/mterp/c/OP_SPARSE_SWITCH.c b/vm/mterp/c/OP_SPARSE_SWITCH.c
index 7f3648a..e4e04ba 100644
--- a/vm/mterp/c/OP_SPARSE_SWITCH.c
+++ b/vm/mterp/c/OP_SPARSE_SWITCH.c
@@ -14,7 +14,7 @@
         {
             /* should have been caught in verifier */
             EXPORT_PC();
-            dvmThrowException("Ljava/lang/InternalError;", "bad sparse switch");
+            dvmThrowInternalError("bad sparse switch");
             GOTO_exceptionThrown();
         }
 #endif
diff --git a/vm/mterp/c/OP_SPUT_BOOLEAN_JUMBO.c b/vm/mterp/c/OP_SPUT_BOOLEAN_JUMBO.c
new file mode 100644
index 0000000..57b368e
--- /dev/null
+++ b/vm/mterp/c/OP_SPUT_BOOLEAN_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_SPUT_X_JUMBO(OP_SPUT_BOOLEAN_JUMBO,          "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_SPUT_BYTE_JUMBO.c b/vm/mterp/c/OP_SPUT_BYTE_JUMBO.c
new file mode 100644
index 0000000..10dc04d
--- /dev/null
+++ b/vm/mterp/c/OP_SPUT_BYTE_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_SPUT_X_JUMBO(OP_SPUT_BYTE_JUMBO,     "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_SPUT_CHAR_JUMBO.c b/vm/mterp/c/OP_SPUT_CHAR_JUMBO.c
new file mode 100644
index 0000000..1e64533
--- /dev/null
+++ b/vm/mterp/c/OP_SPUT_CHAR_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_SPUT_X_JUMBO(OP_SPUT_CHAR_JUMBO,     "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_SPUT_JUMBO.c b/vm/mterp/c/OP_SPUT_JUMBO.c
new file mode 100644
index 0000000..f2d90a5
--- /dev/null
+++ b/vm/mterp/c/OP_SPUT_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_SPUT_X_JUMBO(OP_SPUT_JUMBO,          "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_SPUT_OBJECT_JUMBO.c b/vm/mterp/c/OP_SPUT_OBJECT_JUMBO.c
new file mode 100644
index 0000000..e79e25a
--- /dev/null
+++ b/vm/mterp/c/OP_SPUT_OBJECT_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_SPUT_X_JUMBO(OP_SPUT_OBJECT_JUMBO,   "-object", Object, _AS_OBJECT)
+OP_END
diff --git a/vm/mterp/c/OP_SPUT_OBJECT_VOLATILE_JUMBO.c b/vm/mterp/c/OP_SPUT_OBJECT_VOLATILE_JUMBO.c
new file mode 100644
index 0000000..4f60a5d
--- /dev/null
+++ b/vm/mterp/c/OP_SPUT_OBJECT_VOLATILE_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_SPUT_X_JUMBO(OP_SPUT_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT)
+OP_END
diff --git a/vm/mterp/c/OP_SPUT_SHORT_JUMBO.c b/vm/mterp/c/OP_SPUT_SHORT_JUMBO.c
new file mode 100644
index 0000000..8c82392
--- /dev/null
+++ b/vm/mterp/c/OP_SPUT_SHORT_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_SPUT_X_JUMBO(OP_SPUT_SHORT_JUMBO,    "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_SPUT_VOLATILE_JUMBO.c b/vm/mterp/c/OP_SPUT_VOLATILE_JUMBO.c
new file mode 100644
index 0000000..845cc83
--- /dev/null
+++ b/vm/mterp/c/OP_SPUT_VOLATILE_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_SPUT_X_JUMBO(OP_SPUT_VOLATILE_JUMBO, "-volatile", IntVolatile, )
+OP_END
diff --git a/vm/mterp/c/OP_SPUT_WIDE_JUMBO.c b/vm/mterp/c/OP_SPUT_WIDE_JUMBO.c
new file mode 100644
index 0000000..965eeb6
--- /dev/null
+++ b/vm/mterp/c/OP_SPUT_WIDE_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_SPUT_X_JUMBO(OP_SPUT_WIDE_JUMBO,     "-wide", Long, _WIDE)
+OP_END
diff --git a/vm/mterp/c/OP_SPUT_WIDE_VOLATILE_JUMBO.c b/vm/mterp/c/OP_SPUT_WIDE_VOLATILE_JUMBO.c
new file mode 100644
index 0000000..3a86294
--- /dev/null
+++ b/vm/mterp/c/OP_SPUT_WIDE_VOLATILE_JUMBO.c
@@ -0,0 +1,2 @@
+HANDLE_SPUT_X_JUMBO(OP_SPUT_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE)
+OP_END
diff --git a/vm/mterp/c/OP_THROW_VERIFICATION_ERROR_JUMBO.c b/vm/mterp/c/OP_THROW_VERIFICATION_ERROR_JUMBO.c
new file mode 100644
index 0000000..c4607ec
--- /dev/null
+++ b/vm/mterp/c/OP_THROW_VERIFICATION_ERROR_JUMBO.c
@@ -0,0 +1,7 @@
+HANDLE_OPCODE(OP_THROW_VERIFICATION_ERROR_JUMBO)
+    EXPORT_PC();
+    vsrc1 = FETCH(3);
+    ref = FETCH(1) | (u4)FETCH(2) << 16;      /* class/field/method ref */
+    dvmThrowVerificationError(curMethod, vsrc1, ref);
+    GOTO_exceptionThrown();
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_27FF.c b/vm/mterp/c/OP_UNUSED_27FF.c
new file mode 100644
index 0000000..804138c
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_27FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_27FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_28FF.c b/vm/mterp/c/OP_UNUSED_28FF.c
new file mode 100644
index 0000000..f6e01f6
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_28FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_28FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_29FF.c b/vm/mterp/c/OP_UNUSED_29FF.c
new file mode 100644
index 0000000..0a14c5f
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_29FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_29FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_2AFF.c b/vm/mterp/c/OP_UNUSED_2AFF.c
new file mode 100644
index 0000000..701561a
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_2AFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_2AFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_2BFF.c b/vm/mterp/c/OP_UNUSED_2BFF.c
new file mode 100644
index 0000000..a73366b
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_2BFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_2BFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_2CFF.c b/vm/mterp/c/OP_UNUSED_2CFF.c
new file mode 100644
index 0000000..a220b03
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_2CFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_2CFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_2DFF.c b/vm/mterp/c/OP_UNUSED_2DFF.c
new file mode 100644
index 0000000..2d4ba4e
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_2DFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_2DFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_2EFF.c b/vm/mterp/c/OP_UNUSED_2EFF.c
new file mode 100644
index 0000000..49d7fa9
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_2EFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_2EFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_2FFF.c b/vm/mterp/c/OP_UNUSED_2FFF.c
new file mode 100644
index 0000000..9326d05
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_2FFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_2FFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_30FF.c b/vm/mterp/c/OP_UNUSED_30FF.c
new file mode 100644
index 0000000..f36814e
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_30FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_30FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_31FF.c b/vm/mterp/c/OP_UNUSED_31FF.c
new file mode 100644
index 0000000..20ab58b
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_31FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_31FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_32FF.c b/vm/mterp/c/OP_UNUSED_32FF.c
new file mode 100644
index 0000000..459b165
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_32FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_32FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_33FF.c b/vm/mterp/c/OP_UNUSED_33FF.c
new file mode 100644
index 0000000..83fb82c
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_33FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_33FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_34FF.c b/vm/mterp/c/OP_UNUSED_34FF.c
new file mode 100644
index 0000000..d9e7bb0
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_34FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_34FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_35FF.c b/vm/mterp/c/OP_UNUSED_35FF.c
new file mode 100644
index 0000000..2ed7b34
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_35FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_35FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_36FF.c b/vm/mterp/c/OP_UNUSED_36FF.c
new file mode 100644
index 0000000..2770594
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_36FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_36FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_37FF.c b/vm/mterp/c/OP_UNUSED_37FF.c
new file mode 100644
index 0000000..206b6a6
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_37FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_37FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_38FF.c b/vm/mterp/c/OP_UNUSED_38FF.c
new file mode 100644
index 0000000..68c94a0
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_38FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_38FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_39FF.c b/vm/mterp/c/OP_UNUSED_39FF.c
new file mode 100644
index 0000000..c003a87
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_39FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_39FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_3AFF.c b/vm/mterp/c/OP_UNUSED_3AFF.c
new file mode 100644
index 0000000..b43e356
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_3AFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_3AFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_3BFF.c b/vm/mterp/c/OP_UNUSED_3BFF.c
new file mode 100644
index 0000000..2188336
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_3BFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_3BFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_3CFF.c b/vm/mterp/c/OP_UNUSED_3CFF.c
new file mode 100644
index 0000000..f446d40
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_3CFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_3CFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_3DFF.c b/vm/mterp/c/OP_UNUSED_3DFF.c
new file mode 100644
index 0000000..f57cd64
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_3DFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_3DFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_3EFF.c b/vm/mterp/c/OP_UNUSED_3EFF.c
new file mode 100644
index 0000000..b81647f
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_3EFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_3EFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_3FFF.c b/vm/mterp/c/OP_UNUSED_3FFF.c
new file mode 100644
index 0000000..adfd65e
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_3FFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_3FFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_40FF.c b/vm/mterp/c/OP_UNUSED_40FF.c
new file mode 100644
index 0000000..aa87b39
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_40FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_40FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_41FF.c b/vm/mterp/c/OP_UNUSED_41FF.c
new file mode 100644
index 0000000..a2a3894
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_41FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_41FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_42FF.c b/vm/mterp/c/OP_UNUSED_42FF.c
new file mode 100644
index 0000000..edd4393
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_42FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_42FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_43FF.c b/vm/mterp/c/OP_UNUSED_43FF.c
new file mode 100644
index 0000000..6e616eb
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_43FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_43FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_44FF.c b/vm/mterp/c/OP_UNUSED_44FF.c
new file mode 100644
index 0000000..0eee91f
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_44FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_44FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_45FF.c b/vm/mterp/c/OP_UNUSED_45FF.c
new file mode 100644
index 0000000..4a6b48e
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_45FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_45FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_46FF.c b/vm/mterp/c/OP_UNUSED_46FF.c
new file mode 100644
index 0000000..e1c940e
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_46FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_46FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_47FF.c b/vm/mterp/c/OP_UNUSED_47FF.c
new file mode 100644
index 0000000..94df8bd
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_47FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_47FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_48FF.c b/vm/mterp/c/OP_UNUSED_48FF.c
new file mode 100644
index 0000000..1e2acdb
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_48FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_48FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_49FF.c b/vm/mterp/c/OP_UNUSED_49FF.c
new file mode 100644
index 0000000..b86d451
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_49FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_49FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_4AFF.c b/vm/mterp/c/OP_UNUSED_4AFF.c
new file mode 100644
index 0000000..9827b34
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_4AFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_4AFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_4BFF.c b/vm/mterp/c/OP_UNUSED_4BFF.c
new file mode 100644
index 0000000..9e26529
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_4BFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_4BFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_4CFF.c b/vm/mterp/c/OP_UNUSED_4CFF.c
new file mode 100644
index 0000000..f21fa48
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_4CFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_4CFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_4DFF.c b/vm/mterp/c/OP_UNUSED_4DFF.c
new file mode 100644
index 0000000..d596149
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_4DFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_4DFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_4EFF.c b/vm/mterp/c/OP_UNUSED_4EFF.c
new file mode 100644
index 0000000..7636c23
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_4EFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_4EFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_4FFF.c b/vm/mterp/c/OP_UNUSED_4FFF.c
new file mode 100644
index 0000000..5d20689
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_4FFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_4FFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_50FF.c b/vm/mterp/c/OP_UNUSED_50FF.c
new file mode 100644
index 0000000..4c577be
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_50FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_50FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_51FF.c b/vm/mterp/c/OP_UNUSED_51FF.c
new file mode 100644
index 0000000..e4a50d8
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_51FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_51FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_52FF.c b/vm/mterp/c/OP_UNUSED_52FF.c
new file mode 100644
index 0000000..0338e92
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_52FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_52FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_53FF.c b/vm/mterp/c/OP_UNUSED_53FF.c
new file mode 100644
index 0000000..a19aec9
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_53FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_53FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_54FF.c b/vm/mterp/c/OP_UNUSED_54FF.c
new file mode 100644
index 0000000..5f1f708
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_54FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_54FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_55FF.c b/vm/mterp/c/OP_UNUSED_55FF.c
new file mode 100644
index 0000000..3cc25e6
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_55FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_55FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_56FF.c b/vm/mterp/c/OP_UNUSED_56FF.c
new file mode 100644
index 0000000..b41be0f
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_56FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_56FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_57FF.c b/vm/mterp/c/OP_UNUSED_57FF.c
new file mode 100644
index 0000000..c0e8dd5
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_57FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_57FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_58FF.c b/vm/mterp/c/OP_UNUSED_58FF.c
new file mode 100644
index 0000000..9c0b8b0
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_58FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_58FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_59FF.c b/vm/mterp/c/OP_UNUSED_59FF.c
new file mode 100644
index 0000000..7c28662
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_59FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_59FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_5AFF.c b/vm/mterp/c/OP_UNUSED_5AFF.c
new file mode 100644
index 0000000..50c77bc
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_5AFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_5AFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_5BFF.c b/vm/mterp/c/OP_UNUSED_5BFF.c
new file mode 100644
index 0000000..a145bf4
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_5BFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_5BFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_5CFF.c b/vm/mterp/c/OP_UNUSED_5CFF.c
new file mode 100644
index 0000000..821bdcd
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_5CFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_5CFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_5DFF.c b/vm/mterp/c/OP_UNUSED_5DFF.c
new file mode 100644
index 0000000..982b1c2
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_5DFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_5DFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_5EFF.c b/vm/mterp/c/OP_UNUSED_5EFF.c
new file mode 100644
index 0000000..d0157f7
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_5EFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_5EFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_5FFF.c b/vm/mterp/c/OP_UNUSED_5FFF.c
new file mode 100644
index 0000000..3e18904
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_5FFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_5FFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_60FF.c b/vm/mterp/c/OP_UNUSED_60FF.c
new file mode 100644
index 0000000..96b15c6
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_60FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_60FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_61FF.c b/vm/mterp/c/OP_UNUSED_61FF.c
new file mode 100644
index 0000000..91a8a30
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_61FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_61FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_62FF.c b/vm/mterp/c/OP_UNUSED_62FF.c
new file mode 100644
index 0000000..b3bb114
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_62FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_62FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_63FF.c b/vm/mterp/c/OP_UNUSED_63FF.c
new file mode 100644
index 0000000..ea14458
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_63FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_63FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_64FF.c b/vm/mterp/c/OP_UNUSED_64FF.c
new file mode 100644
index 0000000..713277e
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_64FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_64FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_65FF.c b/vm/mterp/c/OP_UNUSED_65FF.c
new file mode 100644
index 0000000..6f73854
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_65FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_65FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_66FF.c b/vm/mterp/c/OP_UNUSED_66FF.c
new file mode 100644
index 0000000..a7ac805
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_66FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_66FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_67FF.c b/vm/mterp/c/OP_UNUSED_67FF.c
new file mode 100644
index 0000000..16d1155
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_67FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_67FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_68FF.c b/vm/mterp/c/OP_UNUSED_68FF.c
new file mode 100644
index 0000000..9b4eed7
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_68FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_68FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_69FF.c b/vm/mterp/c/OP_UNUSED_69FF.c
new file mode 100644
index 0000000..1bfdae9
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_69FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_69FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_6AFF.c b/vm/mterp/c/OP_UNUSED_6AFF.c
new file mode 100644
index 0000000..1f6dab0
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_6AFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_6AFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_6BFF.c b/vm/mterp/c/OP_UNUSED_6BFF.c
new file mode 100644
index 0000000..3739cfd
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_6BFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_6BFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_6CFF.c b/vm/mterp/c/OP_UNUSED_6CFF.c
new file mode 100644
index 0000000..28df1ff
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_6CFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_6CFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_6DFF.c b/vm/mterp/c/OP_UNUSED_6DFF.c
new file mode 100644
index 0000000..436064a
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_6DFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_6DFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_6EFF.c b/vm/mterp/c/OP_UNUSED_6EFF.c
new file mode 100644
index 0000000..c5c3720
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_6EFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_6EFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_6FFF.c b/vm/mterp/c/OP_UNUSED_6FFF.c
new file mode 100644
index 0000000..5bab85a
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_6FFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_6FFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_70FF.c b/vm/mterp/c/OP_UNUSED_70FF.c
new file mode 100644
index 0000000..15cb3cc
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_70FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_70FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_71FF.c b/vm/mterp/c/OP_UNUSED_71FF.c
new file mode 100644
index 0000000..3669855
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_71FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_71FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_72FF.c b/vm/mterp/c/OP_UNUSED_72FF.c
new file mode 100644
index 0000000..66b42ba
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_72FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_72FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_73FF.c b/vm/mterp/c/OP_UNUSED_73FF.c
new file mode 100644
index 0000000..1832581
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_73FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_73FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_74FF.c b/vm/mterp/c/OP_UNUSED_74FF.c
new file mode 100644
index 0000000..7f73d09
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_74FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_74FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_75FF.c b/vm/mterp/c/OP_UNUSED_75FF.c
new file mode 100644
index 0000000..d96b4aa
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_75FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_75FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_76FF.c b/vm/mterp/c/OP_UNUSED_76FF.c
new file mode 100644
index 0000000..b38cdf1
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_76FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_76FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_77FF.c b/vm/mterp/c/OP_UNUSED_77FF.c
new file mode 100644
index 0000000..dc128bb
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_77FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_77FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_78FF.c b/vm/mterp/c/OP_UNUSED_78FF.c
new file mode 100644
index 0000000..5ae4223
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_78FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_78FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_79FF.c b/vm/mterp/c/OP_UNUSED_79FF.c
new file mode 100644
index 0000000..4d8f99d
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_79FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_79FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_7AFF.c b/vm/mterp/c/OP_UNUSED_7AFF.c
new file mode 100644
index 0000000..93ea5a2
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_7AFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_7AFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_7BFF.c b/vm/mterp/c/OP_UNUSED_7BFF.c
new file mode 100644
index 0000000..6e4d99b
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_7BFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_7BFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_7CFF.c b/vm/mterp/c/OP_UNUSED_7CFF.c
new file mode 100644
index 0000000..f9bdd15
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_7CFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_7CFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_7DFF.c b/vm/mterp/c/OP_UNUSED_7DFF.c
new file mode 100644
index 0000000..198d71f
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_7DFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_7DFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_7EFF.c b/vm/mterp/c/OP_UNUSED_7EFF.c
new file mode 100644
index 0000000..49dce13
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_7EFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_7EFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_7FFF.c b/vm/mterp/c/OP_UNUSED_7FFF.c
new file mode 100644
index 0000000..4e8588f
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_7FFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_7FFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_80FF.c b/vm/mterp/c/OP_UNUSED_80FF.c
new file mode 100644
index 0000000..290d5d7
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_80FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_80FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_81FF.c b/vm/mterp/c/OP_UNUSED_81FF.c
new file mode 100644
index 0000000..21d8385
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_81FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_81FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_82FF.c b/vm/mterp/c/OP_UNUSED_82FF.c
new file mode 100644
index 0000000..6ae50cf
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_82FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_82FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_83FF.c b/vm/mterp/c/OP_UNUSED_83FF.c
new file mode 100644
index 0000000..807a1d3
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_83FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_83FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_84FF.c b/vm/mterp/c/OP_UNUSED_84FF.c
new file mode 100644
index 0000000..4764220
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_84FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_84FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_85FF.c b/vm/mterp/c/OP_UNUSED_85FF.c
new file mode 100644
index 0000000..2722a49
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_85FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_85FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_86FF.c b/vm/mterp/c/OP_UNUSED_86FF.c
new file mode 100644
index 0000000..7dadccc
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_86FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_86FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_87FF.c b/vm/mterp/c/OP_UNUSED_87FF.c
new file mode 100644
index 0000000..7de2178
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_87FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_87FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_88FF.c b/vm/mterp/c/OP_UNUSED_88FF.c
new file mode 100644
index 0000000..e6cf015
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_88FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_88FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_89FF.c b/vm/mterp/c/OP_UNUSED_89FF.c
new file mode 100644
index 0000000..5f23acf
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_89FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_89FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_8AFF.c b/vm/mterp/c/OP_UNUSED_8AFF.c
new file mode 100644
index 0000000..9582011
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_8AFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_8AFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_8BFF.c b/vm/mterp/c/OP_UNUSED_8BFF.c
new file mode 100644
index 0000000..2c37dc3
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_8BFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_8BFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_8CFF.c b/vm/mterp/c/OP_UNUSED_8CFF.c
new file mode 100644
index 0000000..bd67024
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_8CFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_8CFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_8DFF.c b/vm/mterp/c/OP_UNUSED_8DFF.c
new file mode 100644
index 0000000..c379d2e
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_8DFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_8DFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_8EFF.c b/vm/mterp/c/OP_UNUSED_8EFF.c
new file mode 100644
index 0000000..e78839b
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_8EFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_8EFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_8FFF.c b/vm/mterp/c/OP_UNUSED_8FFF.c
new file mode 100644
index 0000000..c911a0e
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_8FFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_8FFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_90FF.c b/vm/mterp/c/OP_UNUSED_90FF.c
new file mode 100644
index 0000000..e8a35fc
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_90FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_90FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_91FF.c b/vm/mterp/c/OP_UNUSED_91FF.c
new file mode 100644
index 0000000..e7fc01a
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_91FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_91FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_92FF.c b/vm/mterp/c/OP_UNUSED_92FF.c
new file mode 100644
index 0000000..612bd57
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_92FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_92FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_93FF.c b/vm/mterp/c/OP_UNUSED_93FF.c
new file mode 100644
index 0000000..9b69187
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_93FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_93FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_94FF.c b/vm/mterp/c/OP_UNUSED_94FF.c
new file mode 100644
index 0000000..022c1eb
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_94FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_94FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_95FF.c b/vm/mterp/c/OP_UNUSED_95FF.c
new file mode 100644
index 0000000..51d7467
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_95FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_95FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_96FF.c b/vm/mterp/c/OP_UNUSED_96FF.c
new file mode 100644
index 0000000..4067af1
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_96FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_96FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_97FF.c b/vm/mterp/c/OP_UNUSED_97FF.c
new file mode 100644
index 0000000..b4b4a77
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_97FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_97FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_98FF.c b/vm/mterp/c/OP_UNUSED_98FF.c
new file mode 100644
index 0000000..364aa4c
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_98FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_98FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_99FF.c b/vm/mterp/c/OP_UNUSED_99FF.c
new file mode 100644
index 0000000..e4c2fd5
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_99FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_99FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_9AFF.c b/vm/mterp/c/OP_UNUSED_9AFF.c
new file mode 100644
index 0000000..bce58e5
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_9AFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_9AFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_9BFF.c b/vm/mterp/c/OP_UNUSED_9BFF.c
new file mode 100644
index 0000000..b875b6e
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_9BFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_9BFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_9CFF.c b/vm/mterp/c/OP_UNUSED_9CFF.c
new file mode 100644
index 0000000..9933e47
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_9CFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_9CFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_9DFF.c b/vm/mterp/c/OP_UNUSED_9DFF.c
new file mode 100644
index 0000000..425a685
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_9DFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_9DFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_9EFF.c b/vm/mterp/c/OP_UNUSED_9EFF.c
new file mode 100644
index 0000000..ae4b842
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_9EFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_9EFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_9FFF.c b/vm/mterp/c/OP_UNUSED_9FFF.c
new file mode 100644
index 0000000..fbb0564
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_9FFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_9FFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_A0FF.c b/vm/mterp/c/OP_UNUSED_A0FF.c
new file mode 100644
index 0000000..546357a
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_A0FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_A0FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_A1FF.c b/vm/mterp/c/OP_UNUSED_A1FF.c
new file mode 100644
index 0000000..033c5ba
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_A1FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_A1FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_A2FF.c b/vm/mterp/c/OP_UNUSED_A2FF.c
new file mode 100644
index 0000000..10ba36a
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_A2FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_A2FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_A3FF.c b/vm/mterp/c/OP_UNUSED_A3FF.c
new file mode 100644
index 0000000..e1eb866
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_A3FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_A3FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_A4FF.c b/vm/mterp/c/OP_UNUSED_A4FF.c
new file mode 100644
index 0000000..515cde3
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_A4FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_A4FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_A5FF.c b/vm/mterp/c/OP_UNUSED_A5FF.c
new file mode 100644
index 0000000..15999ba
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_A5FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_A5FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_A6FF.c b/vm/mterp/c/OP_UNUSED_A6FF.c
new file mode 100644
index 0000000..2d85c0a
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_A6FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_A6FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_A7FF.c b/vm/mterp/c/OP_UNUSED_A7FF.c
new file mode 100644
index 0000000..9628590
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_A7FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_A7FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_A8FF.c b/vm/mterp/c/OP_UNUSED_A8FF.c
new file mode 100644
index 0000000..11ace32
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_A8FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_A8FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_A9FF.c b/vm/mterp/c/OP_UNUSED_A9FF.c
new file mode 100644
index 0000000..71dfabc
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_A9FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_A9FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_AAFF.c b/vm/mterp/c/OP_UNUSED_AAFF.c
new file mode 100644
index 0000000..01a7491
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_AAFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_AAFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_ABFF.c b/vm/mterp/c/OP_UNUSED_ABFF.c
new file mode 100644
index 0000000..942aa78
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_ABFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_ABFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_ACFF.c b/vm/mterp/c/OP_UNUSED_ACFF.c
new file mode 100644
index 0000000..82f1285
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_ACFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_ACFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_ADFF.c b/vm/mterp/c/OP_UNUSED_ADFF.c
new file mode 100644
index 0000000..3e11ea6
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_ADFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_ADFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_AEFF.c b/vm/mterp/c/OP_UNUSED_AEFF.c
new file mode 100644
index 0000000..586e745
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_AEFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_AEFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_AFFF.c b/vm/mterp/c/OP_UNUSED_AFFF.c
new file mode 100644
index 0000000..5ed1161
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_AFFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_AFFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_B0FF.c b/vm/mterp/c/OP_UNUSED_B0FF.c
new file mode 100644
index 0000000..3060736
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_B0FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_B0FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_B1FF.c b/vm/mterp/c/OP_UNUSED_B1FF.c
new file mode 100644
index 0000000..87bb7a6
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_B1FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_B1FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_B2FF.c b/vm/mterp/c/OP_UNUSED_B2FF.c
new file mode 100644
index 0000000..2bca4cd
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_B2FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_B2FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_B3FF.c b/vm/mterp/c/OP_UNUSED_B3FF.c
new file mode 100644
index 0000000..3f17d1f
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_B3FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_B3FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_B4FF.c b/vm/mterp/c/OP_UNUSED_B4FF.c
new file mode 100644
index 0000000..be957a6
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_B4FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_B4FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_B5FF.c b/vm/mterp/c/OP_UNUSED_B5FF.c
new file mode 100644
index 0000000..239da06
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_B5FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_B5FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_B6FF.c b/vm/mterp/c/OP_UNUSED_B6FF.c
new file mode 100644
index 0000000..e8fabb4
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_B6FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_B6FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_B7FF.c b/vm/mterp/c/OP_UNUSED_B7FF.c
new file mode 100644
index 0000000..bdd05bd
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_B7FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_B7FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_B8FF.c b/vm/mterp/c/OP_UNUSED_B8FF.c
new file mode 100644
index 0000000..c856720
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_B8FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_B8FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_B9FF.c b/vm/mterp/c/OP_UNUSED_B9FF.c
new file mode 100644
index 0000000..c3a963b
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_B9FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_B9FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_BAFF.c b/vm/mterp/c/OP_UNUSED_BAFF.c
new file mode 100644
index 0000000..d75e1cc
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_BAFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_BAFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_BBFF.c b/vm/mterp/c/OP_UNUSED_BBFF.c
new file mode 100644
index 0000000..3743698
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_BBFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_BBFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_BCFF.c b/vm/mterp/c/OP_UNUSED_BCFF.c
new file mode 100644
index 0000000..6358423
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_BCFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_BCFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_BDFF.c b/vm/mterp/c/OP_UNUSED_BDFF.c
new file mode 100644
index 0000000..36f176f
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_BDFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_BDFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_BEFF.c b/vm/mterp/c/OP_UNUSED_BEFF.c
new file mode 100644
index 0000000..817adca
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_BEFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_BEFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_BFFF.c b/vm/mterp/c/OP_UNUSED_BFFF.c
new file mode 100644
index 0000000..d318588
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_BFFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_BFFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_C0FF.c b/vm/mterp/c/OP_UNUSED_C0FF.c
new file mode 100644
index 0000000..c87f906
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_C0FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_C0FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_C1FF.c b/vm/mterp/c/OP_UNUSED_C1FF.c
new file mode 100644
index 0000000..7c1be52
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_C1FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_C1FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_C2FF.c b/vm/mterp/c/OP_UNUSED_C2FF.c
new file mode 100644
index 0000000..06e03f4
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_C2FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_C2FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_C3FF.c b/vm/mterp/c/OP_UNUSED_C3FF.c
new file mode 100644
index 0000000..b480c30
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_C3FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_C3FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_C4FF.c b/vm/mterp/c/OP_UNUSED_C4FF.c
new file mode 100644
index 0000000..b7afb1d
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_C4FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_C4FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_C5FF.c b/vm/mterp/c/OP_UNUSED_C5FF.c
new file mode 100644
index 0000000..432a4cd
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_C5FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_C5FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_C6FF.c b/vm/mterp/c/OP_UNUSED_C6FF.c
new file mode 100644
index 0000000..4f8f2a5
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_C6FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_C6FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_C7FF.c b/vm/mterp/c/OP_UNUSED_C7FF.c
new file mode 100644
index 0000000..92f2c6a
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_C7FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_C7FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_C8FF.c b/vm/mterp/c/OP_UNUSED_C8FF.c
new file mode 100644
index 0000000..33c4f59
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_C8FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_C8FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_C9FF.c b/vm/mterp/c/OP_UNUSED_C9FF.c
new file mode 100644
index 0000000..e04d233
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_C9FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_C9FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_CAFF.c b/vm/mterp/c/OP_UNUSED_CAFF.c
new file mode 100644
index 0000000..da3e0a5
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_CAFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_CAFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_CBFF.c b/vm/mterp/c/OP_UNUSED_CBFF.c
new file mode 100644
index 0000000..cf809b6
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_CBFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_CBFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_CCFF.c b/vm/mterp/c/OP_UNUSED_CCFF.c
new file mode 100644
index 0000000..7d843d3
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_CCFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_CCFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_CDFF.c b/vm/mterp/c/OP_UNUSED_CDFF.c
new file mode 100644
index 0000000..553be78
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_CDFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_CDFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_CEFF.c b/vm/mterp/c/OP_UNUSED_CEFF.c
new file mode 100644
index 0000000..01e933f
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_CEFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_CEFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_CFFF.c b/vm/mterp/c/OP_UNUSED_CFFF.c
new file mode 100644
index 0000000..806aa2f
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_CFFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_CFFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_D0FF.c b/vm/mterp/c/OP_UNUSED_D0FF.c
new file mode 100644
index 0000000..e2bb5a1
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_D0FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_D0FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_D1FF.c b/vm/mterp/c/OP_UNUSED_D1FF.c
new file mode 100644
index 0000000..0d91cca
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_D1FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_D1FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_D2FF.c b/vm/mterp/c/OP_UNUSED_D2FF.c
new file mode 100644
index 0000000..8a6db76
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_D2FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_D2FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_D3FF.c b/vm/mterp/c/OP_UNUSED_D3FF.c
new file mode 100644
index 0000000..32205bb
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_D3FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_D3FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_D4FF.c b/vm/mterp/c/OP_UNUSED_D4FF.c
new file mode 100644
index 0000000..0413ed6
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_D4FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_D4FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_D5FF.c b/vm/mterp/c/OP_UNUSED_D5FF.c
new file mode 100644
index 0000000..cc67e9d
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_D5FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_D5FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_D6FF.c b/vm/mterp/c/OP_UNUSED_D6FF.c
new file mode 100644
index 0000000..3711bbc
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_D6FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_D6FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_D7FF.c b/vm/mterp/c/OP_UNUSED_D7FF.c
new file mode 100644
index 0000000..6a17a2d
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_D7FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_D7FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_D8FF.c b/vm/mterp/c/OP_UNUSED_D8FF.c
new file mode 100644
index 0000000..c934090
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_D8FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_D8FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_D9FF.c b/vm/mterp/c/OP_UNUSED_D9FF.c
new file mode 100644
index 0000000..78984af
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_D9FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_D9FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_DAFF.c b/vm/mterp/c/OP_UNUSED_DAFF.c
new file mode 100644
index 0000000..2a177f0
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_DAFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_DAFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_DBFF.c b/vm/mterp/c/OP_UNUSED_DBFF.c
new file mode 100644
index 0000000..5447dc7
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_DBFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_DBFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_DCFF.c b/vm/mterp/c/OP_UNUSED_DCFF.c
new file mode 100644
index 0000000..a6ae5de
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_DCFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_DCFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_DDFF.c b/vm/mterp/c/OP_UNUSED_DDFF.c
new file mode 100644
index 0000000..a18cbbe
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_DDFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_DDFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_DEFF.c b/vm/mterp/c/OP_UNUSED_DEFF.c
new file mode 100644
index 0000000..c9be0ed
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_DEFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_DEFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_DFFF.c b/vm/mterp/c/OP_UNUSED_DFFF.c
new file mode 100644
index 0000000..4d455ee
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_DFFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_DFFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_E0FF.c b/vm/mterp/c/OP_UNUSED_E0FF.c
new file mode 100644
index 0000000..9507bcb
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_E0FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_E0FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_E1FF.c b/vm/mterp/c/OP_UNUSED_E1FF.c
new file mode 100644
index 0000000..84f6eed
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_E1FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_E1FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_E2FF.c b/vm/mterp/c/OP_UNUSED_E2FF.c
new file mode 100644
index 0000000..a6153cc
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_E2FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_E2FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_E3FF.c b/vm/mterp/c/OP_UNUSED_E3FF.c
new file mode 100644
index 0000000..fc0181f
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_E3FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_E3FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_E4FF.c b/vm/mterp/c/OP_UNUSED_E4FF.c
new file mode 100644
index 0000000..cc11656
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_E4FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_E4FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_E5FF.c b/vm/mterp/c/OP_UNUSED_E5FF.c
new file mode 100644
index 0000000..1c40042
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_E5FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_E5FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_E6FF.c b/vm/mterp/c/OP_UNUSED_E6FF.c
new file mode 100644
index 0000000..3686579
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_E6FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_E6FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_E7FF.c b/vm/mterp/c/OP_UNUSED_E7FF.c
new file mode 100644
index 0000000..060be13
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_E7FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_E7FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_E8FF.c b/vm/mterp/c/OP_UNUSED_E8FF.c
new file mode 100644
index 0000000..436883b
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_E8FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_E8FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_E9FF.c b/vm/mterp/c/OP_UNUSED_E9FF.c
new file mode 100644
index 0000000..7c0cd56
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_E9FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_E9FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_EAFF.c b/vm/mterp/c/OP_UNUSED_EAFF.c
new file mode 100644
index 0000000..cb33407
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_EAFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_EAFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_EBFF.c b/vm/mterp/c/OP_UNUSED_EBFF.c
new file mode 100644
index 0000000..16f7a20
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_EBFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_EBFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_ECFF.c b/vm/mterp/c/OP_UNUSED_ECFF.c
new file mode 100644
index 0000000..7ae6372
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_ECFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_ECFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_EDFF.c b/vm/mterp/c/OP_UNUSED_EDFF.c
new file mode 100644
index 0000000..d6528a1
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_EDFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_EDFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_EEFF.c b/vm/mterp/c/OP_UNUSED_EEFF.c
new file mode 100644
index 0000000..24918ef
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_EEFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_EEFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_EFFF.c b/vm/mterp/c/OP_UNUSED_EFFF.c
new file mode 100644
index 0000000..f15c2be
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_EFFF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_EFFF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_F0FF.c b/vm/mterp/c/OP_UNUSED_F0FF.c
new file mode 100644
index 0000000..f9049b5
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_F0FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_F0FF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_F1FF.c b/vm/mterp/c/OP_UNUSED_F1FF.c
new file mode 100644
index 0000000..a3a65d7
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_F1FF.c
@@ -0,0 +1,8 @@
+HANDLE_OPCODE(OP_UNUSED_F1FF)
+    /*
+     * In portable interp, most unused opcodes will fall through to here.
+     */
+    LOGE("unknown opcode 0x%04x\n", inst);
+    dvmAbort();
+    FINISH(1);
+OP_END
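
Note on the naming of the stub files above: under the extended (jumbo) opcode scheme, the 16-bit opcode unit's low-order byte is 0xFF, marking it as a second-page opcode, and the high-order byte selects the opcode within that page, so OP_UNUSED_F1FF corresponds to the code unit 0xF1FF. A minimal standalone sketch of that byte split follows; the u2 typedef mirrors the VM's headers, and the program is illustrative only, not part of the patch.

    #include <stdio.h>

    typedef unsigned short u2;   /* 16-bit code unit, as in the VM headers */

    int main(void)
    {
        u2 inst = 0xF1FF;        /* first code unit of an extended opcode */
        printf("page selector (low byte):   0x%02x\n", inst & 0xFF);  /* 0xff */
        printf("opcode within page (high):  0x%02x\n", inst >> 8);    /* 0xf1 */
        return 0;
    }
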
diff --git a/vm/mterp/c/gotoTargets.c b/vm/mterp/c/gotoTargets.c
index 0db6fb7..fda13eb 100644
--- a/vm/mterp/c/gotoTargets.c
+++ b/vm/mterp/c/gotoTargets.c
@@ -8,7 +8,7 @@
  * next instruction.  Here, these are subroutines that return to the caller.
  */
 
-GOTO_TARGET(filledNewArray, bool methodCallRange)
+GOTO_TARGET(filledNewArray, bool methodCallRange, bool jumboFormat)
     {
         ClassObject* arrayClass;
         ArrayObject* newArray;
@@ -19,19 +19,28 @@
 
         EXPORT_PC();
 
-        ref = FETCH(1);             /* class ref */
-        vdst = FETCH(2);            /* first 4 regs -or- range base */
-
-        if (methodCallRange) {
-            vsrc1 = INST_AA(inst);  /* #of elements */
-            arg5 = -1;              /* silence compiler warning */
-            ILOGV("|filled-new-array-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* class ref */
+            vsrc1 = FETCH(3);                     /* #of elements */
+            vdst = FETCH(4);                      /* range base */
+            arg5 = -1;                            /* silence compiler warning */
+            ILOGV("|filled-new-array/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
         } else {
-            arg5 = INST_A(inst);
-            vsrc1 = INST_B(inst);   /* #of elements */
-            ILOGV("|filled-new-array args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1, ref, vdst, arg5);
+            ref = FETCH(1);             /* class ref */
+            vdst = FETCH(2);            /* first 4 regs -or- range base */
+
+            if (methodCallRange) {
+                vsrc1 = INST_AA(inst);  /* #of elements */
+                arg5 = -1;              /* silence compiler warning */
+                ILOGV("|filled-new-array-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+            } else {
+                arg5 = INST_A(inst);
+                vsrc1 = INST_B(inst);   /* #of elements */
+                ILOGV("|filled-new-array args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1, ref, vdst, arg5);
+            }
         }
 
         /*
@@ -45,7 +54,7 @@
         }
         /*
         if (!dvmIsArrayClass(arrayClass)) {
-            dvmThrowException("Ljava/lang/RuntimeError;",
+            dvmThrowRuntimeException(
                 "filled-new-array needs array class");
             GOTO_exceptionThrown();
         }
@@ -61,13 +70,12 @@
         typeCh = arrayClass->descriptor[1];
         if (typeCh == 'D' || typeCh == 'J') {
             /* category 2 primitives not allowed */
-            dvmThrowException("Ljava/lang/RuntimeError;",
-                "bad filled array req");
+            dvmThrowRuntimeException("bad filled array req");
             GOTO_exceptionThrown();
         } else if (typeCh != 'L' && typeCh != '[' && typeCh != 'I') {
             /* TODO: requires multiple "fill in" loops with different widths */
             LOGE("non-int primitives not implemented\n");
-            dvmThrowException("Ljava/lang/InternalError;",
+            dvmThrowInternalError(
                 "filled-new-array not implemented for anything but 'int'");
             GOTO_exceptionThrown();
         }
@@ -100,35 +108,49 @@
 
         retval.l = newArray;
     }
-    FINISH(3);
+    if (jumboFormat) {
+        FINISH(5);
+    } else {
+        FINISH(3);
+    }
 GOTO_TARGET_END
 
 
-GOTO_TARGET(invokeVirtual, bool methodCallRange)
+GOTO_TARGET(invokeVirtual, bool methodCallRange, bool jumboFormat)
     {
         Method* baseMethod;
         Object* thisPtr;
 
         EXPORT_PC();
 
-        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-        ref = FETCH(1);             /* method ref */
-        vdst = FETCH(2);            /* 4 regs -or- first reg */
-
-        /*
-         * The object against which we are executing a method is always
-         * in the first argument.
-         */
-        if (methodCallRange) {
-            assert(vsrc1 > 0);
-            ILOGV("|invoke-virtual-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+            vsrc1 = FETCH(3);                     /* count */
+            vdst = FETCH(4);                      /* first reg */
+            ADJUST_PC(2);     /* advance pc partially to make returns easier */
+            ILOGV("|invoke-virtual/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
             thisPtr = (Object*) GET_REGISTER(vdst);
         } else {
-            assert((vsrc1>>4) > 0);
-            ILOGV("|invoke-virtual args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
-            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+            ref = FETCH(1);             /* method ref */
+            vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            if (methodCallRange) {
+                assert(vsrc1 > 0);
+                ILOGV("|invoke-virtual-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+                thisPtr = (Object*) GET_REGISTER(vdst);
+            } else {
+                assert((vsrc1>>4) > 0);
+                ILOGV("|invoke-virtual args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+                thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+            }
         }
 
         if (!checkForNull(thisPtr))
@@ -169,8 +191,7 @@
              * Works fine unless Sub stops providing an implementation of
              * the method.
              */
-            dvmThrowException("Ljava/lang/AbstractMethodError;",
-                "abstract method not implemented");
+            dvmThrowAbstractMethodError("abstract method not implemented");
             GOTO_exceptionThrown();
         }
 #else
@@ -200,26 +221,37 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeSuper, bool methodCallRange)
+GOTO_TARGET(invokeSuper, bool methodCallRange, bool jumboFormat)
     {
         Method* baseMethod;
         u2 thisReg;
 
         EXPORT_PC();
 
-        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-        ref = FETCH(1);             /* method ref */
-        vdst = FETCH(2);            /* 4 regs -or- first reg */
-
-        if (methodCallRange) {
-            ILOGV("|invoke-super-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+            vsrc1 = FETCH(3);                     /* count */
+            vdst = FETCH(4);                      /* first reg */
+            ADJUST_PC(2);     /* advance pc partially to make returns easier */
+            ILOGV("|invoke-super/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
             thisReg = vdst;
         } else {
-            ILOGV("|invoke-super args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
-            thisReg = vdst & 0x0f;
+            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+            ref = FETCH(1);             /* method ref */
+            vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+            if (methodCallRange) {
+                ILOGV("|invoke-super-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+                thisReg = vdst;
+            } else {
+                ILOGV("|invoke-super args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+                thisReg = vdst & 0x0f;
+            }
         }
+
         /* impossible in well-formed code, but we must check nevertheless */
         if (!checkForNull((Object*) GET_REGISTER(thisReg)))
             GOTO_exceptionThrown();
@@ -254,15 +286,13 @@
              * Method does not exist in the superclass.  Could happen if
              * superclass gets updated.
              */
-            dvmThrowException("Ljava/lang/NoSuchMethodError;",
-                baseMethod->name);
+            dvmThrowNoSuchMethodError(baseMethod->name);
             GOTO_exceptionThrown();
         }
         methodToCall = curMethod->clazz->super->vtable[baseMethod->methodIndex];
 #if 0
         if (dvmIsAbstractMethod(methodToCall)) {
-            dvmThrowException("Ljava/lang/AbstractMethodError;",
-                "abstract method not implemented");
+            dvmThrowAbstractMethodError("abstract method not implemented");
             GOTO_exceptionThrown();
         }
 #else
@@ -278,32 +308,43 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeInterface, bool methodCallRange)
+GOTO_TARGET(invokeInterface, bool methodCallRange, bool jumboFormat)
     {
         Object* thisPtr;
         ClassObject* thisClass;
 
         EXPORT_PC();
 
-        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-        ref = FETCH(1);             /* method ref */
-        vdst = FETCH(2);            /* 4 regs -or- first reg */
-
-        /*
-         * The object against which we are executing a method is always
-         * in the first argument.
-         */
-        if (methodCallRange) {
-            assert(vsrc1 > 0);
-            ILOGV("|invoke-interface-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+            vsrc1 = FETCH(3);                     /* count */
+            vdst = FETCH(4);                      /* first reg */
+            ADJUST_PC(2);     /* advance pc partially to make returns easier */
+            ILOGV("|invoke-interface/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
             thisPtr = (Object*) GET_REGISTER(vdst);
         } else {
-            assert((vsrc1>>4) > 0);
-            ILOGV("|invoke-interface args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
-            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+            ref = FETCH(1);             /* method ref */
+            vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            if (methodCallRange) {
+                assert(vsrc1 > 0);
+                ILOGV("|invoke-interface-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+                thisPtr = (Object*) GET_REGISTER(vdst);
+            } else {
+                assert((vsrc1>>4) > 0);
+                ILOGV("|invoke-interface args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+                thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+            }
         }
+
         if (!checkForNull(thisPtr))
             GOTO_exceptionThrown();
 
@@ -328,25 +369,36 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeDirect, bool methodCallRange)
+GOTO_TARGET(invokeDirect, bool methodCallRange, bool jumboFormat)
     {
         u2 thisReg;
 
-        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-        ref = FETCH(1);             /* method ref */
-        vdst = FETCH(2);            /* 4 regs -or- first reg */
-
         EXPORT_PC();
 
-        if (methodCallRange) {
-            ILOGV("|invoke-direct-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+            vsrc1 = FETCH(3);                     /* count */
+            vdst = FETCH(4);                      /* first reg */
+            ADJUST_PC(2);     /* advance pc partially to make returns easier */
+            ILOGV("|invoke-direct/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
             thisReg = vdst;
         } else {
-            ILOGV("|invoke-direct args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
-            thisReg = vdst & 0x0f;
+            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+            ref = FETCH(1);             /* method ref */
+            vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+            if (methodCallRange) {
+                ILOGV("|invoke-direct-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+                thisReg = vdst;
+            } else {
+                ILOGV("|invoke-direct args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+                thisReg = vdst & 0x0f;
+            }
         }
+
         if (!checkForNull((Object*) GET_REGISTER(thisReg)))
             GOTO_exceptionThrown();
 
@@ -363,19 +415,28 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeStatic, bool methodCallRange)
-    vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-    ref = FETCH(1);             /* method ref */
-    vdst = FETCH(2);            /* 4 regs -or- first reg */
-
+GOTO_TARGET(invokeStatic, bool methodCallRange, bool jumboFormat)
     EXPORT_PC();
 
-    if (methodCallRange)
-        ILOGV("|invoke-static-range args=%d @0x%04x {regs=v%d-v%d}",
+    if (jumboFormat) {
+        ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+        vsrc1 = FETCH(3);                     /* count */
+        vdst = FETCH(4);                      /* first reg */
+        ADJUST_PC(2);     /* advance pc partially to make returns easier */
+        ILOGV("|invoke-static/jumbo args=%d @0x%08x {regs=v%d-v%d}",
             vsrc1, ref, vdst, vdst+vsrc1-1);
-    else
-        ILOGV("|invoke-static args=%d @0x%04x {regs=0x%04x %x}",
-            vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+    } else {
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* method ref */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        if (methodCallRange)
+            ILOGV("|invoke-static-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+        else
+            ILOGV("|invoke-static args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+    }
 
     methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
     if (methodToCall == NULL) {
@@ -392,13 +453,13 @@
          */
         if (dvmDexGetResolvedMethod(methodClassDex, ref) == NULL) {
             /* Class initialization is still ongoing */
-            ABORT_JIT_TSELECT();
+            END_JIT_TSELECT();
         }
     }
     GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeVirtualQuick, bool methodCallRange)
+GOTO_TARGET(invokeVirtualQuick, bool methodCallRange, bool jumboFormat)
     {
         Object* thisPtr;
 
@@ -435,13 +496,12 @@
          * Combine the object we found with the vtable offset in the
          * method.
          */
-        assert(ref < thisPtr->clazz->vtableCount);
+        assert(ref < (unsigned int) thisPtr->clazz->vtableCount);
         methodToCall = thisPtr->clazz->vtable[ref];
 
 #if 0
         if (dvmIsAbstractMethod(methodToCall)) {
-            dvmThrowException("Ljava/lang/AbstractMethodError;",
-                "abstract method not implemented");
+            dvmThrowAbstractMethodError("abstract method not implemented");
             GOTO_exceptionThrown();
         }
 #else
@@ -457,7 +517,7 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeSuperQuick, bool methodCallRange)
+GOTO_TARGET(invokeSuperQuick, bool methodCallRange, bool jumboFormat)
     {
         u2 thisReg;
 
@@ -482,11 +542,11 @@
 
 #if 0   /* impossible in optimized + verified code */
         if (ref >= curMethod->clazz->super->vtableCount) {
-            dvmThrowException("Ljava/lang/NoSuchMethodError;", NULL);
+            dvmThrowNoSuchMethodError(NULL);
             GOTO_exceptionThrown();
         }
 #else
-        assert(ref < curMethod->clazz->super->vtableCount);
+        assert(ref < (unsigned int) curMethod->clazz->super->vtableCount);
 #endif
 
         /*
@@ -502,8 +562,7 @@
 
 #if 0
         if (dvmIsAbstractMethod(methodToCall)) {
-            dvmThrowException("Ljava/lang/AbstractMethodError;",
-                "abstract method not implemented");
+            dvmThrowAbstractMethodError("abstract method not implemented");
             GOTO_exceptionThrown();
         }
 #else
@@ -551,7 +610,7 @@
 #endif
 
         /* back up to previous frame and see if we hit a break */
-        fp = saveArea->prevFrame;
+        fp = (u4*)saveArea->prevFrame;
         assert(fp != NULL);
         if (dvmIsBreakFrame(fp)) {
             /* bail without popping the method frame from stack */
@@ -605,8 +664,8 @@
         PERIODIC_CHECKS(kInterpEntryThrow, 0);
 
 #if defined(WITH_JIT)
-        // Something threw during trace selection - abort the current trace
-        ABORT_JIT_TSELECT();
+        // Something threw during trace selection - end the current trace
+        END_JIT_TSELECT();
 #endif
         /*
          * We save off the exception and clear the exception status.  While
@@ -638,7 +697,7 @@
          * here, and have the JNI exception code do the reporting to the
          * debugger.
          */
-        if (gDvm.debuggerActive) {
+        if (DEBUGGER_ACTIVE) {
             void* catchFrame;
             catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
                         exception, true, &catchFrame);
@@ -663,7 +722,7 @@
          * the "catch" blocks.
          */
         catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
-                    exception, false, (void*)&fp);
+                    exception, false, (void**)(void*)&fp);
 
         /*
          * Restore the stack bounds after an overflow.  This isn't going to
@@ -896,7 +955,7 @@
             curMethod = methodToCall;
             methodClassDex = curMethod->clazz->pDvmDex;
             pc = methodToCall->insns;
-            fp = self->curFrame = newFp;
+            self->curFrame = fp = newFp;
 #ifdef EASY_GDB
             debugSaveArea = SAVEAREA_FROM_FP(newFp);
 #endif
@@ -909,18 +968,14 @@
             FINISH(0);                              // jump to method start
         } else {
             /* set this up for JNI locals, even if not a JNI native */
-#ifdef USE_INDIRECT_REF
             newSaveArea->xtra.localRefCookie = self->jniLocalRefTable.segmentState.all;
-#else
-            newSaveArea->xtra.localRefCookie = self->jniLocalRefTable.nextEntry;
-#endif
 
             self->curFrame = newFp;
 
             DUMP_REGS(methodToCall, newFp, true);   // show input args
 
 #if (INTERP_TYPE == INTERP_DBG)
-            if (gDvm.debuggerActive) {
+            if (DEBUGGER_ACTIVE) {
                 dvmDbgPostLocationEvent(methodToCall, -1,
                     dvmGetThisPtr(curMethod, fp), DBG_METHOD_ENTRY);
             }
@@ -947,7 +1002,7 @@
             (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
 
 #if (INTERP_TYPE == INTERP_DBG)
-            if (gDvm.debuggerActive) {
+            if (DEBUGGER_ACTIVE) {
                 dvmDbgPostLocationEvent(methodToCall, -1,
                     dvmGetThisPtr(curMethod, fp), DBG_METHOD_EXIT);
             }
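
In the jumbo variants added above, the 32-bit class or method reference no longer fits in a single code unit, so it is assembled from two consecutive 16-bit units, low half first: ref = FETCH(1) | (u4)FETCH(2) << 16. A minimal standalone sketch of that assembly follows, with a plain array read standing in for FETCH() and made-up code-unit values; both stand-ins are assumptions for illustration only.

    #include <stdio.h>

    typedef unsigned short u2;
    typedef unsigned int   u4;

    int main(void)
    {
        /* code units of a hypothetical jumbo invoke: opcode unit (value
         * irrelevant here), ref low half, ref high half, count, first reg */
        u2 insns[] = { 0, 0x1234, 0x0001, 0x0003, 0x0010 };

        u4 ref = insns[1] | (u4)insns[2] << 16;  /* FETCH(1) | FETCH(2) << 16 */
        printf("method ref = 0x%08x\n", ref);    /* prints 0x00011234 */
        return 0;
    }
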
diff --git a/vm/mterp/c/header.c b/vm/mterp/c/header.c
index aaf6dab..4f2cabe 100644
--- a/vm/mterp/c/header.c
+++ b/vm/mterp/c/header.c
@@ -51,24 +51,31 @@
 #endif
 
 /*
- * ARM EABI requires 64-bit alignment for access to 64-bit data types.  We
- * can't just use pointers to copy 64-bit values out of our interpreted
- * register set, because gcc will generate ldrd/strd.
+ * Some architectures require 64-bit alignment for access to 64-bit data
+ * types.  We can't just use pointers to copy 64-bit values out of our
+ * interpreted register set, because gcc may assume the pointer target is
+ * aligned and generate invalid code.
  *
- * The __UNION version copies data in and out of a union.  The __MEMCPY
- * version uses a memcpy() call to do the transfer; gcc is smart enough to
- * not actually call memcpy().  The __UNION version is very bad on ARM;
- * it only uses one more instruction than __MEMCPY, but for some reason
- * gcc thinks it needs separate storage for every instance of the union.
- * On top of that, it feels the need to zero them out at the start of the
- * method.  Net result is we zero out ~700 bytes of stack space at the top
- * of the interpreter using ARM STM instructions.
+ * There are two common approaches:
+ *  (1) Use a union that defines a 32-bit pair and a 64-bit value.
+ *  (2) Call memcpy().
+ *
+ * Depending upon what compiler you're using and what options are specified,
+ * one may be faster than the other.  For example, the compiler might
+ * convert a memcpy() of 8 bytes into a series of instructions and omit
+ * the call.  The union version could cause some strange side-effects,
+ * e.g. for a while ARM gcc thought it needed separate storage for each
+ * inlined instance, and generated instructions to zero out ~700 bytes of
+ * stack space at the top of the interpreter.
+ *
+ * The default is to use memcpy().  The current gcc for ARM seems to do
+ * better with the union.
  */
 #if defined(__ARM_EABI__)
-//# define NO_UNALIGN_64__UNION
-# define NO_UNALIGN_64__MEMCPY
+# define NO_UNALIGN_64__UNION
 #endif
 
+
 //#define LOG_INSTR                   /* verbose debugging */
 /* set and adjust ANDROID_LOG_TAGS='*:i jdwp:i dalvikvm:i dalvikvmi:i' */
 
@@ -164,12 +171,10 @@
     conv.parts[0] = ptr[0];
     conv.parts[1] = ptr[1];
     return conv.ll;
-#elif defined(NO_UNALIGN_64__MEMCPY)
+#else
     s8 val;
     memcpy(&val, &ptr[idx], 8);
     return val;
-#else
-    return *((s8*) &ptr[idx]);
 #endif
 }
 
@@ -183,10 +188,8 @@
     conv.ll = val;
     ptr[0] = conv.parts[0];
     ptr[1] = conv.parts[1];
-#elif defined(NO_UNALIGN_64__MEMCPY)
-    memcpy(&ptr[idx], &val, 8);
 #else
-    *((s8*) &ptr[idx]) = val;
+    memcpy(&ptr[idx], &val, 8);
 #endif
 }
 
@@ -200,12 +203,10 @@
     conv.parts[0] = ptr[0];
     conv.parts[1] = ptr[1];
     return conv.d;
-#elif defined(NO_UNALIGN_64__MEMCPY)
+#else
     double dval;
     memcpy(&dval, &ptr[idx], 8);
     return dval;
-#else
-    return *((double*) &ptr[idx]);
 #endif
 }
 
@@ -219,10 +220,8 @@
     conv.d = dval;
     ptr[0] = conv.parts[0];
     ptr[1] = conv.parts[1];
-#elif defined(NO_UNALIGN_64__MEMCPY)
-    memcpy(&ptr[idx], &dval, 8);
 #else
-    *((double*) &ptr[idx]) = dval;
+    memcpy(&ptr[idx], &dval, 8);
 #endif
 }
 
@@ -311,10 +310,10 @@
 
 /*
  * The current PC must be available to Throwable constructors, e.g.
- * those created by dvmThrowException(), so that the exception stack
- * trace can be generated correctly.  If we don't do this, the offset
- * within the current method won't be shown correctly.  See the notes
- * in Exception.c.
+ * those created by the various exception throw routines, so that the
+ * exception stack trace can be generated correctly.  If we don't do this,
+ * the offset within the current method won't be shown correctly.  See the
+ * notes in Exception.c.
  *
  * This is also used to determine the address for precise GC.
  *
@@ -353,7 +352,7 @@
 static inline bool checkForNull(Object* obj)
 {
     if (obj == NULL) {
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         return false;
     }
 #ifdef WITH_EXTRA_OBJECT_VALIDATION
@@ -385,7 +384,7 @@
 {
     if (obj == NULL) {
         EXPORT_PC();
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         return false;
     }
 #ifdef WITH_EXTRA_OBJECT_VALIDATION
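
The rewritten comment in header.c above describes two ways to move 64-bit values in and out of the 32-bit interpreted register set without tripping the compiler's alignment assumptions: a union of two 32-bit halves, or memcpy(). A standalone sketch of both approaches follows, with typedefs mirroring the VM's; it is illustrative only and not part of the patch.

    #include <stdio.h>
    #include <string.h>

    typedef unsigned int u4;
    typedef long long    s8;

    /* union approach: copy the halves, then read the 64-bit member */
    static s8 getLongUnion(const u4* ptr)
    {
        union { s8 ll; u4 parts[2]; } conv;
        conv.parts[0] = ptr[0];
        conv.parts[1] = ptr[1];
        return conv.ll;
    }

    /* memcpy approach: let the compiler expand an 8-byte copy inline */
    static s8 getLongMemcpy(const u4* ptr)
    {
        s8 val;
        memcpy(&val, ptr, sizeof(val));
        return val;
    }

    int main(void)
    {
        u4 regs[2] = { 0x89abcdefU, 0x01234567U };
        /* both read the same 8 bytes; on a little-endian machine each
         * prints 0x0123456789abcdef */
        printf("union:  0x%016llx\n", (unsigned long long)getLongUnion(regs));
        printf("memcpy: 0x%016llx\n", (unsigned long long)getLongMemcpy(regs));
        return 0;
    }
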
diff --git a/vm/mterp/c/opcommon.c b/vm/mterp/c/opcommon.c
index 9c7da89..0cb3547 100644
--- a/vm/mterp/c/opcommon.c
+++ b/vm/mterp/c/opcommon.c
@@ -1,12 +1,12 @@
 /* forward declarations of goto targets */
-GOTO_TARGET_DECL(filledNewArray, bool methodCallRange);
-GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange);
-GOTO_TARGET_DECL(invokeSuper, bool methodCallRange);
-GOTO_TARGET_DECL(invokeInterface, bool methodCallRange);
-GOTO_TARGET_DECL(invokeDirect, bool methodCallRange);
-GOTO_TARGET_DECL(invokeStatic, bool methodCallRange);
-GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange);
-GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange);
+GOTO_TARGET_DECL(filledNewArray, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeSuper, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeInterface, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeDirect, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeStatic, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange, bool jumboFormat);
 GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall,
     u2 count, u2 regs);
 GOTO_TARGET_DECL(returnFromMethod);
@@ -149,8 +149,7 @@
             secondVal = GET_REGISTER(vsrc2);                                \
             if (secondVal == 0) {                                           \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && secondVal == -1) {            \
@@ -196,9 +195,8 @@
             firstVal = GET_REGISTER(vsrc1);                                 \
             if ((s2) vsrc2 == 0) {                                          \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
-                GOTO_exceptionThrown();                                      \
+                dvmThrowArithmeticException("divide by zero");              \
+                GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && ((s2) vsrc2) == -1) {         \
                 /* won't generate /lit16 instr for this; check anyway */    \
@@ -231,8 +229,7 @@
             firstVal = GET_REGISTER(vsrc1);                                 \
             if ((s1) vsrc2 == 0) {                                          \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && ((s1) vsrc2) == -1) {         \
@@ -277,8 +274,7 @@
             secondVal = GET_REGISTER(vsrc1);                                \
             if (secondVal == 0) {                                           \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && secondVal == -1) {            \
@@ -320,8 +316,7 @@
             secondVal = GET_REGISTER_WIDE(vsrc2);                           \
             if (secondVal == 0LL) {                                         \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u8)firstVal == 0x8000000000000000ULL &&                    \
@@ -367,8 +362,7 @@
             secondVal = GET_REGISTER_WIDE(vsrc1);                           \
             if (secondVal == 0LL) {                                         \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u8)firstVal == 0x8000000000000000ULL &&                    \
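
The 0x80000000 and 0x8000000000000000 guards above are not redundant with
the divide-by-zero check: dividing the most negative two's-complement value
by -1 is the one quotient that overflows, and on some hosts (notably x86's
idiv) it traps instead of wrapping.  The interpreter therefore pins that
case to the Java-defined results (MIN_VALUE for div, 0 for rem) before
letting the host do the arithmetic.  A tiny standalone illustration
(hypothetical helper, not VM code):

    #include <stdint.h>

    /* Integer division with Java semantics for the one overflowing case:
     * Integer.MIN_VALUE / -1 == Integer.MIN_VALUE (and MIN_VALUE % -1 == 0).
     * Divide-by-zero is assumed to have been rejected already. */
    static int32_t javaIntDiv(int32_t num, int32_t den)
    {
        if ((uint32_t)num == 0x80000000u && den == -1)
            return num;                 /* avoid host UB / SIGFPE */
        return num / den;
    }
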
@@ -458,7 +452,8 @@
         if (!checkForNull((Object*) arrayObj))                              \
             GOTO_exceptionThrown();                                         \
         if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
-            dvmThrowAIOOBE(GET_REGISTER(vsrc2), arrayObj->length);          \
+            dvmThrowArrayIndexOutOfBoundsException(                         \
+                arrayObj->length, GET_REGISTER(vsrc2));                     \
             GOTO_exceptionThrown();                                         \
         }                                                                   \
         SET_REGISTER##_regsize(vdst,                                        \
@@ -482,7 +477,8 @@
         if (!checkForNull((Object*) arrayObj))                              \
             GOTO_exceptionThrown();                                         \
         if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
-            dvmThrowAIOOBE(GET_REGISTER(vsrc2), arrayObj->length);          \
+            dvmThrowArrayIndexOutOfBoundsException(                         \
+                arrayObj->length, GET_REGISTER(vsrc2));                     \
             GOTO_exceptionThrown();                                         \
         }                                                                   \
         ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\
@@ -535,6 +531,34 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_IGET_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, vCCCC, class@AAAAAAAA*/)                 \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        vsrc1 = FETCH(4);                      /* object ptr */             \
+        ILOGV("|iget%s/jumbo v%d,v%d,field@0x%08x",                         \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO_exceptionThrown();                                         \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(curMethod->clazz, ref);            \
+            if (ifield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst,                                        \
+            dvmGetField##_ftype(obj, ifield->byteOffset));                  \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+        UPDATE_FIELD_GET(&ifield->field);                                   \
+    }                                                                       \
+    FINISH(5);
+
 #define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
     HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
     {                                                                       \
@@ -580,6 +604,34 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_IPUT_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, vCCCC, class@AAAAAAAA*/)                 \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        vsrc1 = FETCH(4);                      /* object ptr */             \
+        ILOGV("|iput%s/jumbo v%d,v%d,field@0x%08x",                         \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO_exceptionThrown();                                         \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(curMethod->clazz, ref);            \
+            if (ifield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+        }                                                                   \
+        dvmSetField##_ftype(obj, ifield->byteOffset,                        \
+            GET_REGISTER##_regsize(vdst));                                  \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+        UPDATE_FIELD_PUT(&ifield->field);                                   \
+    }                                                                       \
+    FINISH(5);
+
 #define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
     HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
     {                                                                       \
@@ -617,7 +669,7 @@
             if (sfield == NULL)                                             \
                 GOTO_exceptionThrown();                                     \
             if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
-                ABORT_JIT_TSELECT();                                        \
+                END_JIT_TSELECT();                                          \
             }                                                               \
         }                                                                   \
         SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
@@ -627,6 +679,30 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_SGET_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, class@AAAAAAAA*/)                        \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        ILOGV("|sget%s/jumbo v%d,sfield@0x%08x", (_opname), vdst, ref);     \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(curMethod->clazz, ref);          \
+            if (sfield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+            if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
+                END_JIT_TSELECT();                                          \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
+        ILOGV("+ SGET '%s'=0x%08llx",                                       \
+            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+        UPDATE_FIELD_GET(&sfield->field);                                   \
+    }                                                                       \
+    FINISH(4);
+
 #define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize)                   \
     HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/)                              \
     {                                                                       \
@@ -641,7 +717,7 @@
             if (sfield == NULL)                                             \
                 GOTO_exceptionThrown();                                     \
             if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
-                ABORT_JIT_TSELECT();                                        \
+                END_JIT_TSELECT();                                          \
             }                                                               \
         }                                                                   \
         dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
@@ -650,3 +726,27 @@
         UPDATE_FIELD_PUT(&sfield->field);                                   \
     }                                                                       \
     FINISH(2);
+
+#define HANDLE_SPUT_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, class@AAAAAAAA*/)                        \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        ILOGV("|sput%s/jumbo v%d,sfield@0x%08x", (_opname), vdst, ref);     \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(curMethod->clazz, ref);          \
+            if (sfield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+            if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
+                END_JIT_TSELECT();                                          \
+            }                                                               \
+        }                                                                   \
+        dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
+        ILOGV("+ SPUT '%s'=0x%08llx",                                       \
+            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+        UPDATE_FIELD_PUT(&sfield->field);                                   \
+    }                                                                       \
+    FINISH(4);
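
In the jumbo handlers added above, the field reference no longer fits in a
single 16-bit code unit, so it is assembled from two consecutive units
(FETCH(1) supplies the low half, FETCH(2) the high half), with the register
numbers following in units 3 and 4 and the whole instruction occupying five
units, hence FINISH(5); the static-field forms use four.  A minimal
standalone sketch of that decoding (illustrative names only):

    #include <stdint.h>

    typedef uint16_t u2;
    typedef uint32_t u4;

    /* Operands of an iget/jumbo-style instruction starting at pc:
     *   unit 0: opcode, units 1-2: 32-bit field ref (lo, hi),
     *   unit 3: destination register, unit 4: object register. */
    static void decodeJumboFieldOp(const u2* pc, u4* ref, u2* vdst, u2* vobj)
    {
        *ref  = (u4)pc[1] | ((u4)pc[2] << 16);
        *vdst = pc[3];
        *vobj = pc[4];
        /* the handler then advances past all five code units */
    }
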
diff --git a/vm/mterp/common/asm-constants.h b/vm/mterp/common/asm-constants.h
index aeed88b..e699f20 100644
--- a/vm/mterp/common/asm-constants.h
+++ b/vm/mterp/common/asm-constants.h
@@ -81,39 +81,6 @@
  * values are incorrect.
  */
 
-/* globals (sanity check for LDR vs LDRB) */
-MTERP_SIZEOF(sizeofGlobal_debuggerActive, gDvm.debuggerActive, 1)
-MTERP_SIZEOF(sizeofGlobal_activeProfilers, gDvm.activeProfilers, 4)
-
-/* MterpGlue fields */
-MTERP_OFFSET(offGlue_pc,                MterpGlue, pc, 0)
-MTERP_OFFSET(offGlue_fp,                MterpGlue, fp, 4)
-MTERP_OFFSET(offGlue_retval,            MterpGlue, retval, 8)
-MTERP_OFFSET(offGlue_method,            MterpGlue, method, 16)
-MTERP_OFFSET(offGlue_methodClassDex,    MterpGlue, methodClassDex, 20)
-MTERP_OFFSET(offGlue_self,              MterpGlue, self, 24)
-MTERP_OFFSET(offGlue_bailPtr,           MterpGlue, bailPtr, 28)
-MTERP_OFFSET(offGlue_interpStackEnd,    MterpGlue, interpStackEnd, 32)
-MTERP_OFFSET(offGlue_pSelfSuspendCount, MterpGlue, pSelfSuspendCount, 36)
-MTERP_OFFSET(offGlue_cardTable,         MterpGlue, cardTable, 40)
-MTERP_OFFSET(offGlue_pDebuggerActive,   MterpGlue, pDebuggerActive, 44)
-MTERP_OFFSET(offGlue_pActiveProfilers,  MterpGlue, pActiveProfilers, 48)
-MTERP_OFFSET(offGlue_entryPoint,        MterpGlue, entryPoint, 52)
-#if defined(WITH_JIT)
-MTERP_OFFSET(offGlue_pJitProfTable,     MterpGlue, pJitProfTable, 60)
-MTERP_OFFSET(offGlue_jitState,          MterpGlue, jitState, 64)
-MTERP_OFFSET(offGlue_jitResumeNPC,      MterpGlue, jitResumeNPC, 68)
-MTERP_OFFSET(offGlue_jitResumeDPC,      MterpGlue, jitResumeDPC, 72)
-MTERP_OFFSET(offGlue_jitThreshold,      MterpGlue, jitThreshold, 76)
-MTERP_OFFSET(offGlue_ppJitProfTable,    MterpGlue, ppJitProfTable, 80)
-MTERP_OFFSET(offGlue_icRechainCount,    MterpGlue, icRechainCount, 84)
-#endif
-/* make sure all JValue union members are stored at the same offset */
-MTERP_OFFSET(offGlue_retval_z,          MterpGlue, retval.z, 8)
-MTERP_OFFSET(offGlue_retval_i,          MterpGlue, retval.i, 8)
-MTERP_OFFSET(offGlue_retval_j,          MterpGlue, retval.j, 8)
-MTERP_OFFSET(offGlue_retval_l,          MterpGlue, retval.l, 8)
-
 /* DvmDex fields */
 MTERP_OFFSET(offDvmDex_pResStrings,     DvmDex, pResStrings, 8)
 MTERP_OFFSET(offDvmDex_pResClasses,     DvmDex, pResClasses, 12)
@@ -147,11 +114,13 @@
 #if defined(WITH_JIT) && defined(WITH_SELF_VERIFICATION)
 MTERP_OFFSET(offShadowSpace_startPC,     ShadowSpace, startPC, 0)
 MTERP_OFFSET(offShadowSpace_fp,          ShadowSpace, fp, 4)
-MTERP_OFFSET(offShadowSpace_glue,        ShadowSpace, glue, 8)
-MTERP_OFFSET(offShadowSpace_jitExitState,ShadowSpace, jitExitState, 12)
-MTERP_OFFSET(offShadowSpace_svState,     ShadowSpace, selfVerificationState, 16)
-MTERP_OFFSET(offShadowSpace_shadowFP,    ShadowSpace, shadowFP, 24)
-MTERP_OFFSET(offShadowSpace_interpState, ShadowSpace, interpState, 32)
+MTERP_OFFSET(offShadowSpace_method,      ShadowSpace, method, 8)
+MTERP_OFFSET(offShadowSpace_methodClassDex, ShadowSpace, methodClassDex, 12)
+MTERP_OFFSET(offShadowSpace_retval,      ShadowSpace, retval, 16)
+MTERP_OFFSET(offShadowSpace_interpStackEnd, ShadowSpace, interpStackEnd, 24)
+MTERP_OFFSET(offShadowSpace_jitExitState,ShadowSpace, jitExitState, 28)
+MTERP_OFFSET(offShadowSpace_svState,     ShadowSpace, selfVerificationState, 32)
+MTERP_OFFSET(offShadowSpace_shadowFP,    ShadowSpace, shadowFP, 40)
 #endif
 
 /* InstField fields */
@@ -185,38 +154,51 @@
 MTERP_OFFSET(offInlineOperation_func,   InlineOperation, func, 0)
 
 /* Thread fields */
-MTERP_OFFSET(offThread_stackOverflowed, Thread, stackOverflowed, 36)
-MTERP_OFFSET(offThread_curFrame,        Thread, curFrame, 40)
-MTERP_OFFSET(offThread_exception,       Thread, exception, 44)
+MTERP_OFFSET(offThread_pc,                InterpSaveState, pc, 0)
+MTERP_OFFSET(offThread_fp,                InterpSaveState, fp, 4)
+MTERP_OFFSET(offThread_method,            InterpSaveState, method, 8)
+MTERP_OFFSET(offThread_methodClassDex,    InterpSaveState, methodClassDex, 12)
+MTERP_OFFSET(offThread_bailPtr,           InterpSaveState, bailPtr, 16)
+MTERP_OFFSET(offThread_pInterpBreak,      InterpSaveState, pInterpBreak, 20)
 
-#if defined(WITH_JIT)
-MTERP_OFFSET(offThread_inJitCodeCache,  Thread, inJitCodeCache, 72)
+/* make sure all JValue union members are stored at the same offset */
+MTERP_OFFSET(offThread_retval,            Thread, retval, 32)
+MTERP_OFFSET(offThread_retval_z,          Thread, retval.z, 32)
+MTERP_OFFSET(offThread_retval_i,          Thread, retval.i, 32)
+MTERP_OFFSET(offThread_retval_j,          Thread, retval.j, 32)
+MTERP_OFFSET(offThread_retval_l,          Thread, retval.l, 32)
+MTERP_OFFSET(offThread_suspendCount,      Thread, suspendCount, 40)
+MTERP_OFFSET(offThread_dbgSuspendCount,   Thread, dbgSuspendCount, 44)
+MTERP_OFFSET(offThread_cardTable,         Thread, cardTable, 48)
+MTERP_OFFSET(offThread_interpStackEnd,    Thread, interpStackEnd, 52)
+MTERP_OFFSET(offThread_curFrame,          Thread, curFrame, 56)
+MTERP_OFFSET(offThread_exception,         Thread, exception, 60)
+MTERP_OFFSET(offThread_threadId,          Thread, threadId, 64)
+MTERP_OFFSET(offThread_debugIsMethodEntry, Thread, debugIsMethodEntry, 68)
+MTERP_OFFSET(offThread_interpStackSize,   Thread, interpStackSize, 72)
+MTERP_OFFSET(offThread_stackOverflowed,   Thread, stackOverflowed, 76)
+MTERP_OFFSET(offThread_entryPoint,        Thread, entryPoint, 80)
+MTERP_OFFSET(offThread_curHandlerTable,   Thread, curHandlerTable, 84)
+
+#ifdef WITH_JIT
+MTERP_OFFSET(offThread_jitToInterpEntries,Thread, jitToInterpEntries, 96)
+MTERP_OFFSET(offThread_inJitCodeCache,    Thread, inJitCodeCache, 120)
+MTERP_OFFSET(offThread_pJitProfTable,     Thread, pJitProfTable, 124)
+MTERP_OFFSET(offThread_ppJitProfTable,    Thread, ppJitProfTable, 128)
+MTERP_OFFSET(offThread_jitThreshold,      Thread, jitThreshold, 132)
+MTERP_OFFSET(offThread_jitResumeNPC,      Thread, jitResumeNPC, 136)
+MTERP_OFFSET(offThread_jitResumeDPC,      Thread, jitResumeDPC, 140)
+MTERP_OFFSET(offThread_jitState,          Thread, jitState, 144)
+MTERP_OFFSET(offThread_icRechainCount,    Thread, icRechainCount, 148)
+MTERP_OFFSET(offThread_pProfileCountdown, Thread, pProfileCountdown, 152)
+MTERP_OFFSET(offThread_jniLocal_topCookie, \
+                                Thread, jniLocalRefTable.segmentState.all, 156)
 #if defined(WITH_SELF_VERIFICATION)
-MTERP_OFFSET(offThread_shadowSpace,     Thread, shadowSpace, 76)
-#ifdef USE_INDIRECT_REF
-MTERP_OFFSET(offThread_jniLocal_topCookie, \
-                                Thread, jniLocalRefTable.segmentState.all, 80)
-#else
-MTERP_OFFSET(offThread_jniLocal_topCookie, \
-                                Thread, jniLocalRefTable.nextEntry, 80)
+MTERP_OFFSET(offThread_shadowSpace,       Thread, shadowSpace, 172)
 #endif
 #else
-#ifdef USE_INDIRECT_REF
 MTERP_OFFSET(offThread_jniLocal_topCookie, \
-                                Thread, jniLocalRefTable.segmentState.all, 76)
-#else
-MTERP_OFFSET(offThread_jniLocal_topCookie, \
-                                Thread, jniLocalRefTable.nextEntry, 76)
-#endif
-#endif
-#else
-#ifdef USE_INDIRECT_REF
-MTERP_OFFSET(offThread_jniLocal_topCookie, \
-                                Thread, jniLocalRefTable.segmentState.all, 72)
-#else
-MTERP_OFFSET(offThread_jniLocal_topCookie, \
-                                Thread, jniLocalRefTable.nextEntry, 72)
-#endif
+                                Thread, jniLocalRefTable.segmentState.all, 96)
 #endif
 
 /* Object fields */
@@ -315,6 +297,7 @@
 MTERP_CONSTANT(ACC_NATIVE,          0x0100)
 MTERP_CONSTANT(ACC_INTERFACE,       0x0200)
 MTERP_CONSTANT(ACC_ABSTRACT,        0x0400)
+MTERP_CONSTANT(CLASS_ISFINALIZABLE, 1<<31)
 
 /* flags for dvmMalloc */
 MTERP_CONSTANT(ALLOC_DONT_TRACK,    0x01)
@@ -324,3 +307,11 @@
 
 /* opcode number */
 MTERP_CONSTANT(OP_MOVE_EXCEPTION,   0x0d)
+
+/* flags for interpBreak */
+MTERP_CONSTANT(kSubModeNormal,         0x0000)
+MTERP_CONSTANT(kSubModeMethodTrace,    0x0001)
+MTERP_CONSTANT(kSubModeEmulatorTrace,  0x0002)
+MTERP_CONSTANT(kSubModeInstCounting,   0x0004)
+MTERP_CONSTANT(kSubModeDebuggerActive, 0x0008)
+MTERP_CONSTANT(kSubModeSuspendRequest, 0x0010)
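
These MTERP_OFFSET/MTERP_CONSTANT entries hard-code numbers that the
assembly interpreters bake into load/store offsets, so each one has to be
kept in sync with the real C struct layouts.  Conceptually, the consistency
check behind them looks something like the following (a sketch of the idea,
not the actual macro definitions):

    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical equivalent of what each MTERP_OFFSET line asserts:
     * the hand-maintained constant must equal offsetof() for the field. */
    #define CHECK_OFFSET(type, field, expected)                               \
        do {                                                                  \
            if (offsetof(type, field) != (size_t)(expected))                  \
                fprintf(stderr, "bad asm constant: %s.%s\n", #type, #field);  \
        } while (0)

    /* e.g. CHECK_OFFSET(InterpSaveState, pc, 0);
     *      CHECK_OFFSET(Thread, curFrame, 56);   */
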
diff --git a/vm/mterp/config-allstubs b/vm/mterp/config-allstubs
index 23796d8..a105049 100644
--- a/vm/mterp/config-allstubs
+++ b/vm/mterp/config-allstubs
@@ -15,9 +15,12 @@
 #
 # Configuration for "allstubs" target.  This is structured like the
 # assembly interpreters, but consists entirely of C stubs, making it
-# a handy if inefficient way to exercise all of the C handlers.
+# a handy if inefficient way to exercise all of the C handlers.  The
+# handler-style command should match the target assembly interpreter.
 #
 
+#handler-style jump-table
+handler-style computed-goto
 handler-size 64
 
 # C file header and basic definitions
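
For readers unfamiliar with the handler styles being selected in these
config files: "computed-goto" lays fixed-size assembly handlers out back to
back and jumps to base + opcode * handler-size (which is why handler-size
is only meaningful for that style), "jump-table" indirects through a table
of handler addresses, and "all-c" dispatches purely through C stubs.  Very
roughly, the first two shapes look like this in C (sketch only, using gcc's
labels-as-values extension; none of this is the VM's actual dispatch code):

    typedef void (*Handler)(unsigned short inst);
    static void handleNop(unsigned short inst)  { (void)inst; }
    static void handleMove(unsigned short inst) { (void)inst; }

    /* "jump-table": one function-pointer table entry per opcode */
    static const Handler table[2] = { handleNop, handleMove };
    static void dispatchViaTable(unsigned short inst)
    {
        table[inst & 0x1](inst);
    }

    /* "computed-goto": jump straight to a handler address derived from
     * the opcode; gcc's &&label syntax approximates the assembly layout */
    static void dispatchComputedGoto(unsigned short inst)
    {
        static void* const target[2] = { &&op_nop, &&op_move };
        goto *target[inst & 0x1];
    op_nop:
        return;
    op_move:
        return;
    }
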
diff --git a/vm/mterp/config-armv5te b/vm/mterp/config-armv5te
index 2dceb04..e00fea9 100644
--- a/vm/mterp/config-armv5te
+++ b/vm/mterp/config-armv5te
@@ -16,11 +16,15 @@
 # Configuration for ARMv5TE architecture targets.
 #
 
+handler-style computed-goto
 handler-size 64
 
 # source for the instruction table stub
 asm-stub armv5te/stub.S
 
+# source for alternate entry stub
+asm-alt-stub armv5te/alt_stub.S
+
 # file header and basic definitions
 import c/header.c
 import armv5te/header.S
diff --git a/vm/mterp/config-armv5te-vfp b/vm/mterp/config-armv5te-vfp
index ce0c521..70ce967 100644
--- a/vm/mterp/config-armv5te-vfp
+++ b/vm/mterp/config-armv5te-vfp
@@ -20,11 +20,15 @@
 # operations except for "remainder" and conversions to/from 64-bit ints.
 #
 
+handler-style computed-goto
 handler-size 64
 
 # source for the instruction table stub
 asm-stub armv5te/stub.S
 
+# source for alternate entry stub
+asm-alt-stub armv5te/alt_stub.S
+
 # file header and basic definitions
 import c/header.c
 import armv5te/header.S
diff --git a/vm/mterp/config-armv7-a b/vm/mterp/config-armv7-a
index e66640c..b9b998f 100644
--- a/vm/mterp/config-armv7-a
+++ b/vm/mterp/config-armv7-a
@@ -21,11 +21,15 @@
 # negligible on a Cortex-A8 CPU, so this is really just an experiment.
 #
 
+handler-style computed-goto
 handler-size 64
 
 # source for the instruction table stub
 asm-stub armv5te/stub.S
 
+# source for alternate entry stub
+asm-alt-stub armv5te/alt_stub.S
+
 # file header and basic definitions
 import c/header.c
 import armv5te/header.S
@@ -156,6 +160,8 @@
 
 # "helper" code for C; include if you use any of the C stubs (this generates
 # object code, so it's normally excluded)
+#
+# Add this if you see linker failures for stuff like "dvmMterp_exceptionThrown".
 ##import c/gotoTargets.c
 
 # end of defs; include this when cstubs/stubdefs.c is included
diff --git a/vm/mterp/config-armv7-a-neon b/vm/mterp/config-armv7-a-neon
index e66640c..fd18f69 100644
--- a/vm/mterp/config-armv7-a-neon
+++ b/vm/mterp/config-armv7-a-neon
@@ -21,11 +21,15 @@
 # negligible on a Cortex-A8 CPU, so this is really just an experiment.
 #
 
+handler-style computed-goto
 handler-size 64
 
 # source for the instruction table stub
 asm-stub armv5te/stub.S
 
+# source for alternate entry stub
+asm-alt-stub armv5te/alt_stub.S
+
 # file header and basic definitions
 import c/header.c
 import armv5te/header.S
diff --git a/vm/mterp/config-portdbg b/vm/mterp/config-portdbg
index c6982d7..feab06d 100644
--- a/vm/mterp/config-portdbg
+++ b/vm/mterp/config-portdbg
@@ -17,7 +17,7 @@
 # debugging enabled.
 #
 
-#handler-size 64
+handler-style all-c
 
 # C file header and basic definitions
 import c/header.c
diff --git a/vm/mterp/config-portstd b/vm/mterp/config-portstd
index 41ecb4f..d0f609e 100644
--- a/vm/mterp/config-portstd
+++ b/vm/mterp/config-portstd
@@ -19,7 +19,7 @@
 # here because it's convenient.
 #
 
-#handler-size 64
+handler-style all-c
 
 # C file header and basic definitions
 import c/header.c
diff --git a/vm/mterp/config-x86 b/vm/mterp/config-x86
index f137198..a99e5a7 100644
--- a/vm/mterp/config-x86
+++ b/vm/mterp/config-x86
@@ -16,11 +16,14 @@
 # Configuration for "desktop" targets.
 #
 
-handler-size 64
+handler-style jump-table
 
 # source for the instruction table stub
 asm-stub x86/stub.S
 
+# source for alternate entry stub
+asm-alt-stub x86/alt_stub.S
+
 # C file header and basic definitions
 import c/header.c
 import x86/header.S
@@ -40,6 +43,21 @@
     op OP_SGET_WIDE_VOLATILE c
     op OP_SPUT_WIDE_VOLATILE c
     op OP_RETURN_VOID_BARRIER c
+    op OP_INVOKE_OBJECT_INIT_RANGE c
+
+    op OP_INVOKE_OBJECT_INIT_JUMBO c
+    op OP_IGET_VOLATILE_JUMBO c
+    op OP_IPUT_VOLATILE_JUMBO c
+    op OP_SGET_VOLATILE_JUMBO c
+    op OP_SPUT_VOLATILE_JUMBO c
+    op OP_IGET_OBJECT_VOLATILE_JUMBO c
+    op OP_IPUT_OBJECT_VOLATILE_JUMBO c
+    op OP_SGET_OBJECT_VOLATILE_JUMBO c
+    op OP_SPUT_OBJECT_VOLATILE_JUMBO c
+    op OP_IGET_WIDE_VOLATILE_JUMBO c
+    op OP_IPUT_WIDE_VOLATILE_JUMBO c
+    op OP_SGET_WIDE_VOLATILE_JUMBO c
+    op OP_SPUT_WIDE_VOLATILE_JUMBO c
 op-end
 
 # arch-specific entry point to interpreter
diff --git a/vm/mterp/config-x86-atom b/vm/mterp/config-x86-atom
index ef2a90c..4a13d7a 100644
--- a/vm/mterp/config-x86-atom
+++ b/vm/mterp/config-x86-atom
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 # Specifies the size of the assembly region in bytes
+handler-style computed-goto
 handler-size 64
 
 # source for the instruction table stub
@@ -290,6 +291,21 @@
 op OP_SGET_WIDE_VOLATILE c
 op OP_SPUT_WIDE_VOLATILE c
 op OP_RETURN_VOID_BARRIER c
+op OP_INVOKE_OBJECT_INIT_RANGE c
+
+op OP_INVOKE_OBJECT_INIT_JUMBO c
+op OP_IGET_VOLATILE_JUMBO c
+op OP_IPUT_VOLATILE_JUMBO c
+op OP_SGET_VOLATILE_JUMBO c
+op OP_SPUT_VOLATILE_JUMBO c
+op OP_IGET_OBJECT_VOLATILE_JUMBO c
+op OP_IPUT_OBJECT_VOLATILE_JUMBO c
+op OP_SGET_OBJECT_VOLATILE_JUMBO c
+op OP_SPUT_OBJECT_VOLATILE_JUMBO c
+op OP_IGET_WIDE_VOLATILE_JUMBO c
+op OP_IPUT_WIDE_VOLATILE_JUMBO c
+op OP_SGET_WIDE_VOLATILE_JUMBO c
+op OP_SPUT_WIDE_VOLATILE_JUMBO c
 op-end
 
 # arch-specific entry point to interpreter
diff --git a/vm/mterp/cstubs/entry.c b/vm/mterp/cstubs/entry.c
index 4fe0d2c..58add85 100644
--- a/vm/mterp/cstubs/entry.c
+++ b/vm/mterp/cstubs/entry.c
@@ -17,12 +17,12 @@
  *
  * This is only used for the "allstubs" variant.
  */
-bool dvmMterpStdRun(MterpGlue* glue)
+bool dvmMterpStdRun(Thread* self)
 {
     jmp_buf jmpBuf;
     int changeInterp;
 
-    glue->bailPtr = &jmpBuf;
+    self->bailPtr = &jmpBuf;
 
     /*
      * We want to return "changeInterp" as a boolean, but we can't return
@@ -40,18 +40,18 @@
      * We need to pick up where the other interpreter left off.
      *
      * In some cases we need to call into a throw/return handler which
-     * will do some processing and then either return to us (updating "glue")
+     * will do some processing and then either return to us (updating "self")
      * or longjmp back out.
      */
-    switch (glue->entryPoint) {
+    switch (self->entryPoint) {
     case kInterpEntryInstr:
         /* just start at the start */
         break;
     case kInterpEntryReturn:
-        dvmMterp_returnFromMethod(glue);
+        dvmMterp_returnFromMethod(self);
         break;
     case kInterpEntryThrow:
-        dvmMterp_exceptionThrown(glue);
+        dvmMterp_exceptionThrown(self);
         break;
     default:
         dvmAbort();
@@ -59,22 +59,22 @@
 
     /* run until somebody longjmp()s out */
     while (true) {
-        typedef void (*Handler)(MterpGlue* glue);
+        typedef void (*Handler)(Thread* self);
 
-        u2 inst = /*glue->*/pc[0];
+        u2 inst = /*self->*/pc[0];
         Handler handler = (Handler) gDvmMterpHandlers[inst & 0xff];
         (void) gDvmMterpHandlerNames;   /* avoid gcc "defined but not used" */
         LOGVV("handler %p %s\n",
             handler, (const char*) gDvmMterpHandlerNames[inst & 0xff]);
-        (*handler)(glue);
+        (*handler)(self);
     }
 }
 
 /*
  * C mterp exit point.  Call here to bail out of the interpreter.
  */
-void dvmMterpStdBail(MterpGlue* glue, bool changeInterp)
+void dvmMterpStdBail(Thread* self, bool changeInterp)
 {
-    jmp_buf* pJmpBuf = glue->bailPtr;
+    jmp_buf* pJmpBuf = self->bailPtr;
     longjmp(*pJmpBuf, ((int)changeInterp)+1);
 }
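
The bailPtr/longjmp pairing above is the standard setjmp escape hatch: the
dispatch loop never falls out the bottom, so the "did we switch
interpreters" answer has to travel back through longjmp, offset by one
because setjmp itself returns 0 on the initial call.  A condensed sketch of
that control flow (simplified and standalone; the real code hangs the
jmp_buf pointer off the Thread, as shown above):

    #include <setjmp.h>
    #include <stdbool.h>

    static jmp_buf* gBailPtr;           /* stand-in for self->bailPtr */

    static void bail(bool changeInterp)
    {
        longjmp(*gBailPtr, (int)changeInterp + 1);   /* +1: never 0 */
    }

    static bool runLoop(void)
    {
        jmp_buf jmpBuf;
        gBailPtr = &jmpBuf;

        int rc = setjmp(jmpBuf);
        if (rc != 0)
            return rc - 1;              /* came back via bail() */

        for (;;) {
            /* ...fetch an instruction, call its handler; eventually some
             * handler decides to leave the interpreter... */
            bail(false);
        }
    }
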
diff --git a/vm/mterp/cstubs/stubdefs.c b/vm/mterp/cstubs/stubdefs.c
index 9911ce1..1b5695a 100644
--- a/vm/mterp/cstubs/stubdefs.c
+++ b/vm/mterp/cstubs/stubdefs.c
@@ -4,7 +4,7 @@
 # define CHECK_TRACKED_REFS() ((void)0)
 #define CHECK_JIT_BOOL() (false)
 #define CHECK_JIT_VOID()
-#define ABORT_JIT_TSELECT() ((void)0)
+#define END_JIT_TSELECT() ((void)0)
 
 /*
  * In the C mterp stubs, "goto" is a function call followed immediately
@@ -12,11 +12,11 @@
  */
 
 #define GOTO_TARGET_DECL(_target, ...)                                      \
-    void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__);
+    void dvmMterp_##_target(Thread* self, ## __VA_ARGS__);
 
 /* (void)xxx to quiet unused variable compiler warnings. */
 #define GOTO_TARGET(_target, ...)                                           \
-    void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__) {              \
+    void dvmMterp_##_target(Thread* self, ## __VA_ARGS__) {                 \
         u2 ref, vsrc1, vsrc2, vdst;                                         \
         u2 inst = FETCH(0);                                                 \
         const Method* methodToCall;                                         \
@@ -27,16 +27,15 @@
 #define GOTO_TARGET_END }
 
 /*
- * Redefine what used to be local variable accesses into MterpGlue struct
+ * Redefine what used to be local variable accesses into Thread struct
  * references.  (These are undefined down in "footer.c".)
  */
-#define retval                  glue->retval
-#define pc                      glue->pc
-#define fp                      glue->fp
-#define curMethod               glue->method
-#define methodClassDex          glue->methodClassDex
-#define self                    glue->self
-#define debugTrackedRefStart    glue->debugTrackedRefStart
+#define retval                  self->retval
+#define pc                      self->interpSave.pc
+#define fp                      self->interpSave.fp
+#define curMethod               self->interpSave.method
+#define methodClassDex          self->interpSave.methodClassDex
+#define debugTrackedRefStart    self->interpSave.debugTrackedRefStart
 
 /* ugh */
 #define STUB_HACK(x) x
@@ -44,12 +43,12 @@
 
 /*
  * Opcode handler framing macros.  Here, each opcode is a separate function
- * that takes a "glue" argument and returns void.  We can't declare
+ * that takes a "self" argument and returns void.  We can't declare
  * these "static" because they may be called from an assembly stub.
  * (void)xxx to quiet unused variable compiler warnings.
  */
 #define HANDLE_OPCODE(_op)                                                  \
-    void dvmMterp_##_op(MterpGlue* glue) {                                  \
+    void dvmMterp_##_op(Thread* self) {                                     \
         u2 ref, vsrc1, vsrc2, vdst;                                         \
         u2 inst = FETCH(0);                                                 \
         (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst;
@@ -76,25 +75,25 @@
 
 #define GOTO_exceptionThrown()                                              \
     do {                                                                    \
-        dvmMterp_exceptionThrown(glue);                                     \
+        dvmMterp_exceptionThrown(self);                                     \
         return;                                                             \
     } while(false)
 
 #define GOTO_returnFromMethod()                                             \
     do {                                                                    \
-        dvmMterp_returnFromMethod(glue);                                    \
+        dvmMterp_returnFromMethod(self);                                    \
         return;                                                             \
     } while(false)
 
-#define GOTO_invoke(_target, _methodCallRange)                              \
+#define GOTO_invoke(_target, _methodCallRange, _jumboFormat)                \
     do {                                                                    \
-        dvmMterp_##_target(glue, _methodCallRange);                         \
+        dvmMterp_##_target(self, _methodCallRange, _jumboFormat);           \
         return;                                                             \
     } while(false)
 
 #define GOTO_invokeMethod(_methodCallRange, _methodToCall, _vsrc1, _vdst)   \
     do {                                                                    \
-        dvmMterp_invokeMethod(glue, _methodCallRange, _methodToCall,        \
+        dvmMterp_invokeMethod(self, _methodCallRange, _methodToCall,        \
             _vsrc1, _vdst);                                                 \
         return;                                                             \
     } while(false)
@@ -104,9 +103,9 @@
  * if we need to switch to the other interpreter upon our return.
  */
 #define GOTO_bail()                                                         \
-    dvmMterpStdBail(glue, false);
+    dvmMterpStdBail(self, false);
 #define GOTO_bail_switch()                                                  \
-    dvmMterpStdBail(glue, true);
+    dvmMterpStdBail(self, true);
 
 /*
  * Periodically check for thread suspension.
@@ -121,7 +120,7 @@
         }                                                                   \
         if (NEED_INTERP_SWITCH(INTERP_TYPE)) {                              \
             ADJUST_PC(_pcadj);                                              \
-            glue->entryPoint = _entryPoint;                                 \
+            self->entryPoint = _entryPoint;                                 \
             LOGVV("threadid=%d: switch to STD ep=%d adj=%d\n",              \
                 self->threadId, (_entryPoint), (_pcadj));                   \
             GOTO_bail_switch();                                             \
diff --git a/vm/mterp/gen-mterp.py b/vm/mterp/gen-mterp.py
index b55ed58..f8043bb 100755
--- a/vm/mterp/gen-mterp.py
+++ b/vm/mterp/gen-mterp.py
@@ -23,16 +23,22 @@
 from string import Template
 
 interp_defs_file = "../../libdex/DexOpcodes.h" # need opcode list
-kNumPackedOpcodes = 256 # TODO: Derive this from DexOpcodes.h.
+kNumPackedOpcodes = 512 # TODO: Derive this from DexOpcodes.h.
 
 verbose = False
 handler_size_bits = -1000
 handler_size_bytes = -1000
 in_op_start = 0             # 0=not started, 1=started, 2=ended
+in_alt_op_start = 0         # 0=not started, 1=started, 2=ended
 default_op_dir = None
+default_alt_stub = None
 opcode_locations = {}
+alt_opcode_locations = {}
 asm_stub_text = []
 label_prefix = ".L"         # use ".L" to hide labels from gdb
+alt_label_prefix = ".L_ALT" # use ".L" to hide labels from gdb
+style = None                # interpreter style
+generate_alt_table = False
 
 # Exception class.
 class DataParseError(SyntaxError):
@@ -47,18 +53,32 @@
 
 #
 # Parse arch config file --
+# Set interpreter style.
+#
+def setHandlerStyle(tokens):
+    global style
+    if len(tokens) != 2:
+        raise DataParseError("handler-style requires one argument")
+    style = tokens[1]
+    if style != "computed-goto" and style != "jump-table" and style != "all-c":
+        raise DataParseError("handler-style (%s) invalid" % style)
+
+#
+# Parse arch config file --
 # Set handler_size_bytes to the value of tokens[1], and handler_size_bits to
-# log2(handler_size_bytes).  Throws an exception if "bytes" is not a power
-# of two.
+# log2(handler_size_bytes).  Throws an exception if "bytes" is not 0 or
+# a power of two.
 #
 def setHandlerSize(tokens):
     global handler_size_bits, handler_size_bytes
+    if style != "computed-goto":
+        print "Warning: handler-size valid only for computed-goto interpreters"
     if len(tokens) != 2:
         raise DataParseError("handler-size requires one argument")
     if handler_size_bits != -1000:
         raise DataParseError("handler-size may only be set once")
 
-    # compute log2(n), and make sure n is a power of 2
+    # compute log2(n), and make sure n is 0 or a power of 2
     handler_size_bytes = bytes = int(tokens[1])
     bits = -1
     while bytes > 0:
@@ -66,7 +86,7 @@
         bits += 1
 
     if handler_size_bytes == 0 or handler_size_bytes != (1 << bits):
-        raise DataParseError("handler-size (%d) must be power of 2 and > 0" \
+        raise DataParseError("handler-size (%d) must be power of 2" \
                 % handler_size_bytes)
     handler_size_bits = bits
 
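A quick worked example of the check above: handler-size 64 is accepted (the
log2 loop yields bits = 6, and 1 << 6 == 64), whereas handler-size 48 would
be rejected (bits comes out 5, and 1 << 5 == 32 != 48).
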
@@ -92,6 +112,8 @@
 #
 def setAsmStub(tokens):
     global asm_stub_text
+    if style == "all-c":
+        print "Warning: asm-stub ignored for all-c interpreter"
     if len(tokens) != 2:
         raise DataParseError("import requires one argument")
     try:
@@ -104,6 +126,19 @@
 
 #
 # Parse arch config file --
+# Record location of default alt stub
+#
+def setAsmAltStub(tokens):
+    global default_alt_stub, generate_alt_table
+    if style == "all-c":
+        print "Warning: asm-alt-stub ingored for all-c interpreter"
+    if len(tokens) != 2:
+        raise DataParseError("import requires one argument")
+    default_alt_stub = tokens[1]
+    generate_alt_table = True
+
+#
+# Parse arch config file --
 # Start of opcode list.
 #
 def opStart(tokens):
@@ -118,6 +153,26 @@
 
 #
 # Parse arch config file --
+# Set location of a single alt opcode's source file.
+#
+def altEntry(tokens):
+    global generate_alt_table
+    if len(tokens) != 3:
+        raise DataParseError("alt requires exactly two arguments")
+    if in_op_start != 1:
+        raise DataParseError("alt statements must be between opStart/opEnd")
+    try:
+        index = opcodes.index(tokens[1])
+    except ValueError:
+        raise DataParseError("unknown opcode %s" % tokens[1])
+    if alt_opcode_locations.has_key(tokens[1]):
+        print "Warning: alt overrides earlier %s (%s -> %s)" \
+                % (tokens[1], alt_opcode_locations[tokens[1]], tokens[2])
+    alt_opcode_locations[tokens[1]] = tokens[2]
+    generate_alt_table = True
+
+#
+# Parse arch config file --
 # Set location of a single opcode's source file.
 #
 def opEntry(tokens):
@@ -136,6 +191,20 @@
     opcode_locations[tokens[1]] = tokens[2]
 
 #
+# Emit jump table
+#
+def emitJmpTable(start_label, prefix):
+    asm_fp.write("\n    .global %s\n" % start_label)
+    asm_fp.write("    .text\n")
+    asm_fp.write("%s:\n" % start_label)
+    for i in xrange(kNumPackedOpcodes):
+        op = opcodes[i]
+        dict = getGlobalSubDict()
+        dict.update({ "opcode":op, "opnum":i })
+        asm_fp.write("    .long " + prefix + \
+                     "_%(opcode)s /* 0x%(opnum)02x */\n" % dict)
+
+#
 # Parse arch config file --
 # End of opcode list; emit instruction blocks.
 #
@@ -149,6 +218,12 @@
 
     loadAndEmitOpcodes()
 
+    if generate_alt_table:
+        loadAndEmitAltOpcodes()
+        if style == "jump-table":
+            emitJmpTable("dvmAsmInstructionStart", label_prefix);
+            emitJmpTable("dvmAsmAltInstructionStart", alt_label_prefix);
+
 
 #
 # Extract an ordered list of instructions from the VM sources.  We use the
@@ -172,6 +247,9 @@
         raise SyntaxError, "bad opcode count"
     return opcodes
 
+def emitAlign():
+    if style == "computed-goto":
+        asm_fp.write("    .balign %d\n" % handler_size_bytes)
 
 #
 # Load and emit opcodes for all kNumPackedOpcodes instructions.
@@ -180,11 +258,17 @@
     sister_list = []
     assert len(opcodes) == kNumPackedOpcodes
     need_dummy_start = False
+    if style == "jump-table":
+        start_label = "dvmAsmInstructionStartCode"
+        end_label = "dvmAsmInstructionEndCode"
+    else:
+        start_label = "dvmAsmInstructionStart"
+        end_label = "dvmAsmInstructionEnd"
 
     # point dvmAsmInstructionStart at the first handler or stub
-    asm_fp.write("\n    .global dvmAsmInstructionStart\n")
-    asm_fp.write("    .type   dvmAsmInstructionStart, %function\n")
-    asm_fp.write("dvmAsmInstructionStart = " + label_prefix + "_OP_NOP\n")
+    asm_fp.write("\n    .global %s\n" % start_label)
+    asm_fp.write("    .type   %s, %%function\n" % start_label)
+    asm_fp.write("%s = " % start_label + label_prefix + "_OP_NOP\n")
     asm_fp.write("    .text\n\n")
 
     for i in xrange(kNumPackedOpcodes):
@@ -207,25 +291,70 @@
     # too annoying to try to slide it in after the alignment pseudo-op, so
     # we take the low road and just emit a dummy OP_NOP here.
     if need_dummy_start:
-        asm_fp.write("    .balign %d\n" % handler_size_bytes)
+        emitAlign()
         asm_fp.write(label_prefix + "_OP_NOP:   /* dummy */\n");
 
-    asm_fp.write("\n    .balign %d\n" % handler_size_bytes)
-    asm_fp.write("    .size   dvmAsmInstructionStart, .-dvmAsmInstructionStart\n")
-    asm_fp.write("    .global dvmAsmInstructionEnd\n")
-    asm_fp.write("dvmAsmInstructionEnd:\n")
+    emitAlign()
+    asm_fp.write("    .size   %s, .-%s\n" % (start_label, start_label))
+    asm_fp.write("    .global %s\n" % end_label)
+    asm_fp.write("%s:\n" % end_label)
 
-    emitSectionComment("Sister implementations", asm_fp)
-    asm_fp.write("    .global dvmAsmSisterStart\n")
-    asm_fp.write("    .type   dvmAsmSisterStart, %function\n")
-    asm_fp.write("    .text\n")
-    asm_fp.write("    .balign 4\n")
-    asm_fp.write("dvmAsmSisterStart:\n")
-    asm_fp.writelines(sister_list)
+    if style == "computed-goto":
+        emitSectionComment("Sister implementations", asm_fp)
+        asm_fp.write("    .global dvmAsmSisterStart\n")
+        asm_fp.write("    .type   dvmAsmSisterStart, %function\n")
+        asm_fp.write("    .text\n")
+        asm_fp.write("    .balign 4\n")
+        asm_fp.write("dvmAsmSisterStart:\n")
+        asm_fp.writelines(sister_list)
 
-    asm_fp.write("\n    .size   dvmAsmSisterStart, .-dvmAsmSisterStart\n")
-    asm_fp.write("    .global dvmAsmSisterEnd\n")
-    asm_fp.write("dvmAsmSisterEnd:\n\n")
+        asm_fp.write("\n    .size   dvmAsmSisterStart, .-dvmAsmSisterStart\n")
+        asm_fp.write("    .global dvmAsmSisterEnd\n")
+        asm_fp.write("dvmAsmSisterEnd:\n\n")
+
+#
+# Load an alternate entry stub
+#
+def loadAndEmitAltStub(source, opindex):
+    op = opcodes[opindex]
+    if verbose:
+        print " alt emit %s --> stub" % source
+    dict = getGlobalSubDict()
+    dict.update({ "opcode":op, "opnum":opindex })
+
+    emitAsmHeader(asm_fp, dict, alt_label_prefix)
+    appendSourceFile(source, dict, asm_fp, None)
+
+#
+# Load and emit alternate opcodes for all kNumPackedOpcodes instructions.
+#
+def loadAndEmitAltOpcodes():
+    assert len(opcodes) == kNumPackedOpcodes
+    if style == "jump-table":
+        start_label = "dvmAsmAltInstructionStartCode"
+        end_label = "dvmAsmAltInstructionEndCode"
+    else:
+        start_label = "dvmAsmAltInstructionStart"
+        end_label = "dvmAsmAltInstructionEnd"
+
+    # point dvmAsmAltInstructionStart at the first alt handler or stub
+    asm_fp.write("\n    .global %s\n" % start_label)
+    asm_fp.write("    .type   %s, %%function\n" % start_label)
+    asm_fp.write("%s:\n" % start_label)
+    asm_fp.write("    .text\n\n")
+
+    for i in xrange(kNumPackedOpcodes):
+        op = opcodes[i]
+        if alt_opcode_locations.has_key(op):
+            source = "%s/ALT_%s.S" % (alt_opcode_locations[op], op)
+        else:
+            source = default_alt_stub
+        loadAndEmitAltStub(source, i)
+
+    emitAlign()
+    asm_fp.write("    .size   %s, .-%s\n" % (start_label, start_label))
+    asm_fp.write("    .global %s\n" % end_label)
+    asm_fp.write("%s:\n" % end_label)
 
 #
 # Load a C fragment and emit it, then output an assembly stub.
@@ -254,28 +383,28 @@
     if verbose:
         print " emit %s --> asm" % source
 
-    emitAsmHeader(asm_fp, dict)
+    emitAsmHeader(asm_fp, dict, label_prefix)
     appendSourceFile(source, dict, asm_fp, sister_list)
 
 #
 # Output the alignment directive and label for an assembly piece.
 #
-def emitAsmHeader(outfp, dict):
+def emitAsmHeader(outfp, dict, prefix):
     outfp.write("/* ------------------------------ */\n")
     # The alignment directive ensures that the handler occupies
     # at least the correct amount of space.  We don't try to deal
     # with overflow here.
-    outfp.write("    .balign %d\n" % handler_size_bytes)
+    emitAlign()
     # Emit a label so that gdb will say the right thing.  We prepend an
     # underscore so the symbol name doesn't clash with the Opcode enum.
-    outfp.write(label_prefix + "_%(opcode)s: /* 0x%(opnum)02x */\n" % dict)
+    outfp.write(prefix + "_%(opcode)s: /* 0x%(opnum)02x */\n" % dict)
 
 #
 # Output a generic instruction stub that updates the "glue" struct and
 # calls the C implementation.
 #
 def emitAsmStub(outfp, dict):
-    emitAsmHeader(outfp, dict)
+    emitAsmHeader(outfp, dict, label_prefix)
     for line in asm_stub_text:
         templ = Template(line)
         outfp.write(templ.substitute(dict))
@@ -334,7 +463,7 @@
 
         elif line.startswith("%break") and sister_list != None:
             # allow more than one %break, ignoring all following the first
-            if not in_sister:
+            if style == "computed-goto" and not in_sister:
                 in_sister = True
                 sister_list.append("\n/* continuation for %(opcode)s */\n"%dict)
             continue
@@ -452,14 +581,23 @@
                 importFile(tokens)
             elif tokens[0] == "asm-stub":
                 setAsmStub(tokens)
+            elif tokens[0] == "asm-alt-stub":
+                setAsmAltStub(tokens)
             elif tokens[0] == "op-start":
                 opStart(tokens)
             elif tokens[0] == "op-end":
                 opEnd(tokens)
+            elif tokens[0] == "alt":
+                altEntry(tokens)
             elif tokens[0] == "op":
                 opEntry(tokens)
+            elif tokens[0] == "handler-style":
+                setHandlerStyle(tokens)
             else:
                 raise DataParseError, "unrecognized command '%s'" % tokens[0]
+            if style == None:
+                print "tokens[0] = %s" % tokens[0]
+                raise DataParseError, "handler-style must be first command"
 except DataParseError, err:
     print "Failed: " + str(err)
     # TODO: remove output files so "make" doesn't get confused
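
Concretely, the emitJmpTable() helper added above writes a ".global"/".text"
preamble and the start label, followed by one ".long <prefix>_<OPCODE>
/* 0xNN */" line per opcode, i.e. a kNumPackedOpcodes-entry (now 512) array
of handler addresses that a jump-table interpreter can index directly by
opcode; the alternate table is emitted the same way with the .L_ALT prefix.
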
diff --git a/vm/mterp/out/InterpAsm-allstubs.S b/vm/mterp/out/InterpAsm-allstubs.S
index a6973ae..779fd5f 100644
--- a/vm/mterp/out/InterpAsm-allstubs.S
+++ b/vm/mterp/out/InterpAsm-allstubs.S
@@ -12,7 +12,6 @@
 
     .balign 64
 .L_OP_NOP:   /* dummy */
-
     .balign 64
     .size   dvmAsmInstructionStart, .-dvmAsmInstructionStart
     .global dvmAsmInstructionEnd
diff --git a/vm/mterp/out/InterpAsm-armv5te-vfp.S b/vm/mterp/out/InterpAsm-armv5te-vfp.S
index 3b7ec3e..a287d38 100644
--- a/vm/mterp/out/InterpAsm-armv5te-vfp.S
+++ b/vm/mterp/out/InterpAsm-armv5te-vfp.S
@@ -63,7 +63,7 @@
   reg nick      purpose
   r4  rPC       interpreted program counter, used for fetching instructions
   r5  rFP       interpreted frame pointer, used for accessing locals and args
-  r6  rGLUE     MterpGlue pointer
+  r6  rSELF     self (Thread) pointer
   r7  rINST     first 16-bit code unit of current instruction
   r8  rIBASE    interpreted instruction base pointer, used for computed goto
 
@@ -75,21 +75,21 @@
 /* single-purpose registers, given names for clarity */
 #define rPC     r4
 #define rFP     r5
-#define rGLUE   r6
+#define rSELF   r6
 #define rINST   r7
 #define rIBASE  r8
 
-/* save/restore the PC and/or FP from the glue struct */
-#define LOAD_PC_FROM_GLUE()     ldr     rPC, [rGLUE, #offGlue_pc]
-#define SAVE_PC_TO_GLUE()       str     rPC, [rGLUE, #offGlue_pc]
-#define LOAD_FP_FROM_GLUE()     ldr     rFP, [rGLUE, #offGlue_fp]
-#define SAVE_FP_TO_GLUE()       str     rFP, [rGLUE, #offGlue_fp]
-#define LOAD_PC_FP_FROM_GLUE()  ldmia   rGLUE, {rPC, rFP}
-#define SAVE_PC_FP_TO_GLUE()    stmia   rGLUE, {rPC, rFP}
+/* save/restore the PC and/or FP from the thread struct */
+#define LOAD_PC_FROM_SELF()     ldr     rPC, [rSELF, #offThread_pc]
+#define SAVE_PC_TO_SELF()       str     rPC, [rSELF, #offThread_pc]
+#define LOAD_FP_FROM_SELF()     ldr     rFP, [rSELF, #offThread_fp]
+#define SAVE_FP_TO_SELF()       str     rFP, [rSELF, #offThread_fp]
+#define LOAD_PC_FP_FROM_SELF()  ldmia   rSELF, {rPC, rFP}
+#define SAVE_PC_FP_TO_SELF()    stmia   rSELF, {rPC, rFP}
 
 /*
  * "export" the PC to the stack frame, f/b/o future exception objects.  Must
- * be done *before* something calls dvmThrowException.
+ * be done *before* something throws.
  *
  * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e.
  * fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc)
@@ -124,14 +124,14 @@
  * exception catch may miss.  (This also implies that it must come after
  * EXPORT_PC().)
  */
-#define FETCH_ADVANCE_INST(_count) ldrh    rINST, [rPC, #(_count*2)]!
+#define FETCH_ADVANCE_INST(_count) ldrh    rINST, [rPC, #((_count)*2)]!
 
 /*
  * The operation performed here is similar to FETCH_ADVANCE_INST, except the
  * src and dest registers are parameterized (not hard-wired to rPC and rINST).
  */
 #define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
-        ldrh    _dreg, [_sreg, #(_count*2)]!
+        ldrh    _dreg, [_sreg, #((_count)*2)]!
 
 /*
  * Fetch the next instruction from an offset specified by _reg.  Updates
@@ -151,15 +151,15 @@
  *
  * The "_S" variant works the same but treats the value as signed.
  */
-#define FETCH(_reg, _count)     ldrh    _reg, [rPC, #(_count*2)]
-#define FETCH_S(_reg, _count)   ldrsh   _reg, [rPC, #(_count*2)]
+#define FETCH(_reg, _count)     ldrh    _reg, [rPC, #((_count)*2)]
+#define FETCH_S(_reg, _count)   ldrsh   _reg, [rPC, #((_count)*2)]
 
 /*
  * Fetch one byte from an offset past the current PC.  Pass in the same
  * "_count" as you would for FETCH, and an additional 0/1 indicating which
  * byte of the halfword you want (lo/hi).
  */
-#define FETCH_B(_reg, _count, _byte) ldrb     _reg, [rPC, #(_count*2+_byte)]
+#define FETCH_B(_reg, _count, _byte) ldrb     _reg, [rPC, #((_count)*2+(_byte))]
 
 /*
  * Put the instruction's opcode field into the specified register.
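
The only change to these FETCH macros is the extra parentheses around the
macro parameters, which matters as soon as a caller passes an expression
rather than a literal.  The classic pitfall, shown in C for brevity
(hypothetical macro names):

    /* without parentheses, operator precedence rewrites the arithmetic */
    #define OFFSET_BAD(_count)   (_count * 2)
    #define OFFSET_GOOD(_count)  ((_count) * 2)

    /* OFFSET_BAD(1 + 1)  expands to (1 + 1 * 2)   == 3   -- wrong
     * OFFSET_GOOD(1 + 1) expands to ((1 + 1) * 2) == 4   -- intended */
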
@@ -186,8 +186,8 @@
 #define SET_VREG(_reg, _vreg)   str     _reg, [rFP, _vreg, lsl #2]
 
 #if defined(WITH_JIT)
-#define GET_JIT_PROF_TABLE(_reg)    ldr     _reg,[rGLUE,#offGlue_pJitProfTable]
-#define GET_JIT_THRESHOLD(_reg)     ldr     _reg,[rGLUE,#offGlue_jitThreshold]
+#define GET_JIT_PROF_TABLE(_reg)    ldr _reg,[rSELF,#offThread_pJitProfTable]
+#define GET_JIT_THRESHOLD(_reg)     ldr _reg,[rSELF,#offThread_jitThreshold]
 #endif
 
 /*
@@ -266,7 +266,7 @@
 
 /*
  * On entry:
- *  r0  MterpGlue* glue
+ *  r0  Thread* self
  *
  * This function returns a boolean "changeInterp" value.  The return comes
  * via a call to dvmMterpStdBail().
@@ -284,29 +284,28 @@
     MTERP_ENTRY2
 
     /* save stack pointer, add magic word for debuggerd */
-    str     sp, [r0, #offGlue_bailPtr]  @ save SP for eventual return
+    str     sp, [r0, #offThread_bailPtr]  @ save SP for eventual return
 
     /* set up "named" registers, figure out entry point */
-    mov     rGLUE, r0                   @ set rGLUE
-    ldr     r1, [r0, #offGlue_entryPoint]   @ enum is 4 bytes in aapcs-EABI
-    LOAD_PC_FP_FROM_GLUE()              @ load rPC and rFP from "glue"
-    adr     rIBASE, dvmAsmInstructionStart  @ set rIBASE
+    mov     rSELF, r0                   @ set rSELF
+    ldr     r1, [r0, #offThread_entryPoint]   @ enum is 4 bytes in aapcs-EABI
+    LOAD_PC_FP_FROM_SELF()              @ load rPC and rFP from "thread"
+    ldr     rIBASE, [rSELF, #offThread_curHandlerTable] @ set rIBASE
     cmp     r1, #kInterpEntryInstr      @ usual case?
     bne     .Lnot_instr                 @ no, handle it
 
 #if defined(WITH_JIT)
 .LentryInstr:
-    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
     /* Entry is always a possible trace start */
     GET_JIT_PROF_TABLE(r0)
     FETCH_INST()
     mov     r1, #0                      @ prepare the value for the new state
-    str     r1, [r10, #offThread_inJitCodeCache] @ back to the interp land
+    str     r1, [rSELF, #offThread_inJitCodeCache] @ back to the interp land
     cmp     r0,#0                       @ is profiling disabled?
 #if !defined(WITH_SELF_VERIFICATION)
     bne     common_updateProfile        @ profiling is enabled
 #else
-    ldr     r2, [r10, #offThread_shadowSpace]   @ to find out the jit exit state
+    ldr     r2, [rSELF, #offThread_shadowSpace] @ to find out the jit exit state
     beq     1f                          @ profiling is disabled
     ldr     r3, [r2, #offShadowSpace_jitExitState]  @ jit exit state
     cmp     r3, #kSVSTraceSelect        @ hot trace following?
@@ -336,20 +335,20 @@
 
 #if defined(WITH_JIT)
 .Lnot_throw:
-    ldr     r10,[rGLUE, #offGlue_jitResumeNPC]
-    ldr     r2,[rGLUE, #offGlue_jitResumeDPC]
+    ldr     r10,[rSELF, #offThread_jitResumeNPC]
+    ldr     r2,[rSELF, #offThread_jitResumeDPC]
     cmp     r1, #kInterpEntryResume     @ resuming after Jit single-step?
     bne     .Lbad_arg
     cmp     rPC,r2
     bne     .LentryInstr                @ must have branched, don't resume
 #if defined(WITH_SELF_VERIFICATION)
-    @ glue->entryPoint will be set in dvmSelfVerificationSaveState
+    @ self->entryPoint will be set in dvmSelfVerificationSaveState
     b       jitSVShadowRunStart         @ re-enter the translation after the
                                         @ single-stepped instruction
     @noreturn
 #endif
     mov     r1, #kInterpEntryInstr
-    str     r1, [rGLUE, #offGlue_entryPoint]
+    str     r1, [rSELF, #offThread_entryPoint]
     bx      r10                         @ re-enter the translation
 #endif
 
@@ -359,6 +358,7 @@
     bl      printf
     bl      dvmAbort
     .fnend
+    .size   dvmMterpStdRun, .-dvmMterpStdRun
 
 
     .global dvmMterpStdBail
@@ -374,11 +374,11 @@
  * LR to PC.
  *
  * On entry:
- *  r0  MterpGlue* glue
+ *  r0  Thread* self
  *  r1  bool changeInterp
  */
 dvmMterpStdBail:
-    ldr     sp, [r0, #offGlue_bailPtr]      @ sp<- saved SP
+    ldr     sp, [r0, #offThread_bailPtr]      @ sp<- saved SP
     mov     r0, r1                          @ return the changeInterp value
     add     sp, sp, #4                      @ un-align 64
     ldmfd   sp!, {r4-r10,fp,pc}             @ restore 9 regs and return
@@ -563,7 +563,7 @@
     /* op vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
-    ldr     r0, [rGLUE, #offGlue_retval]    @ r0<- glue->retval.i
+    ldr     r0, [rSELF, #offThread_retval]    @ r0<- self->retval.i
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     SET_VREG(r0, r2)                    @ fp[AA]<- r0
     GOTO_OPCODE(ip)                     @ jump to next instruction
@@ -574,7 +574,7 @@
 /* File: armv5te/OP_MOVE_RESULT_WIDE.S */
     /* move-result-wide vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
-    add     r3, rGLUE, #offGlue_retval  @ r3<- &glue->retval
+    add     r3, rSELF, #offThread_retval  @ r3<- &self->retval
     add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
     ldmia   r3, {r0-r1}                 @ r0/r1<- retval.j
     FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
@@ -591,7 +591,7 @@
     /* op vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
-    ldr     r0, [rGLUE, #offGlue_retval]    @ r0<- glue->retval.i
+    ldr     r0, [rSELF, #offThread_retval]    @ r0<- self->retval.i
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     SET_VREG(r0, r2)                    @ fp[AA]<- r0
     GOTO_OPCODE(ip)                     @ jump to next instruction
@@ -602,14 +602,13 @@
 .L_OP_MOVE_EXCEPTION: /* 0x0d */
 /* File: armv5te/OP_MOVE_EXCEPTION.S */
     /* move-exception vAA */
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
     mov     r2, rINST, lsr #8           @ r2<- AA
-    ldr     r3, [r0, #offThread_exception]  @ r3<- dvmGetException bypass
+    ldr     r3, [rSELF, #offThread_exception]  @ r3<- dvmGetException bypass
     mov     r1, #0                      @ r1<- 0
     FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
     SET_VREG(r3, r2)                    @ fp[AA]<- exception obj
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
-    str     r1, [r0, #offThread_exception]  @ dvmClearException bypass
+    str     r1, [rSELF, #offThread_exception]  @ dvmClearException bypass
     GOTO_OPCODE(ip)                     @ jump to next instruction
 
 /* ------------------------------ */
@@ -623,7 +622,7 @@
 .L_OP_RETURN: /* 0x0f */
 /* File: armv5te/OP_RETURN.S */
     /*
-     * Return a 32-bit value.  Copies the return value into the "glue"
+     * Return a 32-bit value.  Copies the return value into the "thread"
      * structure, then jumps to the return handler.
      *
      * for: return, return-object
@@ -631,7 +630,7 @@
     /* op vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     GET_VREG(r0, r2)                    @ r0<- vAA
-    str     r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA
+    str     r0, [rSELF, #offThread_retval] @ retval.i <- vAA
     b       common_returnFromMethod
 
 /* ------------------------------ */
@@ -639,13 +638,13 @@
 .L_OP_RETURN_WIDE: /* 0x10 */
 /* File: armv5te/OP_RETURN_WIDE.S */
     /*
-     * Return a 64-bit value.  Copies the return value into the "glue"
+     * Return a 64-bit value.  Copies the return value into the "thread"
      * structure, then jumps to the return handler.
      */
     /* return-wide vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
-    add     r3, rGLUE, #offGlue_retval  @ r3<- &glue->retval
+    add     r3, rSELF, #offThread_retval  @ r3<- &self->retval
     ldmia   r2, {r0-r1}                 @ r0/r1 <- vAA/vAA+1
     stmia   r3, {r0-r1}                 @ retval<- r0/r1
     b       common_returnFromMethod
@@ -656,7 +655,7 @@
 /* File: armv5te/OP_RETURN_OBJECT.S */
 /* File: armv5te/OP_RETURN.S */
     /*
-     * Return a 32-bit value.  Copies the return value into the "glue"
+     * Return a 32-bit value.  Copies the return value into the "thread"
      * structure, then jumps to the return handler.
      *
      * for: return, return-object
@@ -664,7 +663,7 @@
     /* op vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     GET_VREG(r0, r2)                    @ r0<- vAA
-    str     r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA
+    str     r0, [rSELF, #offThread_retval] @ retval.i <- vAA
     b       common_returnFromMethod
 
 
@@ -790,7 +789,7 @@
 /* File: armv5te/OP_CONST_STRING.S */
     /* const/string vAA, String@BBBB */
     FETCH(r1, 1)                        @ r1<- BBBB
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]  @ r2<- self->methodClassDex
     mov     r9, rINST, lsr #8           @ r9<- AA
     ldr     r2, [r2, #offDvmDex_pResStrings]   @ r2<- dvmDex->pResStrings
     ldr     r0, [r2, r1, lsl #2]        @ r0<- pResStrings[BBBB]
@@ -808,7 +807,7 @@
     /* const/string vAA, String@BBBBBBBB */
     FETCH(r0, 1)                        @ r0<- bbbb (low)
     FETCH(r1, 2)                        @ r1<- BBBB (high)
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]  @ r2<- self->methodClassDex
     mov     r9, rINST, lsr #8           @ r9<- AA
     ldr     r2, [r2, #offDvmDex_pResStrings]   @ r2<- dvmDex->pResStrings
     orr     r1, r0, r1, lsl #16         @ r1<- BBBBbbbb
@@ -826,7 +825,7 @@
 /* File: armv5te/OP_CONST_CLASS.S */
     /* const/class vAA, Class@BBBB */
     FETCH(r1, 1)                        @ r1<- BBBB
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]  @ r2<- self->methodClassDex
     mov     r9, rINST, lsr #8           @ r9<- AA
     ldr     r2, [r2, #offDvmDex_pResClasses]   @ r2<- dvmDex->pResClasses
     ldr     r0, [r2, r1, lsl #2]        @ r0<- pResClasses[BBBB]
@@ -847,18 +846,12 @@
     /* monitor-enter vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     GET_VREG(r1, r2)                    @ r1<- vAA (object)
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    mov     r0, rSELF                   @ r0<- self
     cmp     r1, #0                      @ null object?
-    EXPORT_PC()                         @ need for precise GC, MONITOR_TRACKING
+    EXPORT_PC()                         @ need for precise GC
     beq     common_errNullObject        @ null object, throw an exception
     FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
     bl      dvmLockObject               @ call(self, obj)
-#ifdef WITH_DEADLOCK_PREDICTION /* implies WITH_MONITOR_TRACKING */
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
-    ldr     r1, [r0, #offThread_exception] @ check for exception
-    cmp     r1, #0
-    bne     common_exceptionThrown      @ exception raised, bail out
-#endif
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     GOTO_OPCODE(ip)                     @ jump to next instruction
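Note: in rough C terms the monitor-enter handler above does the following (a sketch under assumed names, with vregs standing in for the fp register array; the assembly is authoritative):

    Object* obj = (Object*)vregs[vAA];        /* GET_VREG(r1, r2)                */
    if (obj == NULL) {
        /* EXPORT_PC(); throw NullPointerException */
    } else {
        dvmLockObject(self, obj);             /* bl dvmLockObject: call(self, obj) */
    }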
 
@@ -879,7 +872,7 @@
     GET_VREG(r1, r2)                    @ r1<- vAA (object)
     cmp     r1, #0                      @ null object?
     beq     1f                          @ yes
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    mov     r0, rSELF                   @ r0<- self
     bl      dvmUnlockObject             @ r0<- success for unlock(self, obj)
     cmp     r0, #0                      @ failed?
     FETCH_ADVANCE_INST(1)               @ before throw: advance rPC, load rINST
@@ -901,7 +894,7 @@
     mov     r3, rINST, lsr #8           @ r3<- AA
     FETCH(r2, 1)                        @ r2<- BBBB
     GET_VREG(r9, r3)                    @ r9<- object
-    ldr     r0, [rGLUE, #offGlue_methodClassDex]    @ r0<- pDvmDex
+    ldr     r0, [rSELF, #offThread_methodClassDex]    @ r0<- pDvmDex
     cmp     r9, #0                      @ is object null?
     ldr     r0, [r0, #offDvmDex_pResClasses]    @ r0<- pDvmDex->pResClasses
     beq     .LOP_CHECK_CAST_okay            @ null obj, cast always succeeds
@@ -933,7 +926,7 @@
     GET_VREG(r0, r3)                    @ r0<- vB (object)
     and     r9, r9, #15                 @ r9<- A
     cmp     r0, #0                      @ is object null?
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- pDvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- pDvmDex
     beq     .LOP_INSTANCE_OF_store           @ null obj, not an instance, store r0
     FETCH(r3, 1)                        @ r3<- CCCC
     ldr     r2, [r2, #offDvmDex_pResClasses]    @ r2<- pDvmDex->pResClasses
@@ -973,7 +966,7 @@
      * Create a new instance of a class.
      */
     /* new-instance vAA, class@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
     ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
@@ -1003,12 +996,12 @@
     /* new-array vA, vB, class@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
     FETCH(r2, 1)                        @ r2<- CCCC
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     GET_VREG(r1, r0)                    @ r1<- vB (array length)
     ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
     cmp     r1, #0                      @ check length
     ldr     r0, [r3, r2, lsl #2]        @ r0<- resolved class
-    bmi     common_errNegativeArraySize @ negative length, bail
+    bmi     common_errNegativeArraySize @ negative length, bail - len in r1
     cmp     r0, #0                      @ already resolved?
     EXPORT_PC()                         @ req'd for resolve, alloc
     bne     .LOP_NEW_ARRAY_finish          @ resolved, continue
@@ -1025,7 +1018,7 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
     EXPORT_PC()                         @ need for resolve and alloc
@@ -1033,7 +1026,7 @@
     mov     r10, rINST, lsr #8          @ r10<- AA or BA
     cmp     r0, #0                      @ already resolved?
     bne     .LOP_FILLED_NEW_ARRAY_continue        @ yes, continue on
-8:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+8:  ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     mov     r2, #0                      @ r2<- false
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveClass             @ r0<- call(clazz, ref)
@@ -1053,7 +1046,7 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
     EXPORT_PC()                         @ need for resolve and alloc
@@ -1061,7 +1054,7 @@
     mov     r10, rINST, lsr #8          @ r10<- AA or BA
     cmp     r0, #0                      @ already resolved?
     bne     .LOP_FILLED_NEW_ARRAY_RANGE_continue        @ yes, continue on
-8:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+8:  ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     mov     r2, #0                      @ r2<- false
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveClass             @ r0<- call(clazz, ref)
@@ -1099,12 +1092,11 @@
     /* throw vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     GET_VREG(r1, r2)                    @ r1<- vAA (exception object)
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
     EXPORT_PC()                         @ exception handler can throw
     cmp     r1, #0                      @ null object?
     beq     common_errNullObject        @ yes, throw an NPE instead
     @ bypass dvmSetException, just store it
-    str     r1, [r0, #offThread_exception]  @ thread->exception<- obj
+    str     r1, [rSELF, #offThread_exception]  @ thread->exception<- obj
     b       common_exceptionThrown
 
 /* ------------------------------ */
@@ -2388,14 +2380,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2412,14 +2404,14 @@
      */
     /* iget-wide vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_WIDE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2439,14 +2431,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_OBJECT_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2468,14 +2460,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_BOOLEAN_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2497,14 +2489,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_BYTE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2526,14 +2518,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_CHAR_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2555,14 +2547,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_SHORT_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2582,14 +2574,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2603,14 +2595,14 @@
 /* File: armv5te/OP_IPUT_WIDE.S */
     /* iput-wide vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_WIDE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2629,14 +2621,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_OBJECT_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2657,14 +2649,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_BOOLEAN_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2686,14 +2678,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_BYTE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2715,14 +2707,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_CHAR_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2744,14 +2736,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_SHORT_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2770,7 +2762,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2793,7 +2785,7 @@
      * 64-bit SGET handler.
      */
     /* sget-wide vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2824,7 +2816,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2851,7 +2843,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2878,7 +2870,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2905,7 +2897,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2932,7 +2924,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2958,7 +2950,7 @@
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2981,7 +2973,7 @@
      * 64-bit SPUT handler.
      */
     /* sput-wide vAA, field@BBBB */
-    ldr     r0, [rGLUE, #offGlue_methodClassDex]  @ r0<- DvmDex
+    ldr     r0, [rSELF, #offThread_methodClassDex]  @ r0<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields
     mov     r9, rINST, lsr #8           @ r9<- AA
@@ -3011,13 +3003,13 @@
      * for: sput-object, sput-object-volatile
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_SPUT_OBJECT_finish          @ no, continue
-    ldr     r9, [rGLUE, #offGlue_method]    @ r9<- current method
+    ldr     r9, [rSELF, #offThread_method]    @ r9<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r9, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -3037,7 +3029,7 @@
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -3064,7 +3056,7 @@
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -3091,7 +3083,7 @@
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -3118,7 +3110,7 @@
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -3145,7 +3137,7 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
@@ -3156,7 +3148,7 @@
     cmp     r0, #0                      @ already resolved?
     EXPORT_PC()                         @ must export for invoke
     bne     .LOP_INVOKE_VIRTUAL_continue        @ yes, continue on
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     mov     r2, #METHOD_VIRTUAL         @ resolver method type
     bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
@@ -3176,7 +3168,7 @@
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     .if     (!0)
     and     r10, r10, #15               @ r10<- D (or stays CCCC)
     .endif
@@ -3185,7 +3177,7 @@
     GET_VREG(r2, r10)                   @ r2<- "this" ptr
     ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
     cmp     r2, #0                      @ null "this"?
-    ldr     r9, [rGLUE, #offGlue_method] @ r9<- current method
+    ldr     r9, [rSELF, #offThread_method] @ r9<- current method
     beq     common_errNullObject        @ null "this", throw exception
     cmp     r0, #0                      @ already resolved?
     ldr     r9, [r9, #offMethod_clazz]  @ r9<- method->clazz
@@ -3209,7 +3201,7 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
@@ -3237,14 +3229,14 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
     ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
     cmp     r0, #0                      @ already resolved?
     EXPORT_PC()                         @ must export for invoke
     bne     common_invokeMethodNoRange @ yes, continue on
-0:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+0:  ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     mov     r2, #METHOD_STATIC          @ resolver method type
     bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
@@ -3270,9 +3262,9 @@
     .endif
     EXPORT_PC()                         @ must export for invoke
     GET_VREG(r0, r2)                    @ r0<- first arg ("this")
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- methodClassDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- methodClassDex
     cmp     r0, #0                      @ null obj?
-    ldr     r2, [rGLUE, #offGlue_method]  @ r2<- method
+    ldr     r2, [rSELF, #offThread_method]  @ r2<- method
     beq     common_errNullObject        @ yes, fail
     ldr     r0, [r0, #offObject_clazz]  @ r0<- thisPtr->clazz
     bl      dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
@@ -3300,7 +3292,7 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
@@ -3311,7 +3303,7 @@
     cmp     r0, #0                      @ already resolved?
     EXPORT_PC()                         @ must export for invoke
     bne     .LOP_INVOKE_VIRTUAL_RANGE_continue        @ yes, continue on
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     mov     r2, #METHOD_VIRTUAL         @ resolver method type
     bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
@@ -3333,7 +3325,7 @@
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     .if     (!1)
     and     r10, r10, #15               @ r10<- D (or stays CCCC)
     .endif
@@ -3342,7 +3334,7 @@
     GET_VREG(r2, r10)                   @ r2<- "this" ptr
     ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
     cmp     r2, #0                      @ null "this"?
-    ldr     r9, [rGLUE, #offGlue_method] @ r9<- current method
+    ldr     r9, [rSELF, #offThread_method] @ r9<- current method
     beq     common_errNullObject        @ null "this", throw exception
     cmp     r0, #0                      @ already resolved?
     ldr     r9, [r9, #offMethod_clazz]  @ r9<- method->clazz
@@ -3368,7 +3360,7 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
@@ -3398,14 +3390,14 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
     ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
     cmp     r0, #0                      @ already resolved?
     EXPORT_PC()                         @ must export for invoke
     bne     common_invokeMethodRange @ yes, continue on
-0:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+0:  ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     mov     r2, #METHOD_STATIC          @ resolver method type
     bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
@@ -3433,9 +3425,9 @@
     .endif
     EXPORT_PC()                         @ must export for invoke
     GET_VREG(r0, r2)                    @ r0<- first arg ("this")
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- methodClassDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- methodClassDex
     cmp     r0, #0                      @ null obj?
-    ldr     r2, [rGLUE, #offGlue_method]  @ r2<- method
+    ldr     r2, [rSELF, #offThread_method]  @ r2<- method
     beq     common_errNullObject        @ yes, fail
     ldr     r0, [r0, #offObject_clazz]  @ r0<- thisPtr->clazz
     bl      dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
@@ -7093,14 +7085,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_VOLATILE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -7121,14 +7113,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_VOLATILE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -7148,7 +7140,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -7175,7 +7167,7 @@
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -7203,14 +7195,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_OBJECT_VOLATILE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -7229,14 +7221,14 @@
      */
     /* iget-wide vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_WIDE_VOLATILE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -7252,14 +7244,14 @@
 /* File: armv5te/OP_IPUT_WIDE.S */
     /* iput-wide vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_WIDE_VOLATILE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -7277,7 +7269,7 @@
      * 64-bit SGET handler.
      */
     /* sget-wide vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -7307,7 +7299,7 @@
      * 64-bit SPUT handler.
      */
     /* sput-wide vAA, field@BBBB */
-    ldr     r0, [rGLUE, #offGlue_methodClassDex]  @ r0<- DvmDex
+    ldr     r0, [rSELF, #offThread_methodClassDex]  @ r0<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields
     mov     r9, rINST, lsr #8           @ r9<- AA
@@ -7346,7 +7338,7 @@
      * exception is indicated by AA, with some detail provided by BBBB.
      */
     /* op AA, ref@BBBB */
-    ldr     r0, [rGLUE, #offGlue_method]    @ r0<- glue->method
+    ldr     r0, [rSELF, #offThread_method]    @ r0<- self->method
     FETCH(r2, 1)                        @ r2<- BBBB
     EXPORT_PC()                         @ export the PC
     mov     r1, rINST, lsr #8           @ r1<- AA
@@ -7369,11 +7361,11 @@
      */
     /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */
     FETCH(r10, 1)                       @ r10<- BBBB
-    add     r1, rGLUE, #offGlue_retval  @ r1<- &glue->retval
+    add     r1, rSELF, #offThread_retval  @ r1<- &self->retval
     EXPORT_PC()                         @ can throw
     sub     sp, sp, #8                  @ make room for arg, +64 bit align
     mov     r0, rINST, lsr #12          @ r0<- B
-    str     r1, [sp]                    @ push &glue->retval
+    str     r1, [sp]                    @ push &self->retval
     bl      .LOP_EXECUTE_INLINE_continue        @ make call; will return after
     add     sp, sp, #8                  @ pop stack
     cmp     r0, #0                      @ test boolean result of inline
@@ -7399,11 +7391,11 @@
      */
     /* [opt] execute-inline/range {vCCCC..v(CCCC+AA-1)}, inline@BBBB */
     FETCH(r10, 1)                       @ r10<- BBBB
-    add     r1, rGLUE, #offGlue_retval  @ r1<- &glue->retval
+    add     r1, rSELF, #offThread_retval  @ r1<- &self->retval
     EXPORT_PC()                         @ can throw
     sub     sp, sp, #8                  @ make room for arg, +64 bit align
     mov     r0, rINST, lsr #8           @ r0<- AA
-    str     r1, [sp]                    @ push &glue->retval
+    str     r1, [sp]                    @ push &self->retval
     bl      .LOP_EXECUTE_INLINE_RANGE_continue        @ make call; will return after
     add     sp, sp, #8                  @ pop stack
     cmp     r0, #0                      @ test boolean result of inline
@@ -7414,12 +7406,23 @@
 
 /* ------------------------------ */
     .balign 64
-.L_OP_INVOKE_DIRECT_EMPTY: /* 0xf0 */
-/* File: armv5te/OP_INVOKE_DIRECT_EMPTY.S */
+.L_OP_INVOKE_OBJECT_INIT_RANGE: /* 0xf0 */
+/* File: armv5te/OP_INVOKE_OBJECT_INIT_RANGE.S */
     /*
-     * invoke-direct-empty is a no-op in a "standard" interpreter.
+     * Invoke Object.<init> on an object.  In practice we know that
+     * Object's nullary constructor doesn't do anything, so we just
+     * skip it (we know a debugger isn't active).
      */
-    FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
+    FETCH(r1, 2)                  @ r1<- CCCC
+    GET_VREG(r0, r1)                    @ r0<- "this" ptr
+    cmp     r0, #0                      @ check for NULL
+    beq     common_errNullObject        @ export PC and throw NPE
+    ldr     r1, [r0, #offObject_clazz]  @ r1<- obj->clazz
+    ldr     r2, [r1, #offClassObject_accessFlags] @ r2<- clazz->accessFlags
+    tst     r2, #CLASS_ISFINALIZABLE    @ is this class finalizable?
+    beq     1f                          @ nope, done
+    bl      dvmSetFinalizable           @ call dvmSetFinalizable(obj)
+1:  FETCH_ADVANCE_INST(2+1)       @ advance to next instr, load rINST
     GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
     GOTO_OPCODE(ip)                     @ execute it
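Note: under the assumptions spelled out in the comment above (Object's nullary constructor is empty and no debugger is attached), the handler amounts to the following C sketch (names assumed; the assembly is authoritative):

    Object* thisPtr = (Object*)vregs[vCCCC];             /* first argument, "this" */
    if (thisPtr == NULL) {
        /* EXPORT_PC(); throw NullPointerException */
    } else if (thisPtr->clazz->accessFlags & CLASS_ISFINALIZABLE) {
        dvmSetFinalizable(thisPtr);                      /* note finalizability    */
    }
    /* the empty Object.<init> body itself is never invoked */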
 
@@ -7541,7 +7544,7 @@
     beq     common_errNullObject        @ object was null
     and     r2, r2, #15
     GET_VREG(r0, r2)                    @ r0<- fp[A]
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
     str     r0, [r3, r1]                @ obj.field (always 32 bits)<- r0
     cmp     r0, #0
@@ -7613,7 +7616,7 @@
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     .if     (!0)
     and     r10, r10, #15               @ r10<- D (or stays CCCC)
     .endif
@@ -7641,7 +7644,7 @@
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     .if     (!1)
     and     r10, r10, #15               @ r10<- D (or stays CCCC)
     .endif
@@ -7669,14 +7672,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_OBJECT_VOLATILE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -7696,7 +7699,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -7723,13 +7726,13 @@
      * for: sput-object, sput-object-volatile
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_SPUT_OBJECT_VOLATILE_finish          @ no, continue
-    ldr     r9, [rGLUE, #offGlue_method]    @ r9<- current method
+    ldr     r9, [rSELF, #offThread_method]    @ r9<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r9, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -7743,10 +7746,3110 @@
     .balign 64
 .L_OP_DISPATCH_FF: /* 0xff */
 /* File: armv5te/OP_DISPATCH_FF.S */
+    mov     ip, rINST, lsr #8           @ ip<- extended opcode
+    add     ip, ip, #256                @ add offset for extended opcodes
+    GOTO_OPCODE(ip)                     @ go to proper extended handler
+
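+    /*
+     * Illustrative sketch of the dispatch above (C-like; the names are
+     * placeholders, not part of the generated handler): the low byte of
+     * the code unit is 0xff, and the high byte selects one of the 256
+     * extended handlers that follow the standard opcodes in the table.
+     *
+     *   unsigned extOp = (inst >> 8) & 0xff;   // high byte of the unit
+     *   handlerTable[0x100 + extOp]();         // skip the 256 base ops
+     */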
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_CONST_CLASS_JUMBO: /* 0x100 */
+/* File: armv5te/OP_CONST_CLASS_JUMBO.S */
+    /* const-class/jumbo vBBBB, Class@AAAAAAAA */
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- self->methodClassDex
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResClasses]   @ r2<- dvmDex->pResClasses
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    FETCH(r9, 3)                        @ r9<- BBBB
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResClasses[AAAAaaaa]
+    cmp     r0, #0                      @ not yet resolved?
+    beq     .LOP_CONST_CLASS_JUMBO_resolve
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vBBBB<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
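+    /*
+     * Sketch of the index reconstruction above (C-like; names follow the
+     * comments and are illustrative, not verbatim interpreter source):
+     * the two 16-bit code units are merged into the 32-bit class
+     * reference used to index the resolved-classes table.
+     *
+     *   u4 ref = (u4)lo | ((u4)hi << 16);              // AAAAaaaa
+     *   ClassObject* clazz = pDvmDex->pResClasses[ref];
+     *   if (clazz == NULL) { /* resolve slow path */ }
+     */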
+/* ------------------------------ */
+    .balign 64
+.L_OP_CHECK_CAST_JUMBO: /* 0x101 */
+/* File: armv5te/OP_CHECK_CAST_JUMBO.S */
+    /*
+     * Check to see if a cast from one class to another is allowed.
+     */
+    /* check-cast/jumbo vBBBB, class@AAAAAAAA */
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r3, 3)                        @ r3<- BBBB
+    orr     r2, r0, r2, lsl #16         @ r2<- AAAAaaaa
+    GET_VREG(r9, r3)                    @ r9<- object
+    ldr     r0, [rSELF, #offThread_methodClassDex]    @ r0<- pDvmDex
+    cmp     r9, #0                      @ is object null?
+    ldr     r0, [r0, #offDvmDex_pResClasses]    @ r0<- pDvmDex->pResClasses
+    beq     .LOP_CHECK_CAST_JUMBO_okay            @ null obj, cast always succeeds
+    ldr     r1, [r0, r2, lsl #2]        @ r1<- resolved class
+    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
+    cmp     r1, #0                      @ have we resolved this before?
+    beq     .LOP_CHECK_CAST_JUMBO_resolve         @ not resolved, do it now
+.LOP_CHECK_CAST_JUMBO_resolved:
+    cmp     r0, r1                      @ same class (trivial success)?
+    bne     .LOP_CHECK_CAST_JUMBO_fullcheck       @ no, do full check
+    b       .LOP_CHECK_CAST_JUMBO_okay            @ yes, finish up
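+    /*
+     * Fast-path logic above as a C-like sketch (the labels stand in for
+     * the out-of-line continuations; names are illustrative):
+     *
+     *   if (obj == NULL) goto okay;                  // null always casts
+     *   ClassObject* clazz = pDvmDex->pResClasses[ref];
+     *   if (clazz == NULL) goto resolve;             // not resolved yet
+     *   if (obj->clazz == clazz) goto okay;          // trivial success
+     *   goto fullcheck;                              // full type check
+     */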
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INSTANCE_OF_JUMBO: /* 0x102 */
+/* File: armv5te/OP_INSTANCE_OF_JUMBO.S */
+    /*
+     * Check to see if an object reference is an instance of a class.
+     *
+     * Most common situation is a non-null object, being compared against
+     * an already-resolved class.
+     *
+     * TODO: convert most of this into a common subroutine, shared with
+     *       OP_INSTANCE_OF.S.
+     */
+    /* instance-of/jumbo vBBBB, vCCCC, class@AAAAAAAA */
+    FETCH(r3, 4)                        @ r3<- vCCCC
+    FETCH(r9, 3)                        @ r9<- vBBBB
+    GET_VREG(r0, r3)                    @ r0<- vCCCC (object)
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- pDvmDex
+    cmp     r0, #0                      @ is object null?
+    beq     .LOP_INSTANCE_OF_JUMBO_store           @ null obj, not an instance, store r0
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r3, 2)                        @ r3<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResClasses]    @ r2<- pDvmDex->pResClasses
+    orr     r3, r1, r3, lsl #16         @ r3<- AAAAaaaa
+    ldr     r1, [r2, r3, lsl #2]        @ r1<- resolved class
+    ldr     r0, [r0, #offObject_clazz]  @ r0<- obj->clazz
+    cmp     r1, #0                      @ have we resolved this before?
+    beq     .LOP_INSTANCE_OF_JUMBO_resolve         @ not resolved, do it now
+    b       .LOP_INSTANCE_OF_JUMBO_resolved        @ resolved, continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_NEW_INSTANCE_JUMBO: /* 0x103 */
+/* File: armv5te/OP_NEW_INSTANCE_JUMBO.S */
+    /*
+     * Create a new instance of a class.
+     */
+    /* new-instance/jumbo vBBBB, class@AAAAAAAA */
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
+    EXPORT_PC()                         @ req'd for init, resolve, alloc
+    cmp     r0, #0                      @ already resolved?
+    beq     .LOP_NEW_INSTANCE_JUMBO_resolve         @ no, resolve it now
+.LOP_NEW_INSTANCE_JUMBO_resolved:   @ r0=class
+    ldrb    r1, [r0, #offClassObject_status]    @ r1<- ClassStatus enum
+    cmp     r1, #CLASS_INITIALIZED      @ has class been initialized?
+    bne     .LOP_NEW_INSTANCE_JUMBO_needinit        @ no, init class now
+.LOP_NEW_INSTANCE_JUMBO_initialized: @ r0=class
+    mov     r1, #ALLOC_DONT_TRACK       @ flags for alloc call
+    bl      dvmAllocObject              @ r0<- new object
+    b       .LOP_NEW_INSTANCE_JUMBO_finish          @ continue
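+    /*
+     * Rough C outline of the path above (resolve/needinit stand for the
+     * out-of-line slow paths; names are illustrative):
+     *
+     *   ClassObject* clazz = pDvmDex->pResClasses[ref];
+     *   if (clazz == NULL) goto resolve;             // not resolved yet
+     *   if (clazz->status != CLASS_INITIALIZED) goto needinit;
+     *   Object* obj = dvmAllocObject(clazz, ALLOC_DONT_TRACK);
+     */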
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_NEW_ARRAY_JUMBO: /* 0x104 */
+/* File: armv5te/OP_NEW_ARRAY_JUMBO.S */
+    /*
+     * Allocate an array of objects, specified with the array class
+     * and a count.
+     *
+     * The verifier guarantees that this is an array class, so we don't
+     * check for it here.
+     */
+    /* new-array/jumbo vBBBB, vCCCC, class@AAAAAAAA */
+    FETCH(r2, 1)                        @ r2<- aaaa (lo)
+    FETCH(r3, 2)                        @ r3<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- vCCCC
+    orr     r2, r2, r3, lsl #16         @ r2<- AAAAaaaa
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    GET_VREG(r1, r0)                    @ r1<- vCCCC (array length)
+    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
+    cmp     r1, #0                      @ check length
+    ldr     r0, [r3, r2, lsl #2]        @ r0<- resolved class
+    bmi     common_errNegativeArraySize @ negative length, bail - len in r1
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ req'd for resolve, alloc
+    bne     .LOP_NEW_ARRAY_JUMBO_finish          @ resolved, continue
+    b       .LOP_NEW_ARRAY_JUMBO_resolve         @ do resolve now
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_FILLED_NEW_ARRAY_JUMBO: /* 0x105 */
+/* File: armv5te/OP_FILLED_NEW_ARRAY_JUMBO.S */
+    /*
+     * Create a new array with elements filled from registers.
+     *
+     * TODO: convert most of this into a common subroutine, shared with
+     *       OP_FILLED_NEW_ARRAY.S.
+     */
+    /* filled-new-array/jumbo {vCCCC..v(CCCC+BBBB-1)}, type@AAAAAAAA */
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
+    EXPORT_PC()                         @ need for resolve and alloc
+    cmp     r0, #0                      @ already resolved?
+    bne     .LOP_FILLED_NEW_ARRAY_JUMBO_continue        @ yes, continue on
+8:  ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- call(clazz, ref)
+    cmp     r0, #0                      @ got null?
+    beq     common_exceptionThrown      @ yes, handle exception
+    b       .LOP_FILLED_NEW_ARRAY_JUMBO_continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_JUMBO: /* 0x106 */
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_JUMBO_resolved        @ resolved, continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_WIDE_JUMBO: /* 0x107 */
+/* File: armv5te/OP_IGET_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit instance field get.
+     */
+    /* iget-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_WIDE_JUMBO_finish          @ no, already resolved
+    ldr     r2, [rSELF, #offThread_method] @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_WIDE_JUMBO_resolved        @ resolved, continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_OBJECT_JUMBO: /* 0x108 */
+/* File: armv5te/OP_IGET_OBJECT_JUMBO.S */
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_OBJECT_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_OBJECT_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_BOOLEAN_JUMBO: /* 0x109 */
+/* File: armv5te/OP_IGET_BOOLEAN_JUMBO.S */
+@include "armv5te/OP_IGET_JUMBO.S" { "load":"ldrb", "sqnum":"1" }
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_BOOLEAN_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_BOOLEAN_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_BYTE_JUMBO: /* 0x10a */
+/* File: armv5te/OP_IGET_BYTE_JUMBO.S */
+@include "armv5te/OP_IGET_JUMBO.S" { "load":"ldrsb", "sqnum":"2" }
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_BYTE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_BYTE_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_CHAR_JUMBO: /* 0x10b */
+/* File: armv5te/OP_IGET_CHAR_JUMBO.S */
+@include "armv5te/OP_IGET_JUMBO.S" { "load":"ldrh", "sqnum":"3" }
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_CHAR_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_CHAR_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_SHORT_JUMBO: /* 0x10c */
+/* File: armv5te/OP_IGET_SHORT_JUMBO.S */
+@include "armv5te/OP_IGET_JUMBO.S" { "load":"ldrsh", "sqnum":"4" }
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_SHORT_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_SHORT_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_JUMBO: /* 0x10d */
+/* File: armv5te/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_JUMBO_resolved        @ resolved, continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_WIDE_JUMBO: /* 0x10e */
+/* File: armv5te/OP_IPUT_WIDE_JUMBO.S */
+    /* iput-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_WIDE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_WIDE_JUMBO_resolved        @ resolved, continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_OBJECT_JUMBO: /* 0x10f */
+/* File: armv5te/OP_IPUT_OBJECT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     */
+    /* iput-object/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_OBJECT_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_OBJECT_JUMBO_resolved        @ resolved, continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_BOOLEAN_JUMBO: /* 0x110 */
+/* File: armv5te/OP_IPUT_BOOLEAN_JUMBO.S */
+@include "armv5te/OP_IPUT_JUMBO.S" { "store":"strb", "sqnum":"1" }
+/* File: armv5te/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_BOOLEAN_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_BOOLEAN_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_BYTE_JUMBO: /* 0x111 */
+/* File: armv5te/OP_IPUT_BYTE_JUMBO.S */
+@include "armv5te/OP_IPUT_JUMBO.S" { "store":"strb", "sqnum":"2" }
+/* File: armv5te/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_BYTE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_BYTE_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_CHAR_JUMBO: /* 0x112 */
+/* File: armv5te/OP_IPUT_CHAR_JUMBO.S */
+@include "armv5te/OP_IPUT_JUMBO.S" { "store":"strh", "sqnum":"3" }
+/* File: armv5te/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_CHAR_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_CHAR_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_SHORT_JUMBO: /* 0x113 */
+/* File: armv5te/OP_IPUT_SHORT_JUMBO.S */
+@include "armv5te/OP_IPUT_JUMBO.S" { "store":"strh", "sqnum":"4" }
+/* File: armv5te/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_SHORT_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_SHORT_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_JUMBO: /* 0x114 */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_WIDE_JUMBO: /* 0x115 */
+/* File: armv5te/OP_SGET_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit SGET handler.
+     */
+    /* sget-wide/jumbo vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_WIDE_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_WIDE_JUMBO_finish:
+    FETCH(r9, 3)                        @ r9<- BBBB
+    .if 0
+    add     r0, r0, #offStaticField_value @ r0<- pointer to data
+    bl      dvmQuasiAtomicRead64        @ r0/r1<- contents of field
+    .else
+    ldrd    r0, [r0, #offStaticField_value] @ r0/r1<- field value (aligned)
+    .endif
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BBBB]
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    stmia   r9, {r0-r1}                 @ vBBBB/vBBBB+1<- r0/r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
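+    /*
+     * Note on the ".if 0" block above: this is the non-volatile variant
+     * of the template, so the 64-bit field is read with a plain ldrd;
+     * the volatile flavour would take the dvmQuasiAtomicRead64 path to
+     * keep the read atomic.
+     */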
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_OBJECT_JUMBO: /* 0x116 */
+/* File: armv5te/OP_SGET_OBJECT_JUMBO.S */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_OBJECT_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_OBJECT_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_BOOLEAN_JUMBO: /* 0x117 */
+/* File: armv5te/OP_SGET_BOOLEAN_JUMBO.S */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_BOOLEAN_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_BOOLEAN_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_BYTE_JUMBO: /* 0x118 */
+/* File: armv5te/OP_SGET_BYTE_JUMBO.S */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_BYTE_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_BYTE_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_CHAR_JUMBO: /* 0x119 */
+/* File: armv5te/OP_SGET_CHAR_JUMBO.S */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_CHAR_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_CHAR_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_SHORT_JUMBO: /* 0x11a */
+/* File: armv5te/OP_SGET_SHORT_JUMBO.S */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_SHORT_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_SHORT_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_JUMBO: /* 0x11b */
+/* File: armv5te/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str     r1, [r0, #offStaticField_value] @ field<- vBBBB
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_WIDE_JUMBO: /* 0x11c */
+/* File: armv5te/OP_SPUT_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit SPUT handler.
+     */
+    /* sput-wide/jumbo vBBBB, field@AAAAAAAA */
+    ldr     r0, [rSELF, #offThread_methodClassDex]  @ r0<- DvmDex
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    ldr     r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    FETCH(r9, 3)                        @ r9<- BBBB
+    ldr     r2, [r0, r1, lsl #2]        @ r2<- resolved StaticField ptr
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BBBB]
+    cmp     r2, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_WIDE_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_WIDE_JUMBO_finish: @ field ptr in r2, BBBB in r9
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vBBBB/vBBBB+1
+    GET_INST_OPCODE(r10)                @ extract opcode from rINST
+    .if 0
+    add     r2, r2, #offStaticField_value @ r2<- pointer to data
+    bl      dvmQuasiAtomicSwap64        @ stores r0/r1 into addr r2
+    .else
+    strd    r0, [r2, #offStaticField_value] @ field<- vBBBB/vBBBB+1
+    .endif
+    GOTO_OPCODE(r10)                    @ jump to next instruction
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_OBJECT_JUMBO: /* 0x11d */
+/* File: armv5te/OP_SPUT_OBJECT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler for objects
+     */
+    /* sput-object/jumbo vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_SPUT_OBJECT_JUMBO_finish          @ no, continue
+    ldr     r9, [rSELF, #offThread_method]    @ r9<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r9, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_OBJECT_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_BOOLEAN_JUMBO: /* 0x11e */
+/* File: armv5te/OP_SPUT_BOOLEAN_JUMBO.S */
+/* File: armv5te/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_BOOLEAN_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_BOOLEAN_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str     r1, [r0, #offStaticField_value] @ field<- vBBBB
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_BYTE_JUMBO: /* 0x11f */
+/* File: armv5te/OP_SPUT_BYTE_JUMBO.S */
+/* File: armv5te/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_BYTE_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_BYTE_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str     r1, [r0, #offStaticField_value] @ field<- vBBBB
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_CHAR_JUMBO: /* 0x120 */
+/* File: armv5te/OP_SPUT_CHAR_JUMBO.S */
+/* File: armv5te/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_CHAR_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_CHAR_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str     r1, [r0, #offStaticField_value] @ field<- vBBBB
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_SHORT_JUMBO: /* 0x121 */
+/* File: armv5te/OP_SPUT_SHORT_JUMBO.S */
+/* File: armv5te/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_SHORT_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_SHORT_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str     r1, [r0, #offStaticField_value] @ field<- vBBBB
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_VIRTUAL_JUMBO: /* 0x122 */
+/* File: armv5te/OP_INVOKE_VIRTUAL_JUMBO.S */
+    /*
+     * Handle a virtual method call.
+     */
+    /* invoke-virtual/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ must export for invoke
+    bne     .LOP_INVOKE_VIRTUAL_JUMBO_continue        @ yes, continue on
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    mov     r2, #METHOD_VIRTUAL         @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    bne     .LOP_INVOKE_VIRTUAL_JUMBO_continue        @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_SUPER_JUMBO: /* 0x123 */
+/* File: armv5te/OP_INVOKE_SUPER_JUMBO.S */
+    /*
+     * Handle a "super" method call.
+     */
+    /* invoke-super/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    FETCH(r10, 4)                       @ r10<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    GET_VREG(r2, r10)                   @ r2<- "this" ptr
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
+    cmp     r2, #0                      @ null "this"?
+    ldr     r9, [rSELF, #offThread_method] @ r9<- current method
+    beq     common_errNullObject        @ null "this", throw exception
+    cmp     r0, #0                      @ already resolved?
+    ldr     r9, [r9, #offMethod_clazz]  @ r9<- method->clazz
+    EXPORT_PC()                         @ must export for invoke
+    bne     .LOP_INVOKE_SUPER_JUMBO_continue        @ resolved, continue on
+    b       .LOP_INVOKE_SUPER_JUMBO_resolve         @ do resolve now
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_DIRECT_JUMBO: /* 0x124 */
+/* File: armv5te/OP_INVOKE_DIRECT_JUMBO.S */
+    /*
+     * Handle a direct method call.
+     *
+     * (We could defer the "is 'this' pointer null" test to the common
+     * method invocation code, and use a flag to indicate that static
+     * calls don't count.  If we do this as part of copying the arguments
+     * out we could avoid loading the first arg twice.)
+     *
+     */
+    /* invoke-direct/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    FETCH(r10, 4)                       @ r10<- CCCC
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ must export for invoke
+    GET_VREG(r2, r10)                   @ r2<- "this" ptr
+    beq     .LOP_INVOKE_DIRECT_JUMBO_resolve         @ not resolved, do it now
+.LOP_INVOKE_DIRECT_JUMBO_finish:
+    cmp     r2, #0                      @ null "this" ref?
+    bne     common_invokeMethodJumbo    @ no, continue on
+    b       common_errNullObject        @ yes, throw exception
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_STATIC_JUMBO: /* 0x125 */
+/* File: armv5te/OP_INVOKE_STATIC_JUMBO.S */
+    /*
+     * Handle a static method call.
+     */
+    /* invoke-static/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ must export for invoke
+    bne     common_invokeMethodJumbo    @ yes, continue on
+0:  ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    mov     r2, #METHOD_STATIC          @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    bne     common_invokeMethodJumbo    @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_INTERFACE_JUMBO: /* 0x126 */
+/* File: armv5te/OP_INVOKE_INTERFACE_JUMBO.S */
+    /*
+     * Handle an interface method call.
+     */
+    /* invoke-interface/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    FETCH(r2, 4)                        @ r2<- CCCC
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    EXPORT_PC()                         @ must export for invoke
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    GET_VREG(r0, r2)                    @ r0<- first arg ("this")
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- methodClassDex
+    cmp     r0, #0                      @ null obj?
+    ldr     r2, [rSELF, #offThread_method]  @ r2<- method
+    beq     common_errNullObject        @ yes, fail
+    ldr     r0, [r0, #offObject_clazz]  @ r0<- thisPtr->clazz
+    bl      dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
+    cmp     r0, #0                      @ failed?
+    beq     common_exceptionThrown      @ yes, handle exception
+    b       common_invokeMethodJumbo    @ jump to common handler
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_27FF: /* 0x127 */
+/* File: armv5te/OP_UNUSED_27FF.S */
 /* File: armv5te/unused.S */
     bl      common_abort
 
 
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_28FF: /* 0x128 */
+/* File: armv5te/OP_UNUSED_28FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_29FF: /* 0x129 */
+/* File: armv5te/OP_UNUSED_29FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2AFF: /* 0x12a */
+/* File: armv5te/OP_UNUSED_2AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2BFF: /* 0x12b */
+/* File: armv5te/OP_UNUSED_2BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2CFF: /* 0x12c */
+/* File: armv5te/OP_UNUSED_2CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2DFF: /* 0x12d */
+/* File: armv5te/OP_UNUSED_2DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2EFF: /* 0x12e */
+/* File: armv5te/OP_UNUSED_2EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2FFF: /* 0x12f */
+/* File: armv5te/OP_UNUSED_2FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_30FF: /* 0x130 */
+/* File: armv5te/OP_UNUSED_30FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_31FF: /* 0x131 */
+/* File: armv5te/OP_UNUSED_31FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_32FF: /* 0x132 */
+/* File: armv5te/OP_UNUSED_32FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_33FF: /* 0x133 */
+/* File: armv5te/OP_UNUSED_33FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_34FF: /* 0x134 */
+/* File: armv5te/OP_UNUSED_34FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_35FF: /* 0x135 */
+/* File: armv5te/OP_UNUSED_35FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_36FF: /* 0x136 */
+/* File: armv5te/OP_UNUSED_36FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_37FF: /* 0x137 */
+/* File: armv5te/OP_UNUSED_37FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_38FF: /* 0x138 */
+/* File: armv5te/OP_UNUSED_38FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_39FF: /* 0x139 */
+/* File: armv5te/OP_UNUSED_39FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3AFF: /* 0x13a */
+/* File: armv5te/OP_UNUSED_3AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3BFF: /* 0x13b */
+/* File: armv5te/OP_UNUSED_3BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3CFF: /* 0x13c */
+/* File: armv5te/OP_UNUSED_3CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3DFF: /* 0x13d */
+/* File: armv5te/OP_UNUSED_3DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3EFF: /* 0x13e */
+/* File: armv5te/OP_UNUSED_3EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3FFF: /* 0x13f */
+/* File: armv5te/OP_UNUSED_3FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_40FF: /* 0x140 */
+/* File: armv5te/OP_UNUSED_40FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_41FF: /* 0x141 */
+/* File: armv5te/OP_UNUSED_41FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_42FF: /* 0x142 */
+/* File: armv5te/OP_UNUSED_42FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_43FF: /* 0x143 */
+/* File: armv5te/OP_UNUSED_43FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_44FF: /* 0x144 */
+/* File: armv5te/OP_UNUSED_44FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_45FF: /* 0x145 */
+/* File: armv5te/OP_UNUSED_45FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_46FF: /* 0x146 */
+/* File: armv5te/OP_UNUSED_46FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_47FF: /* 0x147 */
+/* File: armv5te/OP_UNUSED_47FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_48FF: /* 0x148 */
+/* File: armv5te/OP_UNUSED_48FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_49FF: /* 0x149 */
+/* File: armv5te/OP_UNUSED_49FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4AFF: /* 0x14a */
+/* File: armv5te/OP_UNUSED_4AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4BFF: /* 0x14b */
+/* File: armv5te/OP_UNUSED_4BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4CFF: /* 0x14c */
+/* File: armv5te/OP_UNUSED_4CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4DFF: /* 0x14d */
+/* File: armv5te/OP_UNUSED_4DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4EFF: /* 0x14e */
+/* File: armv5te/OP_UNUSED_4EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4FFF: /* 0x14f */
+/* File: armv5te/OP_UNUSED_4FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_50FF: /* 0x150 */
+/* File: armv5te/OP_UNUSED_50FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_51FF: /* 0x151 */
+/* File: armv5te/OP_UNUSED_51FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_52FF: /* 0x152 */
+/* File: armv5te/OP_UNUSED_52FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_53FF: /* 0x153 */
+/* File: armv5te/OP_UNUSED_53FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_54FF: /* 0x154 */
+/* File: armv5te/OP_UNUSED_54FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_55FF: /* 0x155 */
+/* File: armv5te/OP_UNUSED_55FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_56FF: /* 0x156 */
+/* File: armv5te/OP_UNUSED_56FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_57FF: /* 0x157 */
+/* File: armv5te/OP_UNUSED_57FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_58FF: /* 0x158 */
+/* File: armv5te/OP_UNUSED_58FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_59FF: /* 0x159 */
+/* File: armv5te/OP_UNUSED_59FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5AFF: /* 0x15a */
+/* File: armv5te/OP_UNUSED_5AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5BFF: /* 0x15b */
+/* File: armv5te/OP_UNUSED_5BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5CFF: /* 0x15c */
+/* File: armv5te/OP_UNUSED_5CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5DFF: /* 0x15d */
+/* File: armv5te/OP_UNUSED_5DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5EFF: /* 0x15e */
+/* File: armv5te/OP_UNUSED_5EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5FFF: /* 0x15f */
+/* File: armv5te/OP_UNUSED_5FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_60FF: /* 0x160 */
+/* File: armv5te/OP_UNUSED_60FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_61FF: /* 0x161 */
+/* File: armv5te/OP_UNUSED_61FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_62FF: /* 0x162 */
+/* File: armv5te/OP_UNUSED_62FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_63FF: /* 0x163 */
+/* File: armv5te/OP_UNUSED_63FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_64FF: /* 0x164 */
+/* File: armv5te/OP_UNUSED_64FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_65FF: /* 0x165 */
+/* File: armv5te/OP_UNUSED_65FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_66FF: /* 0x166 */
+/* File: armv5te/OP_UNUSED_66FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_67FF: /* 0x167 */
+/* File: armv5te/OP_UNUSED_67FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_68FF: /* 0x168 */
+/* File: armv5te/OP_UNUSED_68FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_69FF: /* 0x169 */
+/* File: armv5te/OP_UNUSED_69FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6AFF: /* 0x16a */
+/* File: armv5te/OP_UNUSED_6AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6BFF: /* 0x16b */
+/* File: armv5te/OP_UNUSED_6BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6CFF: /* 0x16c */
+/* File: armv5te/OP_UNUSED_6CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6DFF: /* 0x16d */
+/* File: armv5te/OP_UNUSED_6DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6EFF: /* 0x16e */
+/* File: armv5te/OP_UNUSED_6EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6FFF: /* 0x16f */
+/* File: armv5te/OP_UNUSED_6FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_70FF: /* 0x170 */
+/* File: armv5te/OP_UNUSED_70FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_71FF: /* 0x171 */
+/* File: armv5te/OP_UNUSED_71FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_72FF: /* 0x172 */
+/* File: armv5te/OP_UNUSED_72FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_73FF: /* 0x173 */
+/* File: armv5te/OP_UNUSED_73FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_74FF: /* 0x174 */
+/* File: armv5te/OP_UNUSED_74FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_75FF: /* 0x175 */
+/* File: armv5te/OP_UNUSED_75FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_76FF: /* 0x176 */
+/* File: armv5te/OP_UNUSED_76FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_77FF: /* 0x177 */
+/* File: armv5te/OP_UNUSED_77FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_78FF: /* 0x178 */
+/* File: armv5te/OP_UNUSED_78FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_79FF: /* 0x179 */
+/* File: armv5te/OP_UNUSED_79FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7AFF: /* 0x17a */
+/* File: armv5te/OP_UNUSED_7AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7BFF: /* 0x17b */
+/* File: armv5te/OP_UNUSED_7BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7CFF: /* 0x17c */
+/* File: armv5te/OP_UNUSED_7CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7DFF: /* 0x17d */
+/* File: armv5te/OP_UNUSED_7DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7EFF: /* 0x17e */
+/* File: armv5te/OP_UNUSED_7EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7FFF: /* 0x17f */
+/* File: armv5te/OP_UNUSED_7FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_80FF: /* 0x180 */
+/* File: armv5te/OP_UNUSED_80FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_81FF: /* 0x181 */
+/* File: armv5te/OP_UNUSED_81FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_82FF: /* 0x182 */
+/* File: armv5te/OP_UNUSED_82FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_83FF: /* 0x183 */
+/* File: armv5te/OP_UNUSED_83FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_84FF: /* 0x184 */
+/* File: armv5te/OP_UNUSED_84FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_85FF: /* 0x185 */
+/* File: armv5te/OP_UNUSED_85FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_86FF: /* 0x186 */
+/* File: armv5te/OP_UNUSED_86FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_87FF: /* 0x187 */
+/* File: armv5te/OP_UNUSED_87FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_88FF: /* 0x188 */
+/* File: armv5te/OP_UNUSED_88FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_89FF: /* 0x189 */
+/* File: armv5te/OP_UNUSED_89FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8AFF: /* 0x18a */
+/* File: armv5te/OP_UNUSED_8AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8BFF: /* 0x18b */
+/* File: armv5te/OP_UNUSED_8BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8CFF: /* 0x18c */
+/* File: armv5te/OP_UNUSED_8CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8DFF: /* 0x18d */
+/* File: armv5te/OP_UNUSED_8DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8EFF: /* 0x18e */
+/* File: armv5te/OP_UNUSED_8EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8FFF: /* 0x18f */
+/* File: armv5te/OP_UNUSED_8FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_90FF: /* 0x190 */
+/* File: armv5te/OP_UNUSED_90FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_91FF: /* 0x191 */
+/* File: armv5te/OP_UNUSED_91FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_92FF: /* 0x192 */
+/* File: armv5te/OP_UNUSED_92FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_93FF: /* 0x193 */
+/* File: armv5te/OP_UNUSED_93FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_94FF: /* 0x194 */
+/* File: armv5te/OP_UNUSED_94FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_95FF: /* 0x195 */
+/* File: armv5te/OP_UNUSED_95FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_96FF: /* 0x196 */
+/* File: armv5te/OP_UNUSED_96FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_97FF: /* 0x197 */
+/* File: armv5te/OP_UNUSED_97FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_98FF: /* 0x198 */
+/* File: armv5te/OP_UNUSED_98FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_99FF: /* 0x199 */
+/* File: armv5te/OP_UNUSED_99FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9AFF: /* 0x19a */
+/* File: armv5te/OP_UNUSED_9AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9BFF: /* 0x19b */
+/* File: armv5te/OP_UNUSED_9BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9CFF: /* 0x19c */
+/* File: armv5te/OP_UNUSED_9CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9DFF: /* 0x19d */
+/* File: armv5te/OP_UNUSED_9DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9EFF: /* 0x19e */
+/* File: armv5te/OP_UNUSED_9EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9FFF: /* 0x19f */
+/* File: armv5te/OP_UNUSED_9FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A0FF: /* 0x1a0 */
+/* File: armv5te/OP_UNUSED_A0FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A1FF: /* 0x1a1 */
+/* File: armv5te/OP_UNUSED_A1FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A2FF: /* 0x1a2 */
+/* File: armv5te/OP_UNUSED_A2FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A3FF: /* 0x1a3 */
+/* File: armv5te/OP_UNUSED_A3FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A4FF: /* 0x1a4 */
+/* File: armv5te/OP_UNUSED_A4FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A5FF: /* 0x1a5 */
+/* File: armv5te/OP_UNUSED_A5FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A6FF: /* 0x1a6 */
+/* File: armv5te/OP_UNUSED_A6FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A7FF: /* 0x1a7 */
+/* File: armv5te/OP_UNUSED_A7FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A8FF: /* 0x1a8 */
+/* File: armv5te/OP_UNUSED_A8FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A9FF: /* 0x1a9 */
+/* File: armv5te/OP_UNUSED_A9FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_AAFF: /* 0x1aa */
+/* File: armv5te/OP_UNUSED_AAFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_ABFF: /* 0x1ab */
+/* File: armv5te/OP_UNUSED_ABFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_ACFF: /* 0x1ac */
+/* File: armv5te/OP_UNUSED_ACFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_ADFF: /* 0x1ad */
+/* File: armv5te/OP_UNUSED_ADFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_AEFF: /* 0x1ae */
+/* File: armv5te/OP_UNUSED_AEFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_AFFF: /* 0x1af */
+/* File: armv5te/OP_UNUSED_AFFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B0FF: /* 0x1b0 */
+/* File: armv5te/OP_UNUSED_B0FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B1FF: /* 0x1b1 */
+/* File: armv5te/OP_UNUSED_B1FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B2FF: /* 0x1b2 */
+/* File: armv5te/OP_UNUSED_B2FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B3FF: /* 0x1b3 */
+/* File: armv5te/OP_UNUSED_B3FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B4FF: /* 0x1b4 */
+/* File: armv5te/OP_UNUSED_B4FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B5FF: /* 0x1b5 */
+/* File: armv5te/OP_UNUSED_B5FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B6FF: /* 0x1b6 */
+/* File: armv5te/OP_UNUSED_B6FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B7FF: /* 0x1b7 */
+/* File: armv5te/OP_UNUSED_B7FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B8FF: /* 0x1b8 */
+/* File: armv5te/OP_UNUSED_B8FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B9FF: /* 0x1b9 */
+/* File: armv5te/OP_UNUSED_B9FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BAFF: /* 0x1ba */
+/* File: armv5te/OP_UNUSED_BAFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BBFF: /* 0x1bb */
+/* File: armv5te/OP_UNUSED_BBFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BCFF: /* 0x1bc */
+/* File: armv5te/OP_UNUSED_BCFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BDFF: /* 0x1bd */
+/* File: armv5te/OP_UNUSED_BDFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BEFF: /* 0x1be */
+/* File: armv5te/OP_UNUSED_BEFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BFFF: /* 0x1bf */
+/* File: armv5te/OP_UNUSED_BFFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C0FF: /* 0x1c0 */
+/* File: armv5te/OP_UNUSED_C0FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C1FF: /* 0x1c1 */
+/* File: armv5te/OP_UNUSED_C1FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C2FF: /* 0x1c2 */
+/* File: armv5te/OP_UNUSED_C2FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C3FF: /* 0x1c3 */
+/* File: armv5te/OP_UNUSED_C3FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C4FF: /* 0x1c4 */
+/* File: armv5te/OP_UNUSED_C4FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C5FF: /* 0x1c5 */
+/* File: armv5te/OP_UNUSED_C5FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C6FF: /* 0x1c6 */
+/* File: armv5te/OP_UNUSED_C6FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C7FF: /* 0x1c7 */
+/* File: armv5te/OP_UNUSED_C7FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C8FF: /* 0x1c8 */
+/* File: armv5te/OP_UNUSED_C8FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C9FF: /* 0x1c9 */
+/* File: armv5te/OP_UNUSED_C9FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CAFF: /* 0x1ca */
+/* File: armv5te/OP_UNUSED_CAFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CBFF: /* 0x1cb */
+/* File: armv5te/OP_UNUSED_CBFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CCFF: /* 0x1cc */
+/* File: armv5te/OP_UNUSED_CCFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CDFF: /* 0x1cd */
+/* File: armv5te/OP_UNUSED_CDFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CEFF: /* 0x1ce */
+/* File: armv5te/OP_UNUSED_CEFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CFFF: /* 0x1cf */
+/* File: armv5te/OP_UNUSED_CFFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D0FF: /* 0x1d0 */
+/* File: armv5te/OP_UNUSED_D0FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D1FF: /* 0x1d1 */
+/* File: armv5te/OP_UNUSED_D1FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D2FF: /* 0x1d2 */
+/* File: armv5te/OP_UNUSED_D2FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D3FF: /* 0x1d3 */
+/* File: armv5te/OP_UNUSED_D3FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D4FF: /* 0x1d4 */
+/* File: armv5te/OP_UNUSED_D4FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D5FF: /* 0x1d5 */
+/* File: armv5te/OP_UNUSED_D5FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D6FF: /* 0x1d6 */
+/* File: armv5te/OP_UNUSED_D6FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D7FF: /* 0x1d7 */
+/* File: armv5te/OP_UNUSED_D7FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D8FF: /* 0x1d8 */
+/* File: armv5te/OP_UNUSED_D8FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D9FF: /* 0x1d9 */
+/* File: armv5te/OP_UNUSED_D9FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DAFF: /* 0x1da */
+/* File: armv5te/OP_UNUSED_DAFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DBFF: /* 0x1db */
+/* File: armv5te/OP_UNUSED_DBFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DCFF: /* 0x1dc */
+/* File: armv5te/OP_UNUSED_DCFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DDFF: /* 0x1dd */
+/* File: armv5te/OP_UNUSED_DDFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DEFF: /* 0x1de */
+/* File: armv5te/OP_UNUSED_DEFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DFFF: /* 0x1df */
+/* File: armv5te/OP_UNUSED_DFFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E0FF: /* 0x1e0 */
+/* File: armv5te/OP_UNUSED_E0FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E1FF: /* 0x1e1 */
+/* File: armv5te/OP_UNUSED_E1FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E2FF: /* 0x1e2 */
+/* File: armv5te/OP_UNUSED_E2FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E3FF: /* 0x1e3 */
+/* File: armv5te/OP_UNUSED_E3FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E4FF: /* 0x1e4 */
+/* File: armv5te/OP_UNUSED_E4FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E5FF: /* 0x1e5 */
+/* File: armv5te/OP_UNUSED_E5FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E6FF: /* 0x1e6 */
+/* File: armv5te/OP_UNUSED_E6FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E7FF: /* 0x1e7 */
+/* File: armv5te/OP_UNUSED_E7FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E8FF: /* 0x1e8 */
+/* File: armv5te/OP_UNUSED_E8FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E9FF: /* 0x1e9 */
+/* File: armv5te/OP_UNUSED_E9FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EAFF: /* 0x1ea */
+/* File: armv5te/OP_UNUSED_EAFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EBFF: /* 0x1eb */
+/* File: armv5te/OP_UNUSED_EBFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_ECFF: /* 0x1ec */
+/* File: armv5te/OP_UNUSED_ECFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EDFF: /* 0x1ed */
+/* File: armv5te/OP_UNUSED_EDFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EEFF: /* 0x1ee */
+/* File: armv5te/OP_UNUSED_EEFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EFFF: /* 0x1ef */
+/* File: armv5te/OP_UNUSED_EFFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_F0FF: /* 0x1f0 */
+/* File: armv5te/OP_UNUSED_F0FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_F1FF: /* 0x1f1 */
+/* File: armv5te/OP_UNUSED_F1FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_OBJECT_INIT_JUMBO: /* 0x1f2 */
+/* File: armv5te/OP_INVOKE_OBJECT_INIT_JUMBO.S */
+/* File: armv5te/OP_INVOKE_OBJECT_INIT_RANGE.S */
+    /*
+     * Invoke Object.<init> on an object.  In practice we know that
+     * Object's nullary constructor doesn't do anything, so we just
+     * skip it (we know a debugger isn't active).
+     */
+    FETCH(r1, 4)                  @ r1<- CCCC
+    GET_VREG(r0, r1)                    @ r0<- "this" ptr
+    cmp     r0, #0                      @ check for NULL
+    beq     common_errNullObject        @ export PC and throw NPE
+    ldr     r1, [r0, #offObject_clazz]  @ r1<- obj->clazz
+    ldr     r2, [r1, #offClassObject_accessFlags] @ r2<- clazz->accessFlags
+    tst     r2, #CLASS_ISFINALIZABLE    @ is this class finalizable?
+    beq     1f                          @ nope, done
+    bl      dvmSetFinalizable           @ call dvmSetFinalizable(obj)
+1:  FETCH_ADVANCE_INST(4+1)       @ advance to next instr, load rINST
+    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
+    GOTO_OPCODE(ip)                     @ execute it
+
+
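Note: in C terms, the fast path above amounts to the following sketch; the types and the flag value are placeholders, not the real Dalvik declarations.

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical stand-ins for the real Object/ClassObject types. */
    typedef struct { uint32_t accessFlags; } ClassObject;
    typedef struct { ClassObject* clazz; } Object;
    #define CLASS_ISFINALIZABLE (1u << 31)   /* placeholder value */

    /* Object.<init> is a no-op, so the handler only null-checks "this"
     * and records finalizability before skipping the call entirely. */
    static bool objectInitFastPath(Object* thisPtr,
                                   void (*setFinalizable)(Object*))
    {
        if (thisPtr == NULL)
            return false;                          /* common_errNullObject   */
        if (thisPtr->clazz->accessFlags & CLASS_ISFINALIZABLE)
            setFinalizable(thisPtr);               /* dvmSetFinalizable(obj) */
        return true;                               /* just advance the pc    */
    }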
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_VOLATILE_JUMBO: /* 0x1f3 */
+/* File: armv5te/OP_IGET_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_VOLATILE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_VOLATILE_JUMBO_resolved        @ resolved, continue
+
+
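Note: the iget/iput jumbo handlers all share the same resolved-field cache pattern: index pDvmDex->pResFields with the 32-bit field reference, and fall back to dvmResolveInstField (which may throw, hence the EXPORT_PC) on a miss. A schematic C version with placeholder types:

    #include <stddef.h>
    #include <stdint.h>

    typedef struct InstField InstField;                /* opaque placeholder */
    typedef struct { InstField** pResFields; } DvmDexLike;

    /* NULL means "not yet resolved"; the real code then calls
     * dvmResolveInstField(curMethod->clazz, fieldIdx) on the slow path. */
    static InstField* lookupResolvedField(const DvmDexLike* pDvmDex,
                                          uint32_t fieldIdx)
    {
        return pDvmDex->pResFields[fieldIdx];
    }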
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_WIDE_VOLATILE_JUMBO: /* 0x1f4 */
+/* File: armv5te/OP_IGET_WIDE_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_IGET_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit instance field get.
+     */
+    /* iget-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_WIDE_VOLATILE_JUMBO_finish          @ no, already resolved
+    ldr     r2, [rSELF, #offThread_method] @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_WIDE_VOLATILE_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_OBJECT_VOLATILE_JUMBO: /* 0x1f5 */
+/* File: armv5te/OP_IGET_OBJECT_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_IGET_OBJECT_JUMBO.S */
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_OBJECT_VOLATILE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_OBJECT_VOLATILE_JUMBO_resolved        @ resolved, continue
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_VOLATILE_JUMBO: /* 0x1f6 */
+/* File: armv5te/OP_IPUT_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_VOLATILE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_VOLATILE_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_WIDE_VOLATILE_JUMBO: /* 0x1f7 */
+/* File: armv5te/OP_IPUT_WIDE_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_IPUT_WIDE_JUMBO.S */
+    /* iput-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_WIDE_VOLATILE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_WIDE_VOLATILE_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_OBJECT_VOLATILE_JUMBO: /* 0x1f8 */
+/* File: armv5te/OP_IPUT_OBJECT_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_IPUT_OBJECT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     */
+    /* iput-object/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_OBJECT_VOLATILE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_OBJECT_VOLATILE_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_VOLATILE_JUMBO: /* 0x1f9 */
+/* File: armv5te/OP_SGET_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_VOLATILE_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_VOLATILE_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    SMP_DMB                            @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_WIDE_VOLATILE_JUMBO: /* 0x1fa */
+/* File: armv5te/OP_SGET_WIDE_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_SGET_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit SGET handler.
+     */
+    /* sget-wide/jumbo vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_WIDE_VOLATILE_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_WIDE_VOLATILE_JUMBO_finish:
+    FETCH(r9, 3)                        @ r9<- BBBB
+    .if 1
+    add     r0, r0, #offStaticField_value @ r0<- pointer to data
+    bl      dvmQuasiAtomicRead64        @ r0/r1<- contents of field
+    .else
+    ldrd    r0, [r0, #offStaticField_value] @ r0/r1<- field value (aligned)
+    .endif
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BBBB]
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    stmia   r9, {r0-r1}                 @ vBBBB/vBBBB+1<- r0/r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
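Note: the ".if 1" branch above routes the 64-bit volatile load through dvmQuasiAtomicRead64 rather than a plain ldrd, since an ordinary doubleword load is not guaranteed to be single-copy atomic on these ARM cores. The declaration below is an assumption about the helper's signature, not a quotation of the header:

    #include <stdint.h>

    extern int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr);

    /* Sketch: volatile 64-bit static field read as the handler performs it. */
    static int64_t readWideVolatile(volatile const int64_t* fieldValue)
    {
        return dvmQuasiAtomicRead64(fieldValue);   /* r0/r1 <- field value */
    }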
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_OBJECT_VOLATILE_JUMBO: /* 0x1fb */
+/* File: armv5te/OP_SGET_OBJECT_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_SGET_OBJECT_JUMBO.S */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_OBJECT_VOLATILE_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_OBJECT_VOLATILE_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    SMP_DMB                            @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_VOLATILE_JUMBO: /* 0x1fc */
+/* File: armv5te/OP_SPUT_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_VOLATILE_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_VOLATILE_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SMP_DMB                            @ releasing store
+    str     r1, [r0, #offStaticField_value] @ field<- vBBBB
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
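Note: the SMP_DMB barriers in the volatile sget/sput handlers give the load acquire semantics and the store release semantics. A C11 sketch of the same ordering (not Dalvik code):

    #include <stdatomic.h>
    #include <stdint.h>

    static uint32_t loadVolatileField(_Atomic uint32_t* field)
    {
        return atomic_load_explicit(field, memory_order_acquire);
    }

    static void storeVolatileField(_Atomic uint32_t* field, uint32_t value)
    {
        atomic_store_explicit(field, value, memory_order_release);
    }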
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_WIDE_VOLATILE_JUMBO: /* 0x1fd */
+/* File: armv5te/OP_SPUT_WIDE_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_SPUT_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit SPUT handler.
+     */
+    /* sput-wide/jumbo vBBBB, field@AAAAAAAA */
+    ldr     r0, [rSELF, #offThread_methodClassDex]  @ r0<- DvmDex
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    ldr     r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    FETCH(r9, 3)                        @ r9<- BBBB
+    ldr     r2, [r0, r1, lsl #2]        @ r2<- resolved StaticField ptr
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BBBB]
+    cmp     r2, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_WIDE_VOLATILE_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_WIDE_VOLATILE_JUMBO_finish: @ field ptr in r2, BBBB in r9
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vBBBB/vBBBB+1
+    GET_INST_OPCODE(r10)                @ extract opcode from rINST
+    .if 1
+    add     r2, r2, #offStaticField_value @ r2<- pointer to data
+    bl      dvmQuasiAtomicSwap64        @ stores r0/r1 into addr r2
+    .else
+    strd    r0, [r2, #offStaticField_value] @ field<- vBBBB/vBBBB+1
+    .endif
+    GOTO_OPCODE(r10)                    @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_OBJECT_VOLATILE_JUMBO: /* 0x1fe */
+/* File: armv5te/OP_SPUT_OBJECT_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_SPUT_OBJECT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler for objects
+     */
+    /* sput-object/jumbo vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_SPUT_OBJECT_VOLATILE_JUMBO_finish          @ no, continue
+    ldr     r9, [rSELF, #offThread_method]    @ r9<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r9, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_OBJECT_VOLATILE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_THROW_VERIFICATION_ERROR_JUMBO: /* 0x1ff */
+/* File: armv5te/OP_THROW_VERIFICATION_ERROR_JUMBO.S */
+    /*
+     * Handle a jumbo throw-verification-error instruction.  This throws an
+     * exception for an error discovered during verification.  The
+     * exception is indicated by BBBB, with some detail provided by AAAAAAAA.
+     */
+    /* exop BBBB, Class@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    ldr     r0, [rSELF, #offThread_method]    @ r0<- self->method
+    orr     r2, r1, r2, lsl #16         @ r2<- AAAAaaaa
+    EXPORT_PC()                         @ export the PC
+    FETCH(r1, 3)                        @ r1<- BBBB
+    bl      dvmThrowVerificationError   @ always throws
+    b       common_exceptionThrown      @ handle exception
 
     .balign 64
     .size   dvmAsmInstructionStart, .-dvmAsmInstructionStart
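Note: for reference, the throw-verification-error/jumbo handler above ends in a call of roughly the following shape; the helper's exact signature is assumed here, not copied from the header.

    #include <stdint.h>

    typedef struct Method Method;                  /* opaque placeholder */
    extern void dvmThrowVerificationError(const Method* method, int kind, int ref);

    static void throwVerificationErrorJumbo(const Method* curMethod,
                                            const uint16_t* insns)
    {
        int ref  = (int)(insns[1] | ((uint32_t)insns[2] << 16));  /* AAAAaaaa */
        int kind = insns[3];                                      /* BBBB     */
        dvmThrowVerificationError(curMethod, kind, ref);          /* always throws */
    }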
@@ -7773,7 +10876,7 @@
      */
 .LOP_CONST_STRING_resolve:
     EXPORT_PC()
-    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+    ldr     r0, [rSELF, #offThread_method] @ r0<- self->method
     ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveString            @ r0<- String reference
     cmp     r0, #0                      @ failed?
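Note: this and the following hunks replace loads through the old glue pointer (rGLUE/offGlue_*) with loads through the thread pointer (rSELF/offThread_*), so per-interpreter state such as the current method, retval, and the card-table base is now read off the Thread structure. The layout below only illustrates what those offsets imply; field names and types are assumptions:

    #include <stdint.h>

    struct MethodLike;                         /* opaque placeholder */
    struct ThreadLike {                        /* hypothetical, illustrative only */
        const struct MethodLike* method;       /* offThread_method                */
        void*    methodClassDex;               /* offThread_methodClassDex        */
        int64_t  retval;                       /* offThread_retval (.l / +4 = .h) */
        uint8_t* cardTable;                    /* offThread_cardTable             */
    };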
@@ -7792,7 +10895,7 @@
      */
 .LOP_CONST_STRING_JUMBO_resolve:
     EXPORT_PC()
-    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+    ldr     r0, [rSELF, #offThread_method] @ r0<- self->method
     ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveString            @ r0<- String reference
     cmp     r0, #0                      @ failed?
@@ -7811,7 +10914,7 @@
      */
 .LOP_CONST_CLASS_resolve:
     EXPORT_PC()
-    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+    ldr     r0, [rSELF, #offThread_method] @ r0<- self->method
     mov     r2, #1                      @ r2<- true
     ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveClass             @ r0<- Class reference
@@ -7851,7 +10954,7 @@
      */
 .LOP_CHECK_CAST_resolve:
     EXPORT_PC()                         @ resolve() could throw
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     mov     r1, r2                      @ r1<- BBBB
     mov     r2, #0                      @ r2<- false
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
@@ -7904,7 +11007,7 @@
      */
 .LOP_INSTANCE_OF_resolve:
     EXPORT_PC()                         @ resolve() could throw
-    ldr     r0, [rGLUE, #offGlue_method]    @ r0<- glue->method
+    ldr     r0, [rSELF, #offThread_method]    @ r0<- self->method
     mov     r1, r3                      @ r1<- BBBB
     mov     r2, #1                      @ r2<- true
     ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
@@ -7948,7 +11051,7 @@
      *  r1 holds BBBB
      */
 .LOP_NEW_INSTANCE_resolve:
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     mov     r2, #0                      @ r2<- false
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
@@ -7956,9 +11059,6 @@
     bne     .LOP_NEW_INSTANCE_resolved        @ no, continue
     b       common_exceptionThrown      @ yes, handle exception
 
-.LstrInstantiationErrorPtr:
-    .word   .LstrInstantiationError
-
 /* continuation for OP_NEW_ARRAY */
 
 
@@ -7969,7 +11069,7 @@
      *  r2 holds class ref CCCC
      */
 .LOP_NEW_ARRAY_resolve:
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     mov     r9, r1                      @ r9<- length (save)
     mov     r1, r2                      @ r1<- CCCC
     mov     r2, #0                      @ r2<- false
@@ -8024,8 +11124,8 @@
     beq     common_exceptionThrown      @ alloc failed, handle exception
 
     FETCH(r1, 2)                        @ r1<- FEDC or CCCC
-    str     r0, [rGLUE, #offGlue_retval]      @ retval.l <- new array
-    str     rINST, [rGLUE, #offGlue_retval+4] @ retval.h <- type
+    str     r0, [rSELF, #offThread_retval]      @ retval.l <- new array
+    str     rINST, [rSELF, #offThread_retval+4] @ retval.h <- type
     add     r0, r0, #offArrayObject_contents @ r0<- newArray->contents
     subs    r9, r9, #1                  @ length--, check for neg
     FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
@@ -8057,9 +11157,9 @@
     .endif
 
 2:
-    ldr     r0, [rGLUE, #offGlue_retval]     @ r0<- object
-    ldr     r1, [rGLUE, #offGlue_retval+4]   @ r1<- type
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r0, [rSELF, #offThread_retval]     @ r0<- object
+    ldr     r1, [rSELF, #offThread_retval+4]   @ r1<- type
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     GET_INST_OPCODE(ip)                      @ ip<- opcode from rINST
     cmp     r1, #'I'                         @ Is int array?
     strneb  r2, [r2, r0, lsr #GC_CARD_SHIFT] @ Mark card based on object head
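The strneb above is the interpreter's card-mark idiom; a minimal C sketch of it, assuming the biased card-table base held in r2 and a GC_CARD_SHIFT of 7 as in the Dalvik card table (the helper name is hypothetical):

    #include <stdint.h>

    #define GC_CARD_SHIFT 7   /* assumed: 128-byte cards, as in the Dalvik card table */

    /* Hypothetical helper for the card-mark idiom above: index the biased
     * card-table base by (object address >> GC_CARD_SHIFT) and store the low
     * byte of the base itself, matching
     *   strb r2, [r2, rX, lsr #GC_CARD_SHIFT]
     */
    static void markCard(uint8_t* biasedBase, const void* objHead)
    {
        biasedBase[(uintptr_t)objHead >> GC_CARD_SHIFT] = (uint8_t)(uintptr_t)biasedBase;
    }
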
@@ -8070,16 +11170,13 @@
      * mode of filled-new-array.
      */
 .LOP_FILLED_NEW_ARRAY_notimpl:
-    ldr     r0, .L_strInternalError
-    ldr     r1, .L_strFilledNewArrayNotImpl
-    bl      dvmThrowException
+    ldr     r0, .L_strFilledNewArrayNotImpl
+    bl      dvmThrowInternalError
     b       common_exceptionThrown
 
     .if     (!0)                 @ define in one or the other, not both
 .L_strFilledNewArrayNotImpl:
     .word   .LstrFilledNewArrayNotImpl
-.L_strInternalError:
-    .word   .LstrInternalError
     .endif
 
 /* continuation for OP_FILLED_NEW_ARRAY_RANGE */
@@ -8108,8 +11205,8 @@
     beq     common_exceptionThrown      @ alloc failed, handle exception
 
     FETCH(r1, 2)                        @ r1<- FEDC or CCCC
-    str     r0, [rGLUE, #offGlue_retval]      @ retval.l <- new array
-    str     rINST, [rGLUE, #offGlue_retval+4] @ retval.h <- type
+    str     r0, [rSELF, #offThread_retval]      @ retval.l <- new array
+    str     rINST, [rSELF, #offThread_retval+4] @ retval.h <- type
     add     r0, r0, #offArrayObject_contents @ r0<- newArray->contents
     subs    r9, r9, #1                  @ length--, check for neg
     FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
@@ -8141,9 +11238,9 @@
     .endif
 
 2:
-    ldr     r0, [rGLUE, #offGlue_retval]     @ r0<- object
-    ldr     r1, [rGLUE, #offGlue_retval+4]   @ r1<- type
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r0, [rSELF, #offThread_retval]     @ r0<- object
+    ldr     r1, [rSELF, #offThread_retval+4]   @ r1<- type
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     GET_INST_OPCODE(ip)                      @ ip<- opcode from rINST
     cmp     r1, #'I'                         @ Is int array?
     strneb  r2, [r2, r0, lsr #GC_CARD_SHIFT] @ Mark card based on object head
@@ -8154,16 +11251,13 @@
      * mode of filled-new-array.
      */
 .LOP_FILLED_NEW_ARRAY_RANGE_notimpl:
-    ldr     r0, .L_strInternalError
-    ldr     r1, .L_strFilledNewArrayNotImpl
-    bl      dvmThrowException
+    ldr     r0, .L_strFilledNewArrayNotImpl
+    bl      dvmThrowInternalError
     b       common_exceptionThrown
 
     .if     (!1)                 @ define in one or the other, not both
 .L_strFilledNewArrayNotImpl:
     .word   .LstrFilledNewArrayNotImpl
-.L_strInternalError:
-    .word   .LstrInternalError
     .endif
 
 /* continuation for OP_CMPL_FLOAT */
@@ -8243,7 +11337,7 @@
     beq     .LOP_APUT_OBJECT_throw           @ no
     mov     r1, rINST                   @ r1<- arrayObj
     FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
-    ldr     r2, [rGLUE, #offGlue_cardTable]     @ get biased CT base
+    ldr     r2, [rSELF, #offThread_cardTable]     @ get biased CT base
     add     r10, #offArrayObject_contents   @ r0<- pointer to slot
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     str     r9, [r10]                   @ vBB[vCC]<- vAA
@@ -8473,7 +11567,7 @@
     and     r1, r1, #15                 @ r1<- A
     cmp     r9, #0                      @ check object for null
     GET_VREG(r0, r1)                    @ r0<- fp[A]
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     beq     common_errNullObject        @ object was null
     FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
@@ -8574,7 +11668,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8591,7 +11685,7 @@
      * Returns StaticField pointer in r0.
      */
 .LOP_SGET_WIDE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8606,7 +11700,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_OBJECT_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8621,7 +11715,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_BOOLEAN_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8636,7 +11730,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_BYTE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8651,7 +11745,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_CHAR_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8666,7 +11760,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_SHORT_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8681,7 +11775,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SPUT_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8699,7 +11793,7 @@
      * Returns StaticField pointer in r2.
      */
 .LOP_SPUT_WIDE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8713,7 +11807,7 @@
     mov     r2, rINST, lsr #8           @ r2<- AA
     FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
     GET_VREG(r1, r2)                    @ r1<- fp[AA]
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     ldr     r9, [r0, #offField_clazz]   @ r9<- field->clazz
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     @ no-op                             @ releasing store
@@ -8729,7 +11823,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SPUT_BOOLEAN_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8744,7 +11838,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SPUT_BYTE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8759,7 +11853,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SPUT_CHAR_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8774,7 +11868,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SPUT_SHORT_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8841,7 +11935,7 @@
      *  r10 = "this" register
      */
 .LOP_INVOKE_DIRECT_resolve:
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     mov     r2, #METHOD_DIRECT          @ resolver method type
     bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
@@ -8909,7 +12003,7 @@
      *  r10 = "this" register
      */
 .LOP_INVOKE_DIRECT_RANGE_resolve:
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     mov     r2, #METHOD_DIRECT          @ resolver method type
     bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
@@ -9106,7 +12200,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_VOLATILE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9121,7 +12215,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SPUT_VOLATILE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9209,7 +12303,7 @@
      * Returns StaticField pointer in r0.
      */
 .LOP_SGET_WIDE_VOLATILE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9227,7 +12321,7 @@
      * Returns StaticField pointer in r2.
      */
 .LOP_SPUT_WIDE_VOLATILE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9314,7 +12408,7 @@
     and     r1, r1, #15                 @ r1<- A
     cmp     r9, #0                      @ check object for null
     GET_VREG(r0, r1)                    @ r0<- fp[A]
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     beq     common_errNullObject        @ object was null
     FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
@@ -9331,7 +12425,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_OBJECT_VOLATILE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9344,7 +12438,7 @@
     mov     r2, rINST, lsr #8           @ r2<- AA
     FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
     GET_VREG(r1, r2)                    @ r1<- fp[AA]
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     ldr     r9, [r0, #offField_clazz]   @ r9<- field->clazz
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     SMP_DMB                            @ releasing store
@@ -9353,10 +12447,8455 @@
     strneb  r2, [r2, r9, lsr #GC_CARD_SHIFT]  @ mark card based on obj head
     GOTO_OPCODE(ip)                     @ jump to next instruction
 
+/* continuation for OP_CONST_CLASS_JUMBO */
+
+    /*
+     * Continuation if the Class has not yet been resolved.
+     *  r1: AAAAAAAA (Class ref)
+     *  r9: target register
+     */
+.LOP_CONST_CLASS_JUMBO_resolve:
+    EXPORT_PC()
+    ldr     r0, [rSELF, #offThread_method] @ r0<- self->method
+    mov     r2, #1                      @ r2<- true
+    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- Class reference
+    cmp     r0, #0                      @ failed?
+    beq     common_exceptionThrown      @ yup, handle the exception
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vBBBB<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_CHECK_CAST_JUMBO */
+
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  r0 holds obj->clazz
+     *  r1 holds desired class resolved from AAAAAAAA
+     *  r9 holds object
+     */
+.LOP_CHECK_CAST_JUMBO_fullcheck:
+    mov     r10, r1                     @ avoid ClassObject getting clobbered
+    bl      dvmInstanceofNonTrivial     @ r0<- boolean result
+    cmp     r0, #0                      @ failed?
+    bne     .LOP_CHECK_CAST_JUMBO_okay            @ no, success
+
+    @ A cast has failed.  We need to throw a ClassCastException.
+    EXPORT_PC()                         @ about to throw
+    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz (actual class)
+    mov     r1, r10                     @ r1<- desired class
+    bl      dvmThrowClassCastException
+    b       common_exceptionThrown
+
+    /*
+     * Advance PC and get the next opcode.
+     */
+.LOP_CHECK_CAST_JUMBO_okay:
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  r2 holds AAAAAAAA
+     *  r9 holds object
+     */
+.LOP_CHECK_CAST_JUMBO_resolve:
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    mov     r1, r2                      @ r1<- AAAAAAAA
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
+    cmp     r0, #0                      @ got null?
+    beq     common_exceptionThrown      @ yes, handle exception
+    mov     r1, r0                      @ r1<- class resolved from AAAAAAAA
+    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
+    b       .LOP_CHECK_CAST_JUMBO_resolved        @ pick up where we left off
+
+/* continuation for OP_INSTANCE_OF_JUMBO */
+
+    /*
+     * Class resolved, determine type of check necessary.  This is common.
+     *  r0 holds obj->clazz
+     *  r1 holds class resolved from AAAAAAAA
+     *  r9 holds BBBB
+     */
+.LOP_INSTANCE_OF_JUMBO_resolved:
+    cmp     r0, r1                      @ same class (trivial success)?
+    beq     .LOP_INSTANCE_OF_JUMBO_trivial         @ yes, trivial finish
+    @ fall through to OP_INSTANCE_OF_JUMBO_fullcheck
+
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  r0 holds obj->clazz
+     *  r1 holds class resolved from AAAAAAAA
+     *  r9 holds BBBB
+     */
+.LOP_INSTANCE_OF_JUMBO_fullcheck:
+    bl      dvmInstanceofNonTrivial     @ r0<- boolean result
+    @ fall through to OP_INSTANCE_OF_JUMBO_store
+
+    /*
+     * r0 holds boolean result
+     * r9 holds BBBB
+     */
+.LOP_INSTANCE_OF_JUMBO_store:
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r9)                    @ vBBBB<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * Trivial test succeeded, save and bail.
+     *  r9 holds BBBB
+     */
+.LOP_INSTANCE_OF_JUMBO_trivial:
+    mov     r0, #1                      @ indicate success
+    @ could b OP_INSTANCE_OF_JUMBO_store, but copying is faster and cheaper
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r9)                    @ vBBBB<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  r3 holds AAAAAAAA
+     *  r9 holds BBBB
+     */
+
+.LOP_INSTANCE_OF_JUMBO_resolve:
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [rSELF, #offThread_method]    @ r0<- self->method
+    mov     r1, r3                      @ r1<- AAAAAAAA
+    mov     r2, #1                      @ r2<- true
+    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
+    cmp     r0, #0                      @ got null?
+    beq     common_exceptionThrown      @ yes, handle exception
+    FETCH(r3, 4)                        @ r3<- vCCCC
+    mov     r1, r0                      @ r1<- class resolved from AAAAAAAA
+    GET_VREG(r0, r3)                    @ r0<- vCCCC (object)
+    ldr     r0, [r0, #offObject_clazz]  @ r0<- obj->clazz
+    b       .LOP_INSTANCE_OF_JUMBO_resolved        @ pick up where we left off
+
+/* continuation for OP_NEW_INSTANCE_JUMBO */
+
+    .balign 32                          @ minimize cache lines
+.LOP_NEW_INSTANCE_JUMBO_finish: @ r0=new object
+    FETCH(r3, 3)                        @ r3<- BBBB
+    cmp     r0, #0                      @ failed?
+    beq     common_exceptionThrown      @ yes, handle the exception
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r3)                    @ vBBBB<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * Class initialization required.
+     *
+     *  r0 holds class object
+     */
+.LOP_NEW_INSTANCE_JUMBO_needinit:
+    mov     r9, r0                      @ save r0
+    bl      dvmInitClass                @ initialize class
+    cmp     r0, #0                      @ check boolean result
+    mov     r0, r9                      @ restore r0
+    bne     .LOP_NEW_INSTANCE_JUMBO_initialized     @ success, continue
+    b       common_exceptionThrown      @ failed, deal with init exception
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  r1 holds AAAAAAAA
+     */
+.LOP_NEW_INSTANCE_JUMBO_resolve:
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
+    cmp     r0, #0                      @ got null?
+    bne     .LOP_NEW_INSTANCE_JUMBO_resolved        @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
+/* continuation for OP_NEW_ARRAY_JUMBO */
+
+
+    /*
+     * Resolve class.  (This is an uncommon case.)
+     *
+     *  r1 holds array length
+     *  r2 holds class ref AAAAAAAA
+     */
+.LOP_NEW_ARRAY_JUMBO_resolve:
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    mov     r9, r1                      @ r9<- length (save)
+    mov     r1, r2                      @ r1<- AAAAAAAA
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- call(clazz, ref)
+    cmp     r0, #0                      @ got null?
+    mov     r1, r9                      @ r1<- length (restore)
+    beq     common_exceptionThrown      @ yes, handle exception
+    @ fall through to OP_NEW_ARRAY_JUMBO_finish
+
+    /*
+     * Finish allocation.
+     *
+     *  r0 holds class
+     *  r1 holds array length
+     */
+.LOP_NEW_ARRAY_JUMBO_finish:
+    mov     r2, #ALLOC_DONT_TRACK       @ don't track in local refs table
+    bl      dvmAllocArrayByClass        @ r0<- call(clazz, length, flags)
+    cmp     r0, #0                      @ failed?
+    FETCH(r2, 3)                        @ r2<- vBBBB
+    beq     common_exceptionThrown      @ yes, handle the exception
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r2)                    @ vBBBB<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_FILLED_NEW_ARRAY_JUMBO */
+
+    /*
+     * On entry:
+     *  r0 holds array class
+     */
+.LOP_FILLED_NEW_ARRAY_JUMBO_continue:
+    ldr     r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor
+    mov     r2, #ALLOC_DONT_TRACK       @ r2<- alloc flags
+    ldrb    rINST, [r3, #1]             @ rINST<- descriptor[1]
+    FETCH(r1, 3)                        @ r1<- BBBB (length)
+    cmp     rINST, #'I'                 @ array of ints?
+    cmpne   rINST, #'L'                 @ array of objects?
+    cmpne   rINST, #'['                 @ array of arrays?
+    mov     r9, r1                      @ save length in r9
+    bne     .LOP_FILLED_NEW_ARRAY_JUMBO_notimpl         @ no, not handled yet
+    bl      dvmAllocArrayByClass        @ r0<- call(arClass, length, flags)
+    cmp     r0, #0                      @ null return?
+    beq     common_exceptionThrown      @ alloc failed, handle exception
+
+    FETCH(r1, 4)                        @ r1<- CCCC
+    str     r0, [rSELF, #offThread_retval]      @ retval.l <- new array
+    str     rINST, [rSELF, #offThread_retval+4] @ retval.h <- type
+    add     r0, r0, #offArrayObject_contents @ r0<- newArray->contents
+    subs    r9, r9, #1                  @ length--, check for neg
+    FETCH_ADVANCE_INST(5)               @ advance to next instr, load rINST
+    bmi     2f                          @ was zero, bail
+
+    @ copy values from registers into the array
+    @ r0=array, r1=CCCC, r9=BBBB (length)
+    add     r2, rFP, r1, lsl #2         @ r2<- &fp[CCCC]
+1:  ldr     r3, [r2], #4                @ r3<- *r2++
+    subs    r9, r9, #1                  @ count--
+    str     r3, [r0], #4                @ *contents++ = vX
+    bpl     1b
+
+2:  ldr     r0, [rSELF, #offThread_retval]     @ r0<- object
+    ldr     r1, [rSELF, #offThread_retval+4]   @ r1<- type
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
+    GET_INST_OPCODE(ip)                      @ ip<- opcode from rINST
+    cmp     r1, #'I'                         @ Is int array?
+    strneb  r2, [r2, r0, lsr #GC_CARD_SHIFT] @ Mark card based on object head
+    GOTO_OPCODE(ip)                          @ execute it
+
+    /*
+     * Throw an exception indicating that we have not implemented this
+     * mode of filled-new-array.
+     */
+.LOP_FILLED_NEW_ARRAY_JUMBO_notimpl:
+    ldr     r0, .L_strFilledNewArrayNotImpl
+    bl      dvmThrowInternalError
+    b       common_exceptionThrown
+
+/* continuation for OP_IGET_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_JUMBO_finish:
+    @bl      common_squeak0
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IGET_WIDE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_WIDE_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_WIDE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_WIDE_JUMBO_finish:
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    .if     0
+    add     r0, r9, r3                  @ r0<- address of field
+    bl      dvmQuasiAtomicRead64        @ r0/r1<- contents of field
+    .else
+    ldrd    r0, [r9, r3]                @ r0/r1<- obj.field (64-bit align ok)
+    .endif
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    add     r3, rFP, r2, lsl #2         @ r3<- &fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r3, {r0-r1}                 @ fp[BBBB]<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IGET_OBJECT_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_OBJECT_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_OBJECT_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_OBJECT_JUMBO_finish:
+    @bl      common_squeak0
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IGET_BOOLEAN_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_BOOLEAN_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_BOOLEAN_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_BOOLEAN_JUMBO_finish:
+    @bl      common_squeak1
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IGET_BYTE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_BYTE_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_BYTE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_BYTE_JUMBO_finish:
+    @bl      common_squeak2
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IGET_CHAR_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_CHAR_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_CHAR_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_CHAR_JUMBO_finish:
+    @bl      common_squeak3
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IGET_SHORT_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_SHORT_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_SHORT_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_SHORT_JUMBO_finish:
+    @bl      common_squeak4
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IPUT_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_JUMBO_finish:
+    @bl      common_squeak0
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IPUT_WIDE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_WIDE_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_WIDE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_WIDE_JUMBO_finish:
+    cmp     r9, #0                      @ check object for null
+    FETCH(r2, 3)                        @ r2<- BBBB
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    ldmia   r2, {r0-r1}                 @ r0/r1<- fp[BBBB]
+    GET_INST_OPCODE(r10)                @ extract opcode from rINST
+    .if     0
+    add     r2, r9, r3                  @ r2<- target address
+    bl      dvmQuasiAtomicSwap64        @ stores r0/r1 into addr r2
+    .else
+    strd    r0, [r9, r3]                @ obj.field (64 bits, aligned)<- r0/r1
+    .endif
+    GOTO_OPCODE(r10)                    @ jump to next instruction
+
+/* continuation for OP_IPUT_OBJECT_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_OBJECT_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_OBJECT_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_OBJECT_JUMBO_finish:
+    @bl      common_squeak0
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str     r0, [r9, r3]                @ obj.field (32 bits)<- r0
+    cmp     r0, #0                      @ stored a null reference?
+    strneb  r2, [r2, r9, lsr #GC_CARD_SHIFT]  @ mark card if not
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IPUT_BOOLEAN_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_BOOLEAN_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_BOOLEAN_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_BOOLEAN_JUMBO_finish:
+    @bl      common_squeak1
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IPUT_BYTE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_BYTE_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_BYTE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_BYTE_JUMBO_finish:
+    @bl      common_squeak2
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IPUT_CHAR_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_CHAR_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_CHAR_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_CHAR_JUMBO_finish:
+    @bl      common_squeak3
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IPUT_SHORT_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_SHORT_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_SHORT_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_SHORT_JUMBO_finish:
+    @bl      common_squeak4
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_SGET_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_WIDE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     *
+     * Returns StaticField pointer in r0.
+     */
+.LOP_SGET_WIDE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_WIDE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_OBJECT_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_OBJECT_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_OBJECT_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_BOOLEAN_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_BOOLEAN_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_BOOLEAN_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_BYTE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_BYTE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_BYTE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_CHAR_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_CHAR_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_CHAR_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_SHORT_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_SHORT_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_SHORT_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SPUT_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_WIDE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     *  r9: &fp[BBBB]
+     *
+     * Returns StaticField pointer in r2.
+     */
+.LOP_SPUT_WIDE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    mov     r2, r0                      @ copy to r2
+    bne     .LOP_SPUT_WIDE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_OBJECT_JUMBO */
+
+.LOP_SPUT_OBJECT_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
+    ldr     r9, [r0, #offField_clazz]   @ r9<- field->clazz
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str     r1, [r0, #offStaticField_value]  @ field<- vBBBB
+    cmp     r1, #0                      @ stored a null object?
+    strneb  r2, [r2, r9, lsr #GC_CARD_SHIFT]  @ mark card based on obj head
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_SPUT_BOOLEAN_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SPUT_BOOLEAN_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_BOOLEAN_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_BYTE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SPUT_BYTE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_BYTE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_CHAR_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SPUT_CHAR_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_CHAR_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_SHORT_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SPUT_SHORT_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_SHORT_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_INVOKE_VIRTUAL_JUMBO */
+
+    /*
+     * At this point:
+     *  r0 = resolved base method
+     */
+.LOP_INVOKE_VIRTUAL_JUMBO_continue:
+    FETCH(r10, 4)                       @ r10<- CCCC
+    GET_VREG(r1, r10)                   @ r1<- "this" ptr
+    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
+    cmp     r1, #0                      @ is "this" null?
+    beq     common_errNullObject        @ null "this", throw exception
+    ldr     r3, [r1, #offObject_clazz]  @ r3<- thisPtr->clazz
+    ldr     r3, [r3, #offClassObject_vtable]    @ r3<- thisPtr->clazz->vtable
+    ldr     r0, [r3, r2, lsl #2]        @ r0<- vtable[methodIndex]
+    bl      common_invokeMethodJumbo    @ continue on
+
+/* continuation for OP_INVOKE_SUPER_JUMBO */
+
+    /*
+     * At this point:
+     *  r0 = resolved base method
+     *  r9 = method->clazz
+     */
+.LOP_INVOKE_SUPER_JUMBO_continue:
+    ldr     r1, [r9, #offClassObject_super]     @ r1<- method->clazz->super
+    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
+    ldr     r3, [r1, #offClassObject_vtableCount]   @ r3<- super->vtableCount
+    EXPORT_PC()                         @ must export for invoke
+    cmp     r2, r3                      @ compare (methodIndex, vtableCount)
+    bcs     .LOP_INVOKE_SUPER_JUMBO_nsm             @ method not present in superclass
+    ldr     r1, [r1, #offClassObject_vtable]    @ r1<- ...clazz->super->vtable
+    ldr     r0, [r1, r2, lsl #2]        @ r0<- vtable[methodIndex]
+    bl      common_invokeMethodJumbo    @ continue on
+
+.LOP_INVOKE_SUPER_JUMBO_resolve:
+    mov     r0, r9                      @ r0<- method->clazz
+    mov     r2, #METHOD_VIRTUAL         @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    bne     .LOP_INVOKE_SUPER_JUMBO_continue        @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
+    /*
+     * Throw a NoSuchMethodError with the method name as the message.
+     *  r0 = resolved base method
+     */
+.LOP_INVOKE_SUPER_JUMBO_nsm:
+    ldr     r1, [r0, #offMethod_name]   @ r1<- method name
+    b       common_errNoSuchMethod
+
+/* continuation for OP_INVOKE_DIRECT_JUMBO */
+
+    /*
+     * On entry:
+     *  r1 = reference (CCCC)
+     *  r10 = "this" register
+     */
+.LOP_INVOKE_DIRECT_JUMBO_resolve:
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    mov     r2, #METHOD_DIRECT          @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    GET_VREG(r2, r10)                   @ r2<- "this" ptr (reload)
+    bne     .LOP_INVOKE_DIRECT_JUMBO_finish          @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
+/* continuation for OP_IGET_VOLATILE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_VOLATILE_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_VOLATILE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_VOLATILE_JUMBO_finish:
+    @bl      common_squeak0
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr     r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    SMP_DMB                            @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IGET_WIDE_VOLATILE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_WIDE_VOLATILE_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_WIDE_VOLATILE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_WIDE_VOLATILE_JUMBO_finish:
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    .if     1
+    add     r0, r9, r3                  @ r0<- address of field
+    bl      dvmQuasiAtomicRead64        @ r0/r1<- contents of field
+    .else
+    ldrd    r0, [r9, r3]                @ r0/r1<- obj.field (64-bit align ok)
+    .endif
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    add     r3, rFP, r2, lsl #2         @ r3<- &fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r3, {r0-r1}                 @ fp[BBBB]<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
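+
+    /*
+     * Note on the ".if 1" path above: volatile 64-bit reads go through
+     * dvmQuasiAtomicRead64(&obj->field) instead of a plain ldrd; the
+     * presumed reason is that ldrd alone does not give a single-copy
+     * atomic 64-bit load here (inferred from the volatile variant using
+     * the helper, not stated in this file).
+     */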
+
+/* continuation for OP_IGET_OBJECT_VOLATILE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_OBJECT_VOLATILE_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_OBJECT_VOLATILE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_OBJECT_VOLATILE_JUMBO_finish:
+    @bl      common_squeak0
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr     r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    SMP_DMB                            @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IPUT_VOLATILE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_VOLATILE_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_VOLATILE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_VOLATILE_JUMBO_finish:
+    @bl      common_squeak0
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SMP_DMB                            @ releasing store
+    str     r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IPUT_WIDE_VOLATILE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_WIDE_VOLATILE_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_WIDE_VOLATILE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_WIDE_VOLATILE_JUMBO_finish:
+    cmp     r9, #0                      @ check object for null
+    FETCH(r2, 3)                        @ r2<- BBBB
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    ldmia   r2, {r0-r1}                 @ r0/r1<- fp[BBBB]
+    GET_INST_OPCODE(r10)                @ extract opcode from rINST
+    .if     1
+    add     r2, r9, r3                  @ r2<- target address
+    bl      dvmQuasiAtomicSwap64        @ stores r0/r1 into addr r2
+    .else
+    strd    r0, [r9, r3]                @ obj.field (64 bits, aligned)<- r0/r1
+    .endif
+    GOTO_OPCODE(r10)                    @ jump to next instruction
+
+/* continuation for OP_IPUT_OBJECT_VOLATILE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_OBJECT_VOLATILE_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_OBJECT_VOLATILE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_OBJECT_VOLATILE_JUMBO_finish:
+    @bl      common_squeak0
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SMP_DMB                            @ releasing store
+    str     r0, [r9, r3]                @ obj.field (32 bits)<- r0
+    cmp     r0, #0                      @ stored a null reference?
+    strneb  r2, [r2, r9, lsr #GC_CARD_SHIFT]  @ mark card if not
+    GOTO_OPCODE(ip)                     @ jump to next instruction
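+
+    /*
+     * Card-mark sketch for the strneb above (informal; GC_CARD_SHIFT and
+     * the card-table base come from the code, the C form is illustrative):
+     *
+     *     if (storedRef != NULL)
+     *         cardTable[(u4)obj >> GC_CARD_SHIFT] = (u1)(u4)cardTable;
+     */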
+
+/* continuation for OP_SGET_VOLATILE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_VOLATILE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_VOLATILE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_WIDE_VOLATILE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     *
+     * Returns StaticField pointer in r0.
+     */
+.LOP_SGET_WIDE_VOLATILE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_WIDE_VOLATILE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_OBJECT_VOLATILE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_OBJECT_VOLATILE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_OBJECT_VOLATILE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_VOLATILE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SPUT_VOLATILE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_VOLATILE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_WIDE_VOLATILE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     *  r9: &fp[BBBB]
+     *
+     * Returns StaticField pointer in r2.
+     */
+.LOP_SPUT_WIDE_VOLATILE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    mov     r2, r0                      @ copy to r2 (mov leaves the cmp flags intact)
+    bne     .LOP_SPUT_WIDE_VOLATILE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_OBJECT_VOLATILE_JUMBO */
+
+.LOP_SPUT_OBJECT_VOLATILE_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
+    ldr     r9, [r0, #offField_clazz]   @ r9<- field->clazz
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SMP_DMB                            @ releasing store
+    str     r1, [r0, #offStaticField_value]  @ field<- vBBBB
+    cmp     r1, #0                      @ stored a null object?
+    strneb  r2, [r2, r9, lsr #GC_CARD_SHIFT]  @ mark card based on obj head
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
     .size   dvmAsmSisterStart, .-dvmAsmSisterStart
     .global dvmAsmSisterEnd
 dvmAsmSisterEnd:
 
+
+    .global dvmAsmAltInstructionStart
+    .type   dvmAsmAltInstructionStart, %function
+dvmAsmAltInstructionStart:
+    .text
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NOP: /* 0x00 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (0 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
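+
+/*
+ * Informal sketch of the alt-stub pattern repeated for every opcode below
+ * (illustrative; assumes dvmCheckInst returns through the lr set up above):
+ *
+ *     lr = dvmAsmInstructionStart + opcode * 64;   // real 64-byte handler
+ *     dvmCheckInst(rPC, rSELF);                    // debug/profiling hook
+ *     // returning through lr resumes at the real handler
+ */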
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE: /* 0x01 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (1 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_FROM16: /* 0x02 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (2 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_16: /* 0x03 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (3 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_WIDE: /* 0x04 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (4 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_WIDE_FROM16: /* 0x05 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (5 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_WIDE_16: /* 0x06 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (6 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_OBJECT: /* 0x07 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (7 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_OBJECT_FROM16: /* 0x08 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (8 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_OBJECT_16: /* 0x09 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (9 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_RESULT: /* 0x0a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (10 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_RESULT_WIDE: /* 0x0b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (11 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_RESULT_OBJECT: /* 0x0c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (12 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_EXCEPTION: /* 0x0d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (13 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_RETURN_VOID: /* 0x0e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (14 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_RETURN: /* 0x0f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (15 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_RETURN_WIDE: /* 0x10 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (16 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_RETURN_OBJECT: /* 0x11 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (17 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_4: /* 0x12 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (18 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_16: /* 0x13 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (19 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST: /* 0x14 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (20 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_HIGH16: /* 0x15 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (21 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_WIDE_16: /* 0x16 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (22 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_WIDE_32: /* 0x17 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (23 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_WIDE: /* 0x18 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (24 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_WIDE_HIGH16: /* 0x19 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (25 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_STRING: /* 0x1a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (26 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_STRING_JUMBO: /* 0x1b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (27 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_CLASS: /* 0x1c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (28 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MONITOR_ENTER: /* 0x1d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (29 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MONITOR_EXIT: /* 0x1e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (30 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CHECK_CAST: /* 0x1f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (31 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INSTANCE_OF: /* 0x20 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (32 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ARRAY_LENGTH: /* 0x21 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (33 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEW_INSTANCE: /* 0x22 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (34 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEW_ARRAY: /* 0x23 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (35 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_FILLED_NEW_ARRAY: /* 0x24 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (36 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (37 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_FILL_ARRAY_DATA: /* 0x26 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (38 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_THROW: /* 0x27 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (39 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_GOTO: /* 0x28 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (40 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_GOTO_16: /* 0x29 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (41 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_GOTO_32: /* 0x2a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (42 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_PACKED_SWITCH: /* 0x2b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (43 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPARSE_SWITCH: /* 0x2c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (44 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CMPL_FLOAT: /* 0x2d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (45 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CMPG_FLOAT: /* 0x2e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (46 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CMPL_DOUBLE: /* 0x2f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (47 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CMPG_DOUBLE: /* 0x30 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (48 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CMP_LONG: /* 0x31 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (49 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_EQ: /* 0x32 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (50 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_NE: /* 0x33 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (51 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_LT: /* 0x34 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (52 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_GE: /* 0x35 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (53 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_GT: /* 0x36 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (54 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_LE: /* 0x37 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (55 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_EQZ: /* 0x38 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (56 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_NEZ: /* 0x39 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (57 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_LTZ: /* 0x3a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (58 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_GEZ: /* 0x3b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (59 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_GTZ: /* 0x3c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (60 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_LEZ: /* 0x3d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (61 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3E: /* 0x3e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (62 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3F: /* 0x3f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (63 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_40: /* 0x40 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (64 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_41: /* 0x41 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (65 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_42: /* 0x42 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (66 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_43: /* 0x43 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (67 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AGET: /* 0x44 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (68 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AGET_WIDE: /* 0x45 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (69 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AGET_OBJECT: /* 0x46 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (70 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AGET_BOOLEAN: /* 0x47 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (71 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AGET_BYTE: /* 0x48 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (72 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AGET_CHAR: /* 0x49 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (73 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AGET_SHORT: /* 0x4a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (74 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_APUT: /* 0x4b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (75 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_APUT_WIDE: /* 0x4c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (76 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_APUT_OBJECT: /* 0x4d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (77 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_APUT_BOOLEAN: /* 0x4e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (78 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_APUT_BYTE: /* 0x4f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (79 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_APUT_CHAR: /* 0x50 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (80 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_APUT_SHORT: /* 0x51 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (81 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET: /* 0x52 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (82 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_WIDE: /* 0x53 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (83 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_OBJECT: /* 0x54 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (84 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_BOOLEAN: /* 0x55 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (85 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_BYTE: /* 0x56 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (86 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_CHAR: /* 0x57 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (87 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_SHORT: /* 0x58 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (88 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT: /* 0x59 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (89 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_WIDE: /* 0x5a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (90 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_OBJECT: /* 0x5b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (91 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_BOOLEAN: /* 0x5c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (92 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_BYTE: /* 0x5d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (93 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_CHAR: /* 0x5e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (94 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_SHORT: /* 0x5f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (95 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET: /* 0x60 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (96 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_WIDE: /* 0x61 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (97 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_OBJECT: /* 0x62 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (98 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_BOOLEAN: /* 0x63 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (99 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_BYTE: /* 0x64 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (100 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_CHAR: /* 0x65 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (101 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_SHORT: /* 0x66 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (102 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT: /* 0x67 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (103 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_WIDE: /* 0x68 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (104 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_OBJECT: /* 0x69 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (105 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_BOOLEAN: /* 0x6a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (106 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_BYTE: /* 0x6b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (107 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_CHAR: /* 0x6c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (108 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_SHORT: /* 0x6d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (109 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_VIRTUAL: /* 0x6e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (110 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_SUPER: /* 0x6f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (111 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_DIRECT: /* 0x70 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (112 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_STATIC: /* 0x71 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (113 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_INTERFACE: /* 0x72 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (114 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_73: /* 0x73 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (115 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (116 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_SUPER_RANGE: /* 0x75 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (117 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_DIRECT_RANGE: /* 0x76 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (118 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_STATIC_RANGE: /* 0x77 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (119 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (120 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_79: /* 0x79 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (121 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_7A: /* 0x7a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (122 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEG_INT: /* 0x7b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (123 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NOT_INT: /* 0x7c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (124 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEG_LONG: /* 0x7d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (125 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NOT_LONG: /* 0x7e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (126 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEG_FLOAT: /* 0x7f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (127 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEG_DOUBLE: /* 0x80 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (128 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INT_TO_LONG: /* 0x81 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (129 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INT_TO_FLOAT: /* 0x82 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (130 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INT_TO_DOUBLE: /* 0x83 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (131 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_LONG_TO_INT: /* 0x84 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (132 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_LONG_TO_FLOAT: /* 0x85 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (133 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_LONG_TO_DOUBLE: /* 0x86 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (134 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_FLOAT_TO_INT: /* 0x87 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (135 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_FLOAT_TO_LONG: /* 0x88 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (136 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_FLOAT_TO_DOUBLE: /* 0x89 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (137 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DOUBLE_TO_INT: /* 0x8a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (138 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DOUBLE_TO_LONG: /* 0x8b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (139 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DOUBLE_TO_FLOAT: /* 0x8c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (140 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INT_TO_BYTE: /* 0x8d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (141 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INT_TO_CHAR: /* 0x8e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (142 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INT_TO_SHORT: /* 0x8f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (143 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_INT: /* 0x90 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (144 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_INT: /* 0x91 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (145 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_INT: /* 0x92 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (146 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_INT: /* 0x93 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (147 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_INT: /* 0x94 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (148 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AND_INT: /* 0x95 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (149 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_OR_INT: /* 0x96 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (150 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_XOR_INT: /* 0x97 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (151 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHL_INT: /* 0x98 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (152 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHR_INT: /* 0x99 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (153 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_USHR_INT: /* 0x9a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (154 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_LONG: /* 0x9b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (155 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_LONG: /* 0x9c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (156 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_LONG: /* 0x9d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (157 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_LONG: /* 0x9e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (158 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_LONG: /* 0x9f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (159 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AND_LONG: /* 0xa0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (160 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_OR_LONG: /* 0xa1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (161 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_XOR_LONG: /* 0xa2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (162 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHL_LONG: /* 0xa3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (163 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHR_LONG: /* 0xa4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (164 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_USHR_LONG: /* 0xa5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (165 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_FLOAT: /* 0xa6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (166 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_FLOAT: /* 0xa7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (167 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_FLOAT: /* 0xa8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (168 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_FLOAT: /* 0xa9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (169 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_FLOAT: /* 0xaa */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (170 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_DOUBLE: /* 0xab */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (171 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_DOUBLE: /* 0xac */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (172 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_DOUBLE: /* 0xad */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (173 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_DOUBLE: /* 0xae */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (174 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_DOUBLE: /* 0xaf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (175 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_INT_2ADDR: /* 0xb0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (176 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_INT_2ADDR: /* 0xb1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (177 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_INT_2ADDR: /* 0xb2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (178 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_INT_2ADDR: /* 0xb3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (179 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_INT_2ADDR: /* 0xb4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (180 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AND_INT_2ADDR: /* 0xb5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (181 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_OR_INT_2ADDR: /* 0xb6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (182 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_XOR_INT_2ADDR: /* 0xb7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (183 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHL_INT_2ADDR: /* 0xb8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (184 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHR_INT_2ADDR: /* 0xb9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (185 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_USHR_INT_2ADDR: /* 0xba */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (186 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_LONG_2ADDR: /* 0xbb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (187 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_LONG_2ADDR: /* 0xbc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (188 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_LONG_2ADDR: /* 0xbd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (189 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_LONG_2ADDR: /* 0xbe */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (190 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_LONG_2ADDR: /* 0xbf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (191 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AND_LONG_2ADDR: /* 0xc0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (192 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_OR_LONG_2ADDR: /* 0xc1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (193 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_XOR_LONG_2ADDR: /* 0xc2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (194 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHL_LONG_2ADDR: /* 0xc3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (195 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHR_LONG_2ADDR: /* 0xc4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (196 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_USHR_LONG_2ADDR: /* 0xc5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (197 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_FLOAT_2ADDR: /* 0xc6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (198 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_FLOAT_2ADDR: /* 0xc7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (199 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_FLOAT_2ADDR: /* 0xc8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (200 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_FLOAT_2ADDR: /* 0xc9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (201 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_FLOAT_2ADDR: /* 0xca */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (202 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_DOUBLE_2ADDR: /* 0xcb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (203 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_DOUBLE_2ADDR: /* 0xcc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (204 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_DOUBLE_2ADDR: /* 0xcd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (205 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_DOUBLE_2ADDR: /* 0xce */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (206 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_DOUBLE_2ADDR: /* 0xcf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (207 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_INT_LIT16: /* 0xd0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (208 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_RSUB_INT: /* 0xd1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (209 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_INT_LIT16: /* 0xd2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (210 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_INT_LIT16: /* 0xd3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (211 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_INT_LIT16: /* 0xd4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (212 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AND_INT_LIT16: /* 0xd5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (213 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_OR_INT_LIT16: /* 0xd6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (214 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_XOR_INT_LIT16: /* 0xd7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (215 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_INT_LIT8: /* 0xd8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (216 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_RSUB_INT_LIT8: /* 0xd9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (217 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_INT_LIT8: /* 0xda */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (218 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_INT_LIT8: /* 0xdb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (219 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_INT_LIT8: /* 0xdc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (220 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AND_INT_LIT8: /* 0xdd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (221 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_OR_INT_LIT8: /* 0xde */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (222 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_XOR_INT_LIT8: /* 0xdf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (223 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHL_INT_LIT8: /* 0xe0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (224 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHR_INT_LIT8: /* 0xe1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (225 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_USHR_INT_LIT8: /* 0xe2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (226 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_VOLATILE: /* 0xe3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (227 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_VOLATILE: /* 0xe4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (228 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_VOLATILE: /* 0xe5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (229 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_VOLATILE: /* 0xe6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (230 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_OBJECT_VOLATILE: /* 0xe7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (231 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_WIDE_VOLATILE: /* 0xe8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (232 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_WIDE_VOLATILE: /* 0xe9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (233 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_WIDE_VOLATILE: /* 0xea */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (234 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_WIDE_VOLATILE: /* 0xeb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (235 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_BREAKPOINT: /* 0xec */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (236 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_THROW_VERIFICATION_ERROR: /* 0xed */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (237 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_EXECUTE_INLINE: /* 0xee */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (238 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_EXECUTE_INLINE_RANGE: /* 0xef */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (239 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_OBJECT_INIT_RANGE: /* 0xf0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (240 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_RETURN_VOID_BARRIER: /* 0xf1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (241 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_QUICK: /* 0xf2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (242 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_WIDE_QUICK: /* 0xf3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (243 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_OBJECT_QUICK: /* 0xf4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (244 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_QUICK: /* 0xf5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (245 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_WIDE_QUICK: /* 0xf6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (246 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_OBJECT_QUICK: /* 0xf7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (247 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (248 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (249 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_SUPER_QUICK: /* 0xfa */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (250 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (251 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_OBJECT_VOLATILE: /* 0xfc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (252 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_OBJECT_VOLATILE: /* 0xfd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (253 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_OBJECT_VOLATILE: /* 0xfe */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (254 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DISPATCH_FF: /* 0xff */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (255 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_CLASS_JUMBO: /* 0x100 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (256 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CHECK_CAST_JUMBO: /* 0x101 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (257 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INSTANCE_OF_JUMBO: /* 0x102 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (258 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEW_INSTANCE_JUMBO: /* 0x103 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (259 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEW_ARRAY_JUMBO: /* 0x104 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (260 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_FILLED_NEW_ARRAY_JUMBO: /* 0x105 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (261 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_JUMBO: /* 0x106 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (262 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_WIDE_JUMBO: /* 0x107 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (263 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_OBJECT_JUMBO: /* 0x108 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (264 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_BOOLEAN_JUMBO: /* 0x109 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (265 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_BYTE_JUMBO: /* 0x10a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (266 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_CHAR_JUMBO: /* 0x10b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (267 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_SHORT_JUMBO: /* 0x10c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (268 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_JUMBO: /* 0x10d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (269 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_WIDE_JUMBO: /* 0x10e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (270 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_OBJECT_JUMBO: /* 0x10f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (271 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_BOOLEAN_JUMBO: /* 0x110 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (272 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_BYTE_JUMBO: /* 0x111 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (273 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_CHAR_JUMBO: /* 0x112 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (274 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_SHORT_JUMBO: /* 0x113 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (275 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_JUMBO: /* 0x114 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (276 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_WIDE_JUMBO: /* 0x115 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (277 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_OBJECT_JUMBO: /* 0x116 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (278 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_BOOLEAN_JUMBO: /* 0x117 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (279 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_BYTE_JUMBO: /* 0x118 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (280 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_CHAR_JUMBO: /* 0x119 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (281 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_SHORT_JUMBO: /* 0x11a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (282 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_JUMBO: /* 0x11b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (283 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_WIDE_JUMBO: /* 0x11c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (284 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_OBJECT_JUMBO: /* 0x11d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (285 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_BOOLEAN_JUMBO: /* 0x11e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (286 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_BYTE_JUMBO: /* 0x11f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (287 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_CHAR_JUMBO: /* 0x120 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (288 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_SHORT_JUMBO: /* 0x121 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (289 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_VIRTUAL_JUMBO: /* 0x122 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (290 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_SUPER_JUMBO: /* 0x123 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (291 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_DIRECT_JUMBO: /* 0x124 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (292 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_STATIC_JUMBO: /* 0x125 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (293 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_INTERFACE_JUMBO: /* 0x126 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (294 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_27FF: /* 0x127 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (295 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_28FF: /* 0x128 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (296 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_29FF: /* 0x129 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (297 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_2AFF: /* 0x12a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (298 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_2BFF: /* 0x12b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (299 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_2CFF: /* 0x12c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (300 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_2DFF: /* 0x12d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (301 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_2EFF: /* 0x12e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (302 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_2FFF: /* 0x12f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (303 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_30FF: /* 0x130 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (304 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_31FF: /* 0x131 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (305 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_32FF: /* 0x132 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (306 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_33FF: /* 0x133 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (307 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_34FF: /* 0x134 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (308 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_35FF: /* 0x135 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (309 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_36FF: /* 0x136 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (310 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_37FF: /* 0x137 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (311 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_38FF: /* 0x138 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (312 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_39FF: /* 0x139 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (313 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3AFF: /* 0x13a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (314 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3BFF: /* 0x13b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (315 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3CFF: /* 0x13c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (316 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3DFF: /* 0x13d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (317 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3EFF: /* 0x13e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (318 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3FFF: /* 0x13f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (319 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_40FF: /* 0x140 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (320 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_41FF: /* 0x141 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (321 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_42FF: /* 0x142 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (322 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_43FF: /* 0x143 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (323 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_44FF: /* 0x144 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (324 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_45FF: /* 0x145 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (325 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_46FF: /* 0x146 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (326 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_47FF: /* 0x147 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (327 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_48FF: /* 0x148 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (328 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_49FF: /* 0x149 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (329 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_4AFF: /* 0x14a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (330 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_4BFF: /* 0x14b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (331 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_4CFF: /* 0x14c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (332 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_4DFF: /* 0x14d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (333 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_4EFF: /* 0x14e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (334 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_4FFF: /* 0x14f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (335 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_50FF: /* 0x150 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (336 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_51FF: /* 0x151 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (337 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_52FF: /* 0x152 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (338 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_53FF: /* 0x153 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (339 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_54FF: /* 0x154 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (340 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_55FF: /* 0x155 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (341 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_56FF: /* 0x156 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (342 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_57FF: /* 0x157 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (343 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_58FF: /* 0x158 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (344 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_59FF: /* 0x159 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (345 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_5AFF: /* 0x15a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (346 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_5BFF: /* 0x15b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (347 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_5CFF: /* 0x15c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (348 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_5DFF: /* 0x15d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (349 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_5EFF: /* 0x15e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (350 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_5FFF: /* 0x15f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (351 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_60FF: /* 0x160 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (352 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_61FF: /* 0x161 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (353 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_62FF: /* 0x162 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (354 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_63FF: /* 0x163 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (355 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_64FF: /* 0x164 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (356 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_65FF: /* 0x165 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (357 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_66FF: /* 0x166 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (358 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_67FF: /* 0x167 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (359 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_68FF: /* 0x168 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (360 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_69FF: /* 0x169 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (361 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_6AFF: /* 0x16a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (362 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_6BFF: /* 0x16b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (363 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_6CFF: /* 0x16c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (364 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_6DFF: /* 0x16d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (365 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_6EFF: /* 0x16e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (366 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_6FFF: /* 0x16f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (367 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_70FF: /* 0x170 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (368 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_71FF: /* 0x171 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (369 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_72FF: /* 0x172 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (370 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_73FF: /* 0x173 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (371 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_74FF: /* 0x174 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (372 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_75FF: /* 0x175 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (373 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_76FF: /* 0x176 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (374 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_77FF: /* 0x177 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (375 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_78FF: /* 0x178 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (376 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_79FF: /* 0x179 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (377 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_7AFF: /* 0x17a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (378 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_7BFF: /* 0x17b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (379 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_7CFF: /* 0x17c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (380 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_7DFF: /* 0x17d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (381 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_7EFF: /* 0x17e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (382 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_7FFF: /* 0x17f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (383 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_80FF: /* 0x180 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (384 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_81FF: /* 0x181 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (385 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_82FF: /* 0x182 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (386 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_83FF: /* 0x183 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (387 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_84FF: /* 0x184 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (388 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_85FF: /* 0x185 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (389 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_86FF: /* 0x186 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (390 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_87FF: /* 0x187 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (391 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_88FF: /* 0x188 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (392 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_89FF: /* 0x189 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (393 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_8AFF: /* 0x18a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (394 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_8BFF: /* 0x18b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (395 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_8CFF: /* 0x18c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (396 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_8DFF: /* 0x18d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (397 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_8EFF: /* 0x18e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (398 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_8FFF: /* 0x18f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (399 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_90FF: /* 0x190 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (400 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_91FF: /* 0x191 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (401 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_92FF: /* 0x192 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (402 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_93FF: /* 0x193 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (403 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_94FF: /* 0x194 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (404 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_95FF: /* 0x195 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (405 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_96FF: /* 0x196 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (406 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_97FF: /* 0x197 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (407 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_98FF: /* 0x198 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (408 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_99FF: /* 0x199 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (409 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_9AFF: /* 0x19a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (410 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_9BFF: /* 0x19b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (411 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_9CFF: /* 0x19c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (412 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_9DFF: /* 0x19d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (413 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_9EFF: /* 0x19e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (414 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_9FFF: /* 0x19f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (415 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A0FF: /* 0x1a0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (416 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A1FF: /* 0x1a1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (417 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A2FF: /* 0x1a2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (418 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A3FF: /* 0x1a3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (419 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A4FF: /* 0x1a4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (420 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A5FF: /* 0x1a5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (421 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A6FF: /* 0x1a6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (422 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A7FF: /* 0x1a7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (423 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A8FF: /* 0x1a8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (424 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A9FF: /* 0x1a9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (425 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_AAFF: /* 0x1aa */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (426 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_ABFF: /* 0x1ab */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (427 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_ACFF: /* 0x1ac */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (428 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_ADFF: /* 0x1ad */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (429 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_AEFF: /* 0x1ae */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (430 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_AFFF: /* 0x1af */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (431 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B0FF: /* 0x1b0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (432 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B1FF: /* 0x1b1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (433 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B2FF: /* 0x1b2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (434 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B3FF: /* 0x1b3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (435 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B4FF: /* 0x1b4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (436 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B5FF: /* 0x1b5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (437 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B6FF: /* 0x1b6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (438 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B7FF: /* 0x1b7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (439 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B8FF: /* 0x1b8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (440 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B9FF: /* 0x1b9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (441 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_BAFF: /* 0x1ba */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (442 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_BBFF: /* 0x1bb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (443 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_BCFF: /* 0x1bc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (444 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_BDFF: /* 0x1bd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (445 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_BEFF: /* 0x1be */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (446 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_BFFF: /* 0x1bf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (447 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C0FF: /* 0x1c0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (448 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C1FF: /* 0x1c1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (449 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C2FF: /* 0x1c2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (450 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C3FF: /* 0x1c3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (451 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C4FF: /* 0x1c4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (452 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C5FF: /* 0x1c5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (453 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C6FF: /* 0x1c6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (454 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C7FF: /* 0x1c7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (455 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C8FF: /* 0x1c8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (456 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C9FF: /* 0x1c9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (457 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_CAFF: /* 0x1ca */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (458 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_CBFF: /* 0x1cb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (459 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_CCFF: /* 0x1cc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (460 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_CDFF: /* 0x1cd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (461 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_CEFF: /* 0x1ce */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (462 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_CFFF: /* 0x1cf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (463 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D0FF: /* 0x1d0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (464 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D1FF: /* 0x1d1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (465 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D2FF: /* 0x1d2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (466 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D3FF: /* 0x1d3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (467 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D4FF: /* 0x1d4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (468 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D5FF: /* 0x1d5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (469 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D6FF: /* 0x1d6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (470 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D7FF: /* 0x1d7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (471 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D8FF: /* 0x1d8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (472 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D9FF: /* 0x1d9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (473 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_DAFF: /* 0x1da */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (474 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_DBFF: /* 0x1db */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (475 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_DCFF: /* 0x1dc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (476 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_DDFF: /* 0x1dd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (477 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_DEFF: /* 0x1de */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (478 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_DFFF: /* 0x1df */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (479 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E0FF: /* 0x1e0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (480 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E1FF: /* 0x1e1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (481 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E2FF: /* 0x1e2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (482 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E3FF: /* 0x1e3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (483 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E4FF: /* 0x1e4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (484 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E5FF: /* 0x1e5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (485 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E6FF: /* 0x1e6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (486 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E7FF: /* 0x1e7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (487 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E8FF: /* 0x1e8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (488 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E9FF: /* 0x1e9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (489 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_EAFF: /* 0x1ea */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (490 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_EBFF: /* 0x1eb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (491 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_ECFF: /* 0x1ec */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (492 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_EDFF: /* 0x1ed */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (493 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_EEFF: /* 0x1ee */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (494 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_EFFF: /* 0x1ef */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (495 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_F0FF: /* 0x1f0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (496 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_F1FF: /* 0x1f1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (497 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_OBJECT_INIT_JUMBO: /* 0x1f2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (498 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_VOLATILE_JUMBO: /* 0x1f3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (499 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_WIDE_VOLATILE_JUMBO: /* 0x1f4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (500 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_OBJECT_VOLATILE_JUMBO: /* 0x1f5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (501 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_VOLATILE_JUMBO: /* 0x1f6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (502 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_WIDE_VOLATILE_JUMBO: /* 0x1f7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (503 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_OBJECT_VOLATILE_JUMBO: /* 0x1f8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (504 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_VOLATILE_JUMBO: /* 0x1f9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (505 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_WIDE_VOLATILE_JUMBO: /* 0x1fa */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (506 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_OBJECT_VOLATILE_JUMBO: /* 0x1fb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (507 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_VOLATILE_JUMBO: /* 0x1fc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (508 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_WIDE_VOLATILE_JUMBO: /* 0x1fd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (509 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_OBJECT_VOLATILE_JUMBO: /* 0x1fe */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (510 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_THROW_VERIFICATION_ERROR_JUMBO: /* 0x1ff */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (511 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+    .balign 64
+    .size   dvmAsmAltInstructionStart, .-dvmAsmAltInstructionStart
+    .global dvmAsmAltInstructionEnd
+dvmAsmAltInstructionEnd:
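
The stubs above complete the alternate ("ALT") handler table (the last entry is opcode 0x1ff, slot 511): each one is a 64-byte-aligned stub that parks the address of the real handler in lr and tail-calls dvmCheckInst, so the checker runs first and then falls through into the normal implementation. A minimal C rendering of that control flow, assuming the VM's internal declarations (Dalvik.h) and reading dvmCheckInst's (dPC, self) signature off the stub comments:

    #include "Dalvik.h"   /* assumed umbrella header for Thread, u2, etc. */

    extern char dvmAsmInstructionStart[];   /* base of the real handler table */

    /* Illustrative only: the real dispatch is the hand-written stub above. */
    static void altStubFor(unsigned opcodeIndex, const u2* dPC, Thread* self)
    {
        /* each real handler occupies a fixed 64-byte slot */
        void (*realHandler)(void) =
            (void (*)(void))(dvmAsmInstructionStart + opcodeIndex * 64);
        dvmCheckInst(dPC, self);   /* asm: lr = realHandler, then b dvmCheckInst */
        realHandler();             /* the checker "returns" into the real handler */
    }
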
 /* File: armv5te/footer.S */
 
 /*
@@ -9374,71 +20913,64 @@
 #if defined(WITH_SELF_VERIFICATION)
     .global dvmJitToInterpPunt
 dvmJitToInterpPunt:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r2,#kSVSPunt                 @ r2<- interpreter entry point
     mov    r3, #0
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpSingleStep
 dvmJitToInterpSingleStep:
-    str    lr,[rGLUE,#offGlue_jitResumeNPC]
-    str    r1,[rGLUE,#offGlue_jitResumeDPC]
+    str    lr,[rSELF,#offThread_jitResumeNPC]
+    str    r1,[rSELF,#offThread_jitResumeDPC]
     mov    r2,#kSVSSingleStep           @ r2<- interpreter entry point
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpNoChainNoProfile
 dvmJitToInterpNoChainNoProfile:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC                       @ pass our target PC
     mov    r2,#kSVSNoProfile            @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpTraceSelectNoChain
 dvmJitToInterpTraceSelectNoChain:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC                       @ pass our target PC
     mov    r2,#kSVSTraceSelect          @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpTraceSelect
 dvmJitToInterpTraceSelect:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     ldr    r0,[lr, #-1]                 @ pass our target PC
     mov    r2,#kSVSTraceSelect          @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpBackwardBranch
 dvmJitToInterpBackwardBranch:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     ldr    r0,[lr, #-1]                 @ pass our target PC
     mov    r2,#kSVSBackwardBranch       @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpNormal
 dvmJitToInterpNormal:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     ldr    r0,[lr, #-1]                 @ pass our target PC
     mov    r2,#kSVSNormal               @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpNoChain
 dvmJitToInterpNoChain:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC                       @ pass our target PC
     mov    r2,#kSVSNoChain              @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 #else
 /*
@@ -9450,7 +20982,6 @@
  */
     .global dvmJitToInterpPunt
 dvmJitToInterpPunt:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    rPC, r0
 #if defined(WITH_JIT_TUNING)
     mov    r0,lr
@@ -9458,8 +20989,8 @@
 #endif
     EXPORT_PC()
     mov    r0, #0
-    str    r0, [r10, #offThread_inJitCodeCache] @ Back to the interp land
-    adrl   rIBASE, dvmAsmInstructionStart
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     FETCH_INST()
     GET_INST_OPCODE(ip)
     GOTO_OPCODE(ip)
@@ -9473,17 +21004,17 @@
  */
     .global dvmJitToInterpSingleStep
 dvmJitToInterpSingleStep:
-    str    lr,[rGLUE,#offGlue_jitResumeNPC]
-    str    r1,[rGLUE,#offGlue_jitResumeDPC]
+    str    lr,[rSELF,#offThread_jitResumeNPC]
+    str    r1,[rSELF,#offThread_jitResumeDPC]
     mov    r1,#kInterpEntryInstr
     @ enum is 4 byte in aapcs-EABI
-    str    r1, [rGLUE, #offGlue_entryPoint]
+    str    r1, [rSELF, #offThread_entryPoint]
     mov    rPC,r0
     EXPORT_PC()
 
-    adrl   rIBASE, dvmAsmInstructionStart
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     mov    r2,#kJitSingleStep     @ Ask for single step and then revert
-    str    r2,[rGLUE,#offGlue_jitState]
+    str    r2,[rSELF,#offThread_jitState]
     mov    r1,#1                  @ set changeInterp to bail to debug interp
     b      common_gotoBail
 
@@ -9496,10 +21027,9 @@
 #if defined(WITH_JIT_TUNING)
     bl     dvmBumpNoChain
 #endif
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC
-    bl     dvmJitGetCodeAddr        @ Is there a translation?
-    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl     dvmJitGetTraceAddr       @ Is there a translation?
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     mov    r1, rPC                  @ arg1 of translation may need this
     mov    lr, #0                   @  in case target is HANDLER_INTERPRET
     cmp    r0,#0                    @ !0 means translation exists
@@ -9514,12 +21044,11 @@
     .global dvmJitToInterpTraceSelect
 dvmJitToInterpTraceSelect:
     ldr    rPC,[lr, #-1]           @ get our target PC
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     add    rINST,lr,#-5            @ save start of chain branch
     add    rINST, #-4              @  .. which is 9 bytes back
     mov    r0,rPC
-    bl     dvmJitGetCodeAddr       @ Is there a translation?
-    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl     dvmJitGetTraceAddr      @ Is there a translation?
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     cmp    r0,#0
     beq    2f
     mov    r1,rINST
@@ -9532,7 +21061,7 @@
 
 /* No translation, so request one if profiling isn't disabled*/
 2:
-    adrl   rIBASE, dvmAsmInstructionStart
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     GET_JIT_PROF_TABLE(r0)
     FETCH_INST()
     cmp    r0, #0
@@ -9558,15 +21087,14 @@
     .global dvmJitToInterpNormal
 dvmJitToInterpNormal:
     ldr    rPC,[lr, #-1]           @ get our target PC
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     add    rINST,lr,#-5            @ save start of chain branch
     add    rINST,#-4               @ .. which is 9 bytes back
 #if defined(WITH_JIT_TUNING)
     bl     dvmBumpNormal
 #endif
     mov    r0,rPC
-    bl     dvmJitGetCodeAddr        @ Is there a translation?
-    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl     dvmJitGetTraceAddr      @ Is there a translation?
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     cmp    r0,#0
     beq    toInterpreter            @ go if not, otherwise do chain
     mov    r1,rINST
@@ -9586,16 +21114,15 @@
 #if defined(WITH_JIT_TUNING)
     bl     dvmBumpNoChain
 #endif
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC
-    bl     dvmJitGetCodeAddr        @ Is there a translation?
-    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl     dvmJitGetTraceAddr       @ Is there a translation?
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     mov    r1, rPC                  @ arg1 of translation may need this
     mov    lr, #0                   @  in case target is HANDLER_INTERPRET
     cmp    r0,#0
     bxne   r0                       @ continue native execution if so
     EXPORT_PC()
-    adrl   rIBASE, dvmAsmInstructionStart
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     FETCH_INST()
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     GOTO_OPCODE(ip)                     @ jump to next instruction
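
These dvmJitToInterp* trampolines now look the translation cache up with dvmJitGetTraceAddr and reload the handler base from the thread's curHandlerTable instead of the fixed dvmAsmInstructionStart. A hedged sketch of the common pattern, reusing the assumptions of the earlier sketch (the helper names here are hypothetical; the Thread fields and dvmJitGetTraceAddr come from the assembly):

    static void jitToInterpNoChain(Thread* self, const u2* pc)
    {
        void* trace = dvmJitGetTraceAddr(pc);    /* is there a translation? */
        self->inJitCodeCache = trace;            /* NULL means "back in the interpreter" */
        if (trace != NULL) {
            resumeTranslation(trace, pc);        /* hypothetical: "bxne r0" in the assembly */
        } else {
            void* handlerBase = self->curHandlerTable;  /* was: adrl dvmAsmInstructionStart */
            interpretFrom(pc, handlerBase);      /* hypothetical: FETCH_INST/GOTO_OPCODE */
        }
    }
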
@@ -9609,10 +21136,9 @@
 #if defined(WITH_JIT_TUNING)
     bl     dvmBumpNoChain
 #endif
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC
-    bl     dvmJitGetCodeAddr        @ Is there a translation?
-    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl     dvmJitGetTraceAddr       @ Is there a translation?
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     mov    r1, rPC                  @ arg1 of translation may need this
     mov    lr, #0                   @  in case target is HANDLER_INTERPRET
     cmp    r0,#0
@@ -9621,13 +21147,13 @@
 
 /*
  * No translation, restore interpreter regs and start interpreting.
- * rGLUE & rFP were preserved in the translated code, and rPC has
+ * rSELF & rFP were preserved in the translated code, and rPC has
  * already been restored by the time we get here.  We'll need to set
  * up rIBASE & rINST, and load the address of the JitTable into r0.
  */
 toInterpreter:
     EXPORT_PC()
-    adrl   rIBASE, dvmAsmInstructionStart
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     FETCH_INST()
     GET_JIT_PROF_TABLE(r0)
     @ NOTE: intended fallthrough
@@ -9659,13 +21185,13 @@
  * is already a native translation in place (and, if so,
  * jump to it now).
  */
+
     GET_JIT_THRESHOLD(r1)
-    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
     strb    r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ reset counter
     EXPORT_PC()
     mov     r0,rPC
-    bl      dvmJitGetCodeAddr           @ r0<- dvmJitGetCodeAddr(rPC)
-    str     r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl      dvmJitGetTraceAddr          @ r0<- dvmJitGetTraceAddr(rPC)
+    str     r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     mov     r1, rPC                     @ arg1 of translation may need this
     mov     lr, #0                      @  in case target is HANDLER_INTERPRET
     cmp     r0,#0
@@ -9686,9 +21212,8 @@
     cmp     r0, r10                     @ special case?
     bne     jitSVShadowRunStart         @ set up self verification shadow space
     @ Need to clear the inJitCodeCache flag
-    ldr    r10, [rGLUE, #offGlue_self]  @ r10 <- glue->self
     mov    r3, #0                       @ 0 means not in the JIT code cache
-    str    r3, [r10, #offThread_inJitCodeCache] @ back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ back to the interp land
     GET_INST_OPCODE(ip)
     GOTO_OPCODE(ip)
     /* no return */
@@ -9699,9 +21224,10 @@
  *  r2 is jit state, e.g. kJitTSelectRequest or kJitTSelectRequestHot
  */
 common_selectTrace:
-    str     r2,[rGLUE,#offGlue_jitState]
+
+    str     r2,[rSELF,#offThread_jitState]
     mov     r2,#kInterpEntryInstr       @ normal entry reason
-    str     r2,[rGLUE,#offGlue_entryPoint]
+    str     r2,[rSELF,#offThread_entryPoint]
     mov     r1,#1                       @ set changeInterp
     b       common_gotoBail
 
@@ -9710,42 +21236,41 @@
  * Save PC and registers to shadow memory for self verification mode
  * before jumping to native translation.
  * On entry:
- *    rPC, rFP, rGLUE: the values that they should contain
+ *    rPC, rFP, rSELF: the values that they should contain
  *    r10: the address of the target translation.
  */
 jitSVShadowRunStart:
     mov     r0,rPC                      @ r0<- program counter
     mov     r1,rFP                      @ r1<- frame pointer
-    mov     r2,rGLUE                    @ r2<- InterpState pointer
+    mov     r2,rSELF                    @ r2<- self (Thread) pointer
     mov     r3,r10                      @ r3<- target translation
     bl      dvmSelfVerificationSaveState @ save registers to shadow space
     ldr     rFP,[r0,#offShadowSpace_shadowFP] @ rFP<- fp in shadow space
-    add     rGLUE,r0,#offShadowSpace_interpState @ rGLUE<- rGLUE in shadow space
     bx      r10                         @ jump to the translation
 
 /*
- * Restore PC, registers, and interpState to original values
+ * Restore PC, registers, and interpreter state to original values
  * before jumping back to the interpreter.
  */
 jitSVShadowRunEnd:
     mov    r1,rFP                        @ pass ending fp
+    mov    r3,rSELF                      @ pass self ptr for convenience
     bl     dvmSelfVerificationRestoreState @ restore pc and fp values
-    ldr    rPC,[r0,#offShadowSpace_startPC] @ restore PC
-    ldr    rFP,[r0,#offShadowSpace_fp]   @ restore FP
-    ldr    rGLUE,[r0,#offShadowSpace_glue] @ restore InterpState
+    ldr    rPC,[rSELF,#offThread_pc]     @ restore PC
+    ldr    rFP,[rSELF,#offThread_fp]     @ restore FP
     ldr    r1,[r0,#offShadowSpace_svState] @ get self verification state
     cmp    r1,#0                         @ check for punt condition
     beq    1f
     mov    r2,#kJitSelfVerification      @ ask for self verification
-    str    r2,[rGLUE,#offGlue_jitState]
+    str    r2,[rSELF,#offThread_jitState]
     mov    r2,#kInterpEntryInstr         @ normal entry reason
-    str    r2,[rGLUE,#offGlue_entryPoint]
+    str    r2,[rSELF,#offThread_entryPoint]
     mov    r1,#1                         @ set changeInterp
     b      common_gotoBail
 
 1:                                       @ exit to interpreter without check
     EXPORT_PC()
-    adrl   rIBASE, dvmAsmInstructionStart
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     FETCH_INST()
     GET_INST_OPCODE(ip)
     GOTO_OPCODE(ip)
@@ -9800,48 +21325,20 @@
  *  r9 is trampoline PC adjustment *in bytes*
  */
 common_periodicChecks:
-    ldr     r3, [rGLUE, #offGlue_pSelfSuspendCount] @ r3<- &suspendCount
-
-    ldr     r1, [rGLUE, #offGlue_pDebuggerActive]   @ r1<- &debuggerActive
-    ldr     r2, [rGLUE, #offGlue_pActiveProfilers]  @ r2<- &activeProfilers
-
-    ldr     ip, [r3]                    @ ip<- suspendCount (int)
-
-    cmp     r1, #0                      @ debugger enabled?
-#if defined(WORKAROUND_CORTEX_A9_745320)
-    /* Don't use conditional loads if the HW defect exists */
-    beq     101f
-    ldrb    r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
-101:
-#else
-    ldrneb  r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
-#endif
-    ldr     r2, [r2]                    @ r2<- activeProfilers (int)
-    orrnes  ip, ip, r1                  @ ip<- suspendCount | debuggerActive
-    /*
-     * Don't switch the interpreter in the libdvm_traceview build even if the
-     * profiler is active.
-     * The code here is opted for less intrusion instead of performance.
-     * That is, *pActiveProfilers is still loaded into r2 even though it is not
-     * used when WITH_INLINE_PROFILING is defined.
-     */
-#if !defined(WITH_INLINE_PROFILING)
-    orrs    ip, ip, r2                  @ ip<- suspend|debugger|profiler; set Z
-#endif
-
-
-    bxeq    lr                          @ all zero, return
-
+/* TUNING - make this a direct load when interpBreak moved to Thread */
+    ldr     r1, [rSELF, #offThread_pInterpBreak] @ r1<- &interpBreak
+    /* speculatively load the thread-specific suspend count */
+    ldr     ip, [rSELF, #offThread_suspendCount]
+    ldr     r1, [r1]                                @ r1<- interpBreak
+    cmp     r1, #0                                  @ anything unusual?
+    bxeq    lr                                      @ return if not
     /*
      * One or more interesting events have happened.  Figure out what.
      *
-     * If debugging or profiling are compiled in, we need to disambiguate.
-     *
      * r0 still holds the reentry type.
      */
-    ldr     ip, [r3]                    @ ip<- suspendCount (int)
     cmp     ip, #0                      @ want suspend?
-    beq     1f                          @ no, must be debugger/profiler
+    beq     3f                          @ no, must be something else
 
     stmfd   sp!, {r0, lr}               @ preserve r0 and lr
 #if defined(WITH_JIT)
@@ -9849,77 +21346,86 @@
      * Refresh the Jit's cached copy of profile table pointer.  This pointer
      * doubles as the Jit's on/off switch.
      */
-    ldr     r3, [rGLUE, #offGlue_ppJitProfTable] @ r3<-&gDvmJit.pJitProfTable
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    ldr     r3, [rSELF, #offThread_ppJitProfTable] @ r3<-&gDvmJit.pJitProfTable
+    mov     r0, rSELF                  @ r0<- self
     ldr     r3, [r3] @ r3 <- pJitProfTable
     EXPORT_PC()                         @ need for precise GC
-    str     r3, [rGLUE, #offGlue_pJitProfTable] @ refresh Jit's on/off switch
+    str     r3, [rSELF, #offThread_pJitProfTable] @ refresh Jit's on/off switch
 #else
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    mov     r0, rSELF                   @ r0<- self
     EXPORT_PC()                         @ need for precise GC
 #endif
     bl      dvmCheckSuspendPending      @ do full check, suspend if necessary
     ldmfd   sp!, {r0, lr}               @ restore r0 and lr
 
     /*
-     * Reload the debugger/profiler enable flags.  We're checking to see
-     * if either of these got set while we were suspended.
-     *
-     * If WITH_INLINE_PROFILING is configured, don't check whether the profiler
-     * is enabled or not as the profiling will be done inline.
+     * Reload the interpBreak flags - they may have changed while we
+     * were suspended.
      */
-    ldr     r1, [rGLUE, #offGlue_pDebuggerActive]   @ r1<- &debuggerActive
-    cmp     r1, #0                      @ debugger enabled?
-#if defined(WORKAROUND_CORTEX_A9_745320)
-    /* Don't use conditional loads if the HW defect exists */
-    beq     101f
-    ldrb    r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
-101:
-#else
-    ldrneb  r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
-#endif
+/* TUNING - direct load when InterpBreak moved to Thread */
+    ldr     r1, [rSELF, #offThread_pInterpBreak]   @ r1<- &interpBreak
+    ldr     r1, [r1]                    @ r1<- interpBreak
+3:
+    /*
+     * TODO: this code is too fragile.  Need a general mechanism
+     * to identify what actions to take by submode.  Some profiling modes
+     * (instruction count) need to single-step, while method tracing
+     * may not.  Debugging with breakpoints can run unfettered, but
+     * source-level single-stepping requires Dalvik singlestepping.
+     * GC may require a one-shot action and then full-speed resumption.
+     */
+    ands    r1, #(kSubModeDebuggerActive | kSubModeEmulatorTrace | kSubModeInstCounting)
+    bxeq    lr                          @ nothing to do, return
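
common_periodicChecks now keys everything off a single interpBreak word reached through the thread: if it is zero the routine returns immediately, a non-zero suspendCount triggers the full dvmCheckSuspendPending path, and only the debugger/emulator-trace/instruction-counting submodes force a switch to the debug interpreter. A rough C equivalent, with field names taken from the offsets above (the helper itself is illustrative, not VM code):

    /* true = keep running the fast interpreter, false = bail (common_gotoBail) */
    static bool periodicChecksFastPath(Thread* self)
    {
        int breakFlags = *self->pInterpBreak;        /* interpBreak */
        int suspendCount = self->suspendCount;       /* speculative load, as above */
        if (breakFlags == 0)
            return true;                             /* nothing unusual */
        if (suspendCount != 0) {
            dvmCheckSuspendPending(self);            /* full check; may block */
            breakFlags = *self->pInterpBreak;        /* reload, may have changed */
        }
        return (breakFlags & (kSubModeDebuggerActive |
                              kSubModeEmulatorTrace |
                              kSubModeInstCounting)) == 0;
    }
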
 
-#if !defined(WITH_INLINE_PROFILING)
-    ldr     r2, [rGLUE, #offGlue_pActiveProfilers]  @ r2<- &activeProfilers
-    ldr     r2, [r2]                    @ r2<- activeProfilers (int)
-    orrs    r1, r1, r2
-#else
-    cmp     r1, #0                      @ only consult the debuggerActive flag
-#endif
-
-    beq     2f
-
-1:  @ debugger/profiler enabled, bail out; glue->entryPoint was set above
-    str     r0, [rGLUE, #offGlue_entryPoint]    @ store r0, need for debug/prof
+    @ debugger/profiler enabled, bail out; self->entryPoint was set above
+    str     r0, [rSELF, #offThread_entryPoint]  @ store r0, need for debug/prof
     add     rPC, rPC, r9                @ update rPC
     mov     r1, #1                      @ "want switch" = true
     b       common_gotoBail             @ side exit
 
-2:
-    bx      lr                          @ nothing to do, return
-
 
 /*
  * The equivalent of "goto bail", this calls through the "bail handler".
  *
- * State registers will be saved to the "glue" area before bailing.
+ * State registers will be saved to the "thread" area before bailing.
  *
  * On entry:
  *  r1 is "bool changeInterp", indicating if we want to switch to the
  *     other interpreter or just bail all the way out
  */
 common_gotoBail:
-    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
-    mov     r0, rGLUE                   @ r0<- glue ptr
-    b       dvmMterpStdBail             @ call(glue, changeInterp)
+    SAVE_PC_FP_TO_SELF()                @ export state to "thread"
+    mov     r0, rSELF                   @ r0<- self ptr
+    b       dvmMterpStdBail             @ call(self, changeInterp)
 
     @add     r1, r1, #1                  @ using (boolean+1)
-    @add     r0, rGLUE, #offGlue_jmpBuf  @ r0<- &glue->jmpBuf
+    @add     r0, rSELF, #offThread_jmpBuf @ r0<- &self->jmpBuf
     @bl      _longjmp                    @ does not return
     @bl      common_abort
 
 
 /*
+ * Common code for jumbo method invocation.
+ * NOTE: this adjusts rPC to account for the difference in instruction width.
+ * As a result, the savedPc in the stack frame will not be wholly accurate. So
+ * long as that is only used for source file line number calculations, we're
+ * okay.
+ *
+ * On entry:
+ *  r0 is "Method* methodToCall", the method we're trying to call
+ */
+common_invokeMethodJumbo:
+.LinvokeNewJumbo:
+    @ prepare to copy args to "outs" area of current frame
+    add     rPC, rPC, #4                @ adjust pc to make return consistent
+    FETCH(r2, 1)                        @ r2<- BBBB (arg count)
+    SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
+    cmp     r2, #0                      @ no args?
+    beq     .LinvokeArgsDone            @ if no args, skip the rest
+    FETCH(r1, 2)                        @ r1<- CCCC
+    b       .LinvokeRangeArgs           @ handle args like invoke range
+
+/*
  * Common code for method invocation with range.
  *
  * On entry:
@@ -9933,16 +21439,15 @@
     beq     .LinvokeArgsDone            @ if no args, skip the rest
     FETCH(r1, 2)                        @ r1<- CCCC
 
+.LinvokeRangeArgs:
     @ r0=methodToCall, r1=CCCC, r2=count, r10=outs
     @ (very few methods have > 10 args; could unroll for common cases)
     add     r3, rFP, r1, lsl #2         @ r3<- &fp[CCCC]
     sub     r10, r10, r2, lsl #2        @ r10<- "outs" area, for call args
-    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
 1:  ldr     r1, [r3], #4                @ val = *fp++
     subs    r2, r2, #1                  @ count--
     str     r1, [r10], #4               @ *outs++ = val
     bne     1b                          @ ...while count != 0
-    ldrh    r3, [r0, #offMethod_outsSize]   @ r3<- methodToCall->outsSize
     b       .LinvokeArgsDone
 
 /*
@@ -9957,11 +21462,9 @@
     movs    r2, rINST, lsr #12          @ r2<- B (arg count) -- test for zero
     SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
     FETCH(r1, 2)                        @ r1<- GFED (load here to hide latency)
-    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
-    ldrh    r3, [r0, #offMethod_outsSize]  @ r3<- methodToCall->outsSize
     beq     .LinvokeArgsDone
 
-    @ r0=methodToCall, r1=GFED, r3=outSize, r2=count, r9=regSize, r10=outs
+    @ r0=methodToCall, r1=GFED, r2=count, r10=outs
 .LinvokeNonRange:
     rsb     r2, r2, #5                  @ r2<- 5-r2
     add     pc, pc, r2, lsl #4          @ computed goto, 4 instrs each
@@ -9988,7 +21491,9 @@
     str     r2, [r10, #-4]!             @ *--outs = vD
 0:  @ fall through to .LinvokeArgsDone
 
-.LinvokeArgsDone: @ r0=methodToCall, r3=outSize, r9=regSize
+.LinvokeArgsDone: @ r0=methodToCall
+    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
+    ldrh    r3, [r0, #offMethod_outsSize]  @ r3<- methodToCall->outsSize
     ldr     r2, [r0, #offMethod_insns]  @ r2<- method->insns
     ldr     rINST, [r0, #offMethod_clazz]  @ rINST<- method->clazz
     @ find space for the new stack frame, check for overflow
@@ -9996,13 +21501,15 @@
     sub     r1, r1, r9, lsl #2          @ r1<- newFp (old savearea - regsSize)
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- newSaveArea
 @    bl      common_dumpRegs
-    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
     sub     r3, r10, r3, lsl #2         @ r3<- bottom (newsave - outsSize)
     cmp     r3, r9                      @ bottom < interpStackEnd?
+    ldr     lr, [rSELF, #offThread_pInterpBreak]
     ldr     r3, [r0, #offMethod_accessFlags] @ r3<- methodToCall->accessFlags
     blo     .LstackOverflow             @ yes, this frame will overflow stack
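
With the registersSize/outsSize loads moved into .LinvokeArgsDone, the frame-fit check reads both sizes from the Method and compares the would-be bottom of the new frame against the thread's interpStackEnd. In C, under the same assumptions as the sketches above (SAVEAREA_FROM_FP is the VM's frame/save-area macro):

    /* false means the new frame would overflow -> .LstackOverflow */
    static bool frameFits(const Method* methodToCall, u4* curFrame, Thread* self)
    {
        StackSaveArea* oldSave = SAVEAREA_FROM_FP(curFrame);
        u4* newFp  = (u4*)oldSave - methodToCall->registersSize;   /* locals + ins */
        StackSaveArea* newSave = SAVEAREA_FROM_FP(newFp);
        u4* bottom = (u4*)newSave - methodToCall->outsSize;        /* outgoing args */
        return bottom >= (u4*)self->interpStackEnd;
    }
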
 
     @ set up newSaveArea
+    ldr     lr, [lr]                    @ lr<- active submodes
 #ifdef EASY_GDB
     SAVEAREA_FROM_FP(ip, rFP)           @ ip<- stack save area
     str     ip, [r10, #offStackSaveArea_prevSave]
@@ -10013,13 +21520,14 @@
     mov     r9, #0
     str     r9, [r10, #offStackSaveArea_returnAddr]
 #endif
-#if defined(WITH_INLINE_PROFILING)
+    ands    lr, #kSubModeMethodTrace    @ method tracing?
+    beq     1f                          @ skip if not
     stmfd   sp!, {r0-r3}                @ preserve r0-r3
-    mov     r1, r6
-    @ r0=methodToCall, r1=rGlue
+    mov     r1, rSELF
+    @ r0=methodToCall, r1=rSELF
     bl      dvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
-#endif
+1:
     str     r0, [r10, #offStackSaveArea_method]
     tst     r3, #ACC_NATIVE
     bne     .LinvokeNative
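
The compile-time WITH_INLINE_PROFILING block has become a runtime check of the method-trace submode, so one interpreter binary can turn tracing on and off. A sketch of the equivalent C, with dvmFastMethodTraceEnter's argument order mirroring the r0/r1 setup above:

    static void maybeTraceMethodEnter(const Method* methodToCall, Thread* self)
    {
        int submodes = *self->pInterpBreak;          /* lr<- active submodes */
        if (submodes & kSubModeMethodTrace)          /* method tracing enabled? */
            dvmFastMethodTraceEnter(methodToCall, self);
    }
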
@@ -10042,18 +21550,17 @@
     ldrh    r9, [r2]                        @ r9 <- load INST from new PC
     ldr     r3, [rINST, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
     mov     rPC, r2                         @ publish new rPC
-    ldr     r2, [rGLUE, #offGlue_self]      @ r2<- glue->self
 
-    @ Update "glue" values for the new method
-    @ r0=methodToCall, r1=newFp, r2=self, r3=newMethodClass, r9=newINST
-    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
-    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
+    @ Update state values for the new method
+    @ r0=methodToCall, r1=newFp, r3=newMethodClass, r9=newINST
+    str     r0, [rSELF, #offThread_method]    @ self->method = methodToCall
+    str     r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
 #if defined(WITH_JIT)
     GET_JIT_PROF_TABLE(r0)
     mov     rFP, r1                         @ fp = newFp
     GET_PREFETCHED_OPCODE(ip, r9)           @ extract prefetched opcode from r9
     mov     rINST, r9                       @ publish new rINST
-    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
     cmp     r0,#0
     bne     common_updateProfile
     GOTO_OPCODE(ip)                         @ jump to next instruction
@@ -10061,22 +21568,23 @@
     mov     rFP, r1                         @ fp = newFp
     GET_PREFETCHED_OPCODE(ip, r9)           @ extract prefetched opcode from r9
     mov     rINST, r9                       @ publish new rINST
-    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
     GOTO_OPCODE(ip)                         @ jump to next instruction
 #endif
 
 .LinvokeNative:
     @ Prep for the native call
     @ r0=methodToCall, r1=newFp, r10=newSaveArea
-    ldr     r3, [rGLUE, #offGlue_self]      @ r3<- glue->self
-    ldr     r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
-    str     r1, [r3, #offThread_curFrame]   @ self->curFrame = newFp
+    ldr     lr, [rSELF, #offThread_pInterpBreak]
+    ldr     r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
     str     r9, [r10, #offStackSaveArea_localRefCookie] @newFp->localRefCookie=top
-    mov     r9, r3                      @ r9<- glue->self (preserve)
+    ldr     lr, [lr]                    @ lr<- active submodes
 
     mov     r2, r0                      @ r2<- methodToCall
     mov     r0, r1                      @ r0<- newFp (points to args)
-    add     r1, rGLUE, #offGlue_retval  @ r1<- &retval
+    add     r1, rSELF, #offThread_retval  @ r1<- &retval
+    mov     r3, rSELF                   @ arg3<- self
 
 #ifdef ASSIST_DEBUGGER
     /* insert fake function header to help gdb find the stack frame */
@@ -10089,36 +21597,27 @@
 .Lskip:
 #endif
 
-#if defined(WITH_INLINE_PROFILING)
-    @ r2=JNIMethod, r6=rGLUE
-    stmfd   sp!, {r2,r6}
-#endif
-
+    ands    lr, #kSubModeMethodTrace    @ method tracing?
+    bne     330f                        @ hop if so
     mov     lr, pc                      @ set return addr
     ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
-
-#if defined(WITH_INLINE_PROFILING)
-    @ r0=JNIMethod, r1=rGLUE
-    ldmfd   sp!, {r0-r1}
-    bl      dvmFastNativeMethodTraceExit
-#endif
-
+220:
 #if defined(WITH_JIT)
-    ldr     r3, [rGLUE, #offGlue_ppJitProfTable] @ Refresh Jit's on/off status
+    ldr     r3, [rSELF, #offThread_ppJitProfTable] @ Refresh Jit's on/off status
 #endif
 
-    @ native return; r9=self, r10=newSaveArea
+    @ native return; r10=newSaveArea
     @ equivalent to dvmPopJniLocals
     ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved top
-    ldr     r1, [r9, #offThread_exception] @ check for exception
+    ldr     r1, [rSELF, #offThread_exception] @ check for exception
 #if defined(WITH_JIT)
     ldr     r3, [r3]                    @ r3 <- gDvmJit.pProfTable
 #endif
-    str     rFP, [r9, #offThread_curFrame]  @ self->curFrame = fp
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = fp
     cmp     r1, #0                      @ null?
-    str     r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top
+    str     r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
 #if defined(WITH_JIT)
-    str     r3, [rGLUE, #offGlue_pJitProfTable] @ refresh cached on/off switch
+    str     r3, [rSELF, #offThread_pJitProfTable] @ refresh cached on/off switch
 #endif
     bne     common_exceptionThrown      @ no, handle exception
 
@@ -10126,13 +21625,26 @@
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     GOTO_OPCODE(ip)                     @ jump to next instruction
 
+330:
+    @ r2=JNIMethod, r6=rSELF
+    stmfd   sp!, {r2,r6}
+
+    mov     lr, pc                      @ set return addr
+    ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+
+    @ r0=JNIMethod, r1=rSELF
+    ldmfd   sp!, {r0-r1}
+    bl      dvmFastNativeMethodTraceExit
+    b       220b
+
 .LstackOverflow:    @ r0=methodToCall
     mov     r1, r0                      @ r1<- methodToCall
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- self
+    mov     r0, rSELF                   @ r0<- self
     bl      dvmHandleStackOverflow
     b       common_exceptionThrown
 #ifdef ASSIST_DEBUGGER
     .fnend
+    .size   dalvik_mterp, .-dalvik_mterp
 #endif
 
 
@@ -10152,8 +21664,8 @@
     sub     sp, sp, #8                  @ space for args + pad
     FETCH(ip, 2)                        @ ip<- FEDC or CCCC
     mov     r2, r0                      @ A2<- methodToCall
-    mov     r0, rGLUE                   @ A0<- glue
-    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
+    mov     r0, rSELF                   @ A0<- self
+    SAVE_PC_FP_TO_SELF()                @ export state to "self"
     mov     r1, r9                      @ A1<- methodCallRange
     mov     r3, rINST, lsr #8           @ A3<- AA
     str     ip, [sp, #0]                @ A4<- ip
@@ -10175,19 +21687,21 @@
     mov     r9, #0
     bl      common_periodicChecks
 
-#if defined(WITH_INLINE_PROFILING)
+    ldr     lr, [rSELF, #offThread_pInterpBreak]
+    SAVEAREA_FROM_FP(r0, rFP)
+    ldr     lr, [lr]                    @ lr<- active submodes
+    ldr     r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc
+    ands    lr, #kSubModeMethodTrace    @ method tracing?
+    beq     333f
     stmfd   sp!, {r0-r3}                @ preserve r0-r3
-    mov     r0, r6
-    @ r0=rGlue
+    mov     r0, rSELF
+    @ r0=rSELF
     bl      dvmFastJavaMethodTraceExit
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
-#endif
-    SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
+333:
     ldr     rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame
-    ldr     r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc
     ldr     r2, [rFP, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                         @ r2<- method we're returning to
-    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
     cmp     r2, #0                      @ is this a break frame?
 #if defined(WORKAROUND_CORTEX_A9_745320)
     /* Don't use conditional loads if the HW defect exists */
@@ -10201,14 +21715,14 @@
     beq     common_gotoBail             @ break frame, bail out completely
 
     PREFETCH_ADVANCE_INST(rINST, r9, 3) @ advance r9, update new rINST
-    str     r2, [rGLUE, #offGlue_method]@ glue->method = newSave->method
+    str     r2, [rSELF, #offThread_method]@ self->method = newSave->method
     ldr     r1, [r10, #offClassObject_pDvmDex]   @ r1<- method->clazz->pDvmDex
-    str     rFP, [r3, #offThread_curFrame]  @ self->curFrame = fp
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = fp
 #if defined(WITH_JIT)
     ldr     r10, [r0, #offStackSaveArea_returnAddr] @ r10 = saveArea->returnAddr
     mov     rPC, r9                     @ publish new rPC
-    str     r1, [rGLUE, #offGlue_methodClassDex]
-    str     r10, [r3, #offThread_inJitCodeCache]  @ may return to JIT'ed land
+    str     r1, [rSELF, #offThread_methodClassDex]
+    str     r10, [rSELF, #offThread_inJitCodeCache]  @ may return to JIT'ed land
     cmp     r10, #0                      @ caller is compiled code
     blxne   r10
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
@@ -10216,7 +21730,7 @@
 #else
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     mov     rPC, r9                     @ publish new rPC
-    str     r1, [rGLUE, #offGlue_methodClassDex]
+    str     r1, [rSELF, #offThread_methodClassDex]
     GOTO_OPCODE(ip)                     @ jump to next instruction
 #endif
 
@@ -10225,8 +21739,8 @@
      */
      .if    0
 .LreturnOld:
-    SAVE_PC_FP_TO_GLUE()                @ export state
-    mov     r0, rGLUE                   @ arg to function
+    SAVE_PC_FP_TO_SELF()                @ export state
+    mov     r0, rSELF                   @ arg to function
     bl      dvmMterp_returnFromMethod
     b       common_resumeAfterGlueCall
     .endif
@@ -10249,13 +21763,12 @@
     mov     r9, #0
     bl      common_periodicChecks
 
-    ldr     r10, [rGLUE, #offGlue_self] @ r10<- glue->self
-    ldr     r9, [r10, #offThread_exception] @ r9<- self->exception
-    mov     r1, r10                     @ r1<- self
+    ldr     r9, [rSELF, #offThread_exception] @ r9<- self->exception
+    mov     r1, rSELF                   @ r1<- self
     mov     r0, r9                      @ r0<- exception
     bl      dvmAddTrackedAlloc          @ don't let the exception be GCed
     mov     r3, #0                      @ r3<- NULL
-    str     r3, [r10, #offThread_exception] @ self->exception = NULL
+    str     r3, [rSELF, #offThread_exception] @ self->exception = NULL
 
     /* set up args and a local for "&fp" */
     /* (str sp, [sp, #-4]!  would be perfect here, but is discouraged) */
@@ -10263,8 +21776,8 @@
     mov     ip, sp                      @ ip<- &fp
     mov     r3, #0                      @ r3<- false
     str     ip, [sp, #-4]!              @ *--sp = &fp
-    ldr     r1, [rGLUE, #offGlue_method] @ r1<- glue->method
-    mov     r0, r10                     @ r0<- self
+    ldr     r1, [rSELF, #offThread_method] @ r1<- self->method
+    mov     r0, rSELF                   @ r0<- self
     ldr     r1, [r1, #offMethod_insns]  @ r1<- method->insns
     mov     r2, r9                      @ r2<- exception
     sub     r1, rPC, r1                 @ r1<- pc - method->insns
@@ -10274,11 +21787,11 @@
     bl      dvmFindCatchBlock           @ call(self, relPc, exc, scan?, &fp)
 
     /* fix earlier stack overflow if necessary; may trash rFP */
-    ldrb    r1, [r10, #offThread_stackOverflowed]
+    ldrb    r1, [rSELF, #offThread_stackOverflowed]
     cmp     r1, #0                      @ did we overflow earlier?
     beq     1f                          @ no, skip ahead
     mov     rFP, r0                     @ save relPc result in rFP
-    mov     r0, r10                     @ r0<- self
+    mov     r0, rSELF                   @ r0<- self
     mov     r1, r9                      @ r1<- exception
     bl      dvmCleanupStackOverflow     @ call(self)
     mov     r0, rFP                     @ restore result
@@ -10293,30 +21806,30 @@
     /* adjust locals to match self->curFrame and updated PC */
     SAVEAREA_FROM_FP(r1, rFP)           @ r1<- new save area
     ldr     r1, [r1, #offStackSaveArea_method] @ r1<- new method
-    str     r1, [rGLUE, #offGlue_method]    @ glue->method = new method
+    str     r1, [rSELF, #offThread_method]  @ self->method = new method
     ldr     r2, [r1, #offMethod_clazz]      @ r2<- method->clazz
     ldr     r3, [r1, #offMethod_insns]      @ r3<- method->insns
     ldr     r2, [r2, #offClassObject_pDvmDex] @ r2<- method->clazz->pDvmDex
     add     rPC, r3, r0, asl #1             @ rPC<- method->insns + catchRelPc
-    str     r2, [rGLUE, #offGlue_methodClassDex] @ glue->pDvmDex = meth...
+    str     r2, [rSELF, #offThread_methodClassDex] @ self->pDvmDex = meth...
 
     /* release the tracked alloc on the exception */
     mov     r0, r9                      @ r0<- exception
-    mov     r1, r10                     @ r1<- self
+    mov     r1, rSELF                   @ r1<- self
     bl      dvmReleaseTrackedAlloc      @ release the exception
 
     /* restore the exception if the handler wants it */
     FETCH_INST()                        @ load rINST from rPC
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     cmp     ip, #OP_MOVE_EXCEPTION      @ is it "move-exception"?
-    streq   r9, [r10, #offThread_exception] @ yes, restore the exception
+    streq   r9, [rSELF, #offThread_exception] @ yes, restore the exception
     GOTO_OPCODE(ip)                     @ jump to next instruction
 
-.LnotCaughtLocally: @ r9=exception, r10=self
+.LnotCaughtLocally: @ r9=exception
     /* fix stack overflow if necessary */
-    ldrb    r1, [r10, #offThread_stackOverflowed]
+    ldrb    r1, [rSELF, #offThread_stackOverflowed]
     cmp     r1, #0                      @ did we overflow earlier?
-    movne   r0, r10                     @ if yes: r0<- self
+    movne   r0, rSELF                   @ if yes: r0<- self
     movne   r1, r9                      @ if yes: r1<- exception
     blne    dvmCleanupStackOverflow     @ if yes: call(self)
 
@@ -10325,14 +21838,14 @@
     /* call __android_log_print(prio, tag, format, ...) */
     /* "Exception %s from %s:%d not caught locally" */
     @ dvmLineNumFromPC(method, pc - method->insns)
-    ldr     r0, [rGLUE, #offGlue_method]
+    ldr     r0, [rSELF, #offThread_method]
     ldr     r1, [r0, #offMethod_insns]
     sub     r1, rPC, r1
     asr     r1, r1, #1
     bl      dvmLineNumFromPC
     str     r0, [sp, #-4]!
     @ dvmGetMethodSourceFile(method)
-    ldr     r0, [rGLUE, #offGlue_method]
+    ldr     r0, [rSELF, #offThread_method]
     bl      dvmGetMethodSourceFile
     str     r0, [sp, #-4]!
     @ exception->clazz->descriptor
@@ -10344,9 +21857,9 @@
     mov     r0, #3                      @ LOG_DEBUG
     bl      __android_log_print
 #endif
-    str     r9, [r10, #offThread_exception] @ restore exception
+    str     r9, [rSELF, #offThread_exception] @ restore exception
     mov     r0, r9                      @ r0<- exception
-    mov     r1, r10                     @ r1<- self
+    mov     r1, rSELF                   @ r1<- self
     bl      dvmReleaseTrackedAlloc      @ release the exception
     mov     r1, #0                      @ "want switch" = false
     b       common_gotoBail             @ bail out
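
common_exceptionThrown keeps its old shape, but every per-thread field now comes straight from rSELF. The flow, sketched in C with the dvmFindCatchBlock argument list copied from the "call(self, relPc, exc, scan?, &fp)" comment (the wrapper itself is illustrative):

    static void handleThrow(Thread* self, const u2* pc, u4* fp)
    {
        Object* exception = self->exception;
        self->exception = NULL;
        dvmAddTrackedAlloc(exception, self);       /* keep it alive during the scan */

        const Method* method = self->method;
        int relPc = pc - method->insns;            /* offset in code units */
        void* frame = fp;
        int catchRelPc = dvmFindCatchBlock(self, relPc, exception, false, &frame);
        if (catchRelPc < 0) {                      /* .LnotCaughtLocally */
            self->exception = exception;           /* restore and bail to the caller */
            dvmReleaseTrackedAlloc(exception, self);
            return;
        }
        /* caught here: point self->method/methodClassDex/curFrame at the
         * handler's frame, release the tracked alloc, and resume at the
         * handler (restoring the exception if it starts with move-exception) */
    }
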
@@ -10357,8 +21870,8 @@
      */
     .if     0
 .LexceptionOld:
-    SAVE_PC_FP_TO_GLUE()                @ export state
-    mov     r0, rGLUE                   @ arg to function
+    SAVE_PC_FP_TO_SELF()                @ export state
+    mov     r0, rSELF                   @ arg to function
     bl      dvmMterp_exceptionThrown
     b       common_resumeAfterGlueCall
     .endif
@@ -10369,7 +21882,7 @@
  * values and start executing at the next instruction.
  */
 common_resumeAfterGlueCall:
-    LOAD_PC_FP_FROM_GLUE()              @ pull rPC and rFP out of glue
+    LOAD_PC_FP_FROM_SELF()              @ pull rPC and rFP out of thread
     FETCH_INST()                        @ load rINST from rPC
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     GOTO_OPCODE(ip)                     @ jump to next instruction
@@ -10377,15 +21890,14 @@
 /*
  * Invalid array index. Note that our calling convention is strange; we use r1
  * and r3 because those just happen to be the registers all our callers are
- * using. We shuffle them here before calling the C function.
+ * using. We move r3 before calling the C function, but r1 happens to match.
  * r1: index
  * r3: size
  */
 common_errArrayIndex:
     EXPORT_PC()
-    mov     r0, r1
-    mov     r1, r3
-    bl      dvmThrowAIOOBE
+    mov     r0, r3
+    bl      dvmThrowArrayIndexOutOfBoundsException
     b       common_exceptionThrown
 
 /*
@@ -10393,29 +21905,28 @@
  */
 common_errDivideByZero:
     EXPORT_PC()
-    ldr     r0, strArithmeticException
-    ldr     r1, strDivideByZero
-    bl      dvmThrowException
+    ldr     r0, strDivideByZero
+    bl      dvmThrowArithmeticException
     b       common_exceptionThrown
 
 /*
  * Attempt to allocate an array with a negative size.
+ * On entry: length in r1
  */
 common_errNegativeArraySize:
     EXPORT_PC()
-    ldr     r0, strNegativeArraySizeException
-    mov     r1, #0
-    bl      dvmThrowException
+    mov     r0, r1                                @ arg0 <- len
+    bl      dvmThrowNegativeArraySizeException    @ (len)
     b       common_exceptionThrown
 
 /*
  * Invocation of a non-existent method.
+ * On entry: method name in r1
  */
 common_errNoSuchMethod:
     EXPORT_PC()
-    ldr     r0, strNoSuchMethodError
-    mov     r1, #0
-    bl      dvmThrowException
+    mov     r0, r1
+    bl      dvmThrowNoSuchMethodError
     b       common_exceptionThrown
 
 /*
@@ -10425,9 +21936,8 @@
  */
 common_errNullObject:
     EXPORT_PC()
-    ldr     r0, strNullPointerException
-    mov     r1, #0
-    bl      dvmThrowException
+    mov     r0, #0
+    bl      dvmThrowNullPointerException
     b       common_exceptionThrown
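The same conversion is applied to each of the error stubs above: the removed code loaded a class-descriptor string (plus an optional message) into r0/r1 and went through the generic dvmThrowException(descriptor, msg); the replacement calls a thrower dedicated to that exception type and passes only what it needs. Roughly, in C (the old form is taken from the deleted lines; the new argument types are inferred from the register setup):

    /* before: one generic entry point, exception named by its descriptor string */
    dvmThrowException("Ljava/lang/ArithmeticException;", "divide by zero");

    /* after: one typed entry point per exception */
    dvmThrowArithmeticException("divide by zero");
    dvmThrowNegativeArraySizeException(len);    /* len arrives in r1         */
    dvmThrowNoSuchMethodError(name);            /* method name arrives in r1 */
    dvmThrowNullPointerException(NULL);         /* r0 <- #0: no message      */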
 
 /*
@@ -10563,17 +22073,8 @@
  * String references, must be close to the code that uses them.
  */
     .align  2
-strArithmeticException:
-    .word   .LstrArithmeticException
 strDivideByZero:
     .word   .LstrDivideByZero
-strNegativeArraySizeException:
-    .word   .LstrNegativeArraySizeException
-strNoSuchMethodError:
-    .word   .LstrNoSuchMethodError
-strNullPointerException:
-    .word   .LstrNullPointerException
-
 strLogTag:
     .word   .LstrLogTag
 strExceptionNotCaughtLocally:
@@ -10601,23 +22102,10 @@
 
 .LstrBadEntryPoint:
     .asciz  "Bad entry point %d\n"
-.LstrArithmeticException:
-    .asciz  "Ljava/lang/ArithmeticException;"
-.LstrDivideByZero:
-    .asciz  "divide by zero"
 .LstrFilledNewArrayNotImpl:
     .asciz  "filled-new-array only implemented for objects and 'int'"
-.LstrInternalError:
-    .asciz  "Ljava/lang/InternalError;"
-.LstrInstantiationError:
-    .asciz  "Ljava/lang/InstantiationError;"
-.LstrNegativeArraySizeException:
-    .asciz  "Ljava/lang/NegativeArraySizeException;"
-.LstrNoSuchMethodError:
-    .asciz  "Ljava/lang/NoSuchMethodError;"
-.LstrNullPointerException:
-    .asciz  "Ljava/lang/NullPointerException;"
-
+.LstrDivideByZero:
+    .asciz  "divide by zero"
 .LstrLogTag:
     .asciz  "mterp"
 .LstrExceptionNotCaughtLocally:
diff --git a/vm/mterp/out/InterpAsm-armv5te.S b/vm/mterp/out/InterpAsm-armv5te.S
index 0d5502e..c657e9e 100644
--- a/vm/mterp/out/InterpAsm-armv5te.S
+++ b/vm/mterp/out/InterpAsm-armv5te.S
@@ -63,7 +63,7 @@
   reg nick      purpose
   r4  rPC       interpreted program counter, used for fetching instructions
   r5  rFP       interpreted frame pointer, used for accessing locals and args
-  r6  rGLUE     MterpGlue pointer
+  r6  rSELF     self (Thread) pointer
   r7  rINST     first 16-bit code unit of current instruction
   r8  rIBASE    interpreted instruction base pointer, used for computed goto
 
@@ -75,21 +75,21 @@
 /* single-purpose registers, given names for clarity */
 #define rPC     r4
 #define rFP     r5
-#define rGLUE   r6
+#define rSELF   r6
 #define rINST   r7
 #define rIBASE  r8
 
-/* save/restore the PC and/or FP from the glue struct */
-#define LOAD_PC_FROM_GLUE()     ldr     rPC, [rGLUE, #offGlue_pc]
-#define SAVE_PC_TO_GLUE()       str     rPC, [rGLUE, #offGlue_pc]
-#define LOAD_FP_FROM_GLUE()     ldr     rFP, [rGLUE, #offGlue_fp]
-#define SAVE_FP_TO_GLUE()       str     rFP, [rGLUE, #offGlue_fp]
-#define LOAD_PC_FP_FROM_GLUE()  ldmia   rGLUE, {rPC, rFP}
-#define SAVE_PC_FP_TO_GLUE()    stmia   rGLUE, {rPC, rFP}
+/* save/restore the PC and/or FP from the thread struct */
+#define LOAD_PC_FROM_SELF()     ldr     rPC, [rSELF, #offThread_pc]
+#define SAVE_PC_TO_SELF()       str     rPC, [rSELF, #offThread_pc]
+#define LOAD_FP_FROM_SELF()     ldr     rFP, [rSELF, #offThread_fp]
+#define SAVE_FP_TO_SELF()       str     rFP, [rSELF, #offThread_fp]
+#define LOAD_PC_FP_FROM_SELF()  ldmia   rSELF, {rPC, rFP}
+#define SAVE_PC_FP_TO_SELF()    stmia   rSELF, {rPC, rFP}
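The macro block above is the core of the patch: r6 now points at the interpreting Thread itself rather than at a separate MterpGlue struct, so the saved pc/fp, the current method, methodClassDex, retval and the pending exception are all read straight off the thread. A minimal C sketch of that idea, with member names inferred from the offThread_* offsets used throughout this file (the real Thread struct has many more fields and a different layout):

    /* sketch only: members named after the offThread_* offsets in the asm */
    struct Thread_sketch {
        const u2*     pc;              /* offThread_pc:  saved interpreted PC     */
        u4*           fp;              /* offThread_fp:  saved interpreted FP     */
        JValue        retval;          /* offThread_retval                        */
        const Method* method;          /* offThread_method: currently executing   */
        DvmDex*       methodClassDex;  /* offThread_methodClassDex                */
        Object*       exception;       /* offThread_exception: pending exception  */
    };
    /* handlers now write e.g. self->retval.i where they used to write glue->retval.i */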
 
 /*
  * "export" the PC to the stack frame, f/b/o future exception objects.  Must
- * be done *before* something calls dvmThrowException.
+ * be done *before* something throws.
  *
  * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e.
  * fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc)
@@ -124,14 +124,14 @@
  * exception catch may miss.  (This also implies that it must come after
  * EXPORT_PC().)
  */
-#define FETCH_ADVANCE_INST(_count) ldrh    rINST, [rPC, #(_count*2)]!
+#define FETCH_ADVANCE_INST(_count) ldrh    rINST, [rPC, #((_count)*2)]!
 
 /*
  * The operation performed here is similar to FETCH_ADVANCE_INST, except the
  * src and dest registers are parameterized (not hard-wired to rPC and rINST).
  */
 #define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
-        ldrh    _dreg, [_sreg, #(_count*2)]!
+        ldrh    _dreg, [_sreg, #((_count)*2)]!
 
 /*
  * Fetch the next instruction from an offset specified by _reg.  Updates
@@ -151,15 +151,15 @@
  *
  * The "_S" variant works the same but treats the value as signed.
  */
-#define FETCH(_reg, _count)     ldrh    _reg, [rPC, #(_count*2)]
-#define FETCH_S(_reg, _count)   ldrsh   _reg, [rPC, #(_count*2)]
+#define FETCH(_reg, _count)     ldrh    _reg, [rPC, #((_count)*2)]
+#define FETCH_S(_reg, _count)   ldrsh   _reg, [rPC, #((_count)*2)]
 
 /*
  * Fetch one byte from an offset past the current PC.  Pass in the same
  * "_count" as you would for FETCH, and an additional 0/1 indicating which
  * byte of the halfword you want (lo/hi).
  */
-#define FETCH_B(_reg, _count, _byte) ldrb     _reg, [rPC, #(_count*2+_byte)]
+#define FETCH_B(_reg, _count, _byte) ldrb     _reg, [rPC, #((_count)*2+(_byte))]
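The only change in the FETCH* hunks above is defensive parenthesization of the macro arguments. These .S files go through the C preprocessor, so a bare argument mis-expands as soon as a caller passes an expression instead of a literal; a tiny self-contained C demo of the failure mode (the macro names here are invented for the demo):

    #include <stdio.h>

    #define OFF_OLD(_count)  (_count*2)      /* old style: argument left bare   */
    #define OFF_NEW(_count)  ((_count)*2)    /* new style: argument wrapped     */

    int main(void) {
        printf("old: %d\n", OFF_OLD(1 + 2)); /* expands to 1 + 2*2  -> 5 */
        printf("new: %d\n", OFF_NEW(1 + 2)); /* expands to (1+2)*2  -> 6 */
        return 0;
    }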
 
 /*
  * Put the instruction's opcode field into the specified register.
@@ -186,8 +186,8 @@
 #define SET_VREG(_reg, _vreg)   str     _reg, [rFP, _vreg, lsl #2]
 
 #if defined(WITH_JIT)
-#define GET_JIT_PROF_TABLE(_reg)    ldr     _reg,[rGLUE,#offGlue_pJitProfTable]
-#define GET_JIT_THRESHOLD(_reg)     ldr     _reg,[rGLUE,#offGlue_jitThreshold]
+#define GET_JIT_PROF_TABLE(_reg)    ldr _reg,[rSELF,#offThread_pJitProfTable]
+#define GET_JIT_THRESHOLD(_reg)     ldr _reg,[rSELF,#offThread_jitThreshold]
 #endif
 
 /*
@@ -266,7 +266,7 @@
 
 /*
  * On entry:
- *  r0  MterpGlue* glue
+ *  r0  Thread* self
  *
  * This function returns a boolean "changeInterp" value.  The return comes
  * via a call to dvmMterpStdBail().
@@ -284,29 +284,28 @@
     MTERP_ENTRY2
 
     /* save stack pointer, add magic word for debuggerd */
-    str     sp, [r0, #offGlue_bailPtr]  @ save SP for eventual return
+    str     sp, [r0, #offThread_bailPtr]  @ save SP for eventual return
 
     /* set up "named" registers, figure out entry point */
-    mov     rGLUE, r0                   @ set rGLUE
-    ldr     r1, [r0, #offGlue_entryPoint]   @ enum is 4 bytes in aapcs-EABI
-    LOAD_PC_FP_FROM_GLUE()              @ load rPC and rFP from "glue"
-    adr     rIBASE, dvmAsmInstructionStart  @ set rIBASE
+    mov     rSELF, r0                   @ set rSELF
+    ldr     r1, [r0, #offThread_entryPoint]   @ enum is 4 bytes in aapcs-EABI
+    LOAD_PC_FP_FROM_SELF()              @ load rPC and rFP from "thread"
+    ldr     rIBASE, [rSELF, #offThread_curHandlerTable] @ set rIBASE
     cmp     r1, #kInterpEntryInstr      @ usual case?
     bne     .Lnot_instr                 @ no, handle it
 
 #if defined(WITH_JIT)
 .LentryInstr:
-    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
     /* Entry is always a possible trace start */
     GET_JIT_PROF_TABLE(r0)
     FETCH_INST()
     mov     r1, #0                      @ prepare the value for the new state
-    str     r1, [r10, #offThread_inJitCodeCache] @ back to the interp land
+    str     r1, [rSELF, #offThread_inJitCodeCache] @ back to the interp land
     cmp     r0,#0                       @ is profiling disabled?
 #if !defined(WITH_SELF_VERIFICATION)
     bne     common_updateProfile        @ profiling is enabled
 #else
-    ldr     r2, [r10, #offThread_shadowSpace]   @ to find out the jit exit state
+    ldr     r2, [rSELF, #offThread_shadowSpace] @ to find out the jit exit state
     beq     1f                          @ profiling is disabled
     ldr     r3, [r2, #offShadowSpace_jitExitState]  @ jit exit state
     cmp     r3, #kSVSTraceSelect        @ hot trace following?
@@ -336,20 +335,20 @@
 
 #if defined(WITH_JIT)
 .Lnot_throw:
-    ldr     r10,[rGLUE, #offGlue_jitResumeNPC]
-    ldr     r2,[rGLUE, #offGlue_jitResumeDPC]
+    ldr     r10,[rSELF, #offThread_jitResumeNPC]
+    ldr     r2,[rSELF, #offThread_jitResumeDPC]
     cmp     r1, #kInterpEntryResume     @ resuming after Jit single-step?
     bne     .Lbad_arg
     cmp     rPC,r2
     bne     .LentryInstr                @ must have branched, don't resume
 #if defined(WITH_SELF_VERIFICATION)
-    @ glue->entryPoint will be set in dvmSelfVerificationSaveState
+    @ self->entryPoint will be set in dvmSelfVerificationSaveState
     b       jitSVShadowRunStart         @ re-enter the translation after the
                                         @ single-stepped instruction
     @noreturn
 #endif
     mov     r1, #kInterpEntryInstr
-    str     r1, [rGLUE, #offGlue_entryPoint]
+    str     r1, [rSELF, #offThread_entryPoint]
     bx      r10                         @ re-enter the translation
 #endif
 
@@ -359,6 +358,7 @@
     bl      printf
     bl      dvmAbort
     .fnend
+    .size   dvmMterpStdRun, .-dvmMterpStdRun
 
 
     .global dvmMterpStdBail
@@ -374,11 +374,11 @@
  * LR to PC.
  *
  * On entry:
- *  r0  MterpGlue* glue
+ *  r0  Thread* self
  *  r1  bool changeInterp
  */
 dvmMterpStdBail:
-    ldr     sp, [r0, #offGlue_bailPtr]      @ sp<- saved SP
+    ldr     sp, [r0, #offThread_bailPtr]      @ sp<- saved SP
     mov     r0, r1                          @ return the changeInterp value
     add     sp, sp, #4                      @ un-align 64
     ldmfd   sp!, {r4-r10,fp,pc}             @ restore 9 regs and return
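dvmMterpStdRun and dvmMterpStdBail work as a setjmp/longjmp-style pair: StdRun stores sp into the thread's bailPtr on entry, and StdBail later reloads that sp and pops the very registers StdRun pushed, so control pops out of StdRun with changeInterp as its return value. A hedged C analogy of the mechanism (the real code stays in assembly precisely because it plays this stack trick):

    #include <setjmp.h>
    #include <stdbool.h>

    static jmp_buf bailBuf;                     /* stands in for offThread_bailPtr */

    bool mterpStdRun_sketch(void) {
        int why = setjmp(bailBuf);              /* "save SP for eventual return"   */
        if (why != 0)
            return why == 2;                    /* changeInterp delivered by bail  */
        /* ... run the interpreter loop until something bails out ... */
        return false;
    }

    void mterpStdBail_sketch(bool changeInterp) {
        longjmp(bailBuf, changeInterp ? 2 : 1); /* unwind back into mterpStdRun_sketch */
    }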
@@ -563,7 +563,7 @@
     /* op vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
-    ldr     r0, [rGLUE, #offGlue_retval]    @ r0<- glue->retval.i
+    ldr     r0, [rSELF, #offThread_retval]    @ r0<- self->retval.i
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     SET_VREG(r0, r2)                    @ fp[AA]<- r0
     GOTO_OPCODE(ip)                     @ jump to next instruction
@@ -574,7 +574,7 @@
 /* File: armv5te/OP_MOVE_RESULT_WIDE.S */
     /* move-result-wide vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
-    add     r3, rGLUE, #offGlue_retval  @ r3<- &glue->retval
+    add     r3, rSELF, #offThread_retval  @ r3<- &self->retval
     add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
     ldmia   r3, {r0-r1}                 @ r0/r1<- retval.j
     FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
@@ -591,7 +591,7 @@
     /* op vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
-    ldr     r0, [rGLUE, #offGlue_retval]    @ r0<- glue->retval.i
+    ldr     r0, [rSELF, #offThread_retval]    @ r0<- self->retval.i
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     SET_VREG(r0, r2)                    @ fp[AA]<- r0
     GOTO_OPCODE(ip)                     @ jump to next instruction
@@ -602,14 +602,13 @@
 .L_OP_MOVE_EXCEPTION: /* 0x0d */
 /* File: armv5te/OP_MOVE_EXCEPTION.S */
     /* move-exception vAA */
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
     mov     r2, rINST, lsr #8           @ r2<- AA
-    ldr     r3, [r0, #offThread_exception]  @ r3<- dvmGetException bypass
+    ldr     r3, [rSELF, #offThread_exception]  @ r3<- dvmGetException bypass
     mov     r1, #0                      @ r1<- 0
     FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
     SET_VREG(r3, r2)                    @ fp[AA]<- exception obj
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
-    str     r1, [r0, #offThread_exception]  @ dvmClearException bypass
+    str     r1, [rSELF, #offThread_exception]  @ dvmClearException bypass
     GOTO_OPCODE(ip)                     @ jump to next instruction
 
 /* ------------------------------ */
@@ -623,7 +622,7 @@
 .L_OP_RETURN: /* 0x0f */
 /* File: armv5te/OP_RETURN.S */
     /*
-     * Return a 32-bit value.  Copies the return value into the "glue"
+     * Return a 32-bit value.  Copies the return value into the "thread"
      * structure, then jumps to the return handler.
      *
      * for: return, return-object
@@ -631,7 +630,7 @@
     /* op vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     GET_VREG(r0, r2)                    @ r0<- vAA
-    str     r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA
+    str     r0, [rSELF, #offThread_retval] @ retval.i <- vAA
     b       common_returnFromMethod
 
 /* ------------------------------ */
@@ -639,13 +638,13 @@
 .L_OP_RETURN_WIDE: /* 0x10 */
 /* File: armv5te/OP_RETURN_WIDE.S */
     /*
-     * Return a 64-bit value.  Copies the return value into the "glue"
+     * Return a 64-bit value.  Copies the return value into the "thread"
      * structure, then jumps to the return handler.
      */
     /* return-wide vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
-    add     r3, rGLUE, #offGlue_retval  @ r3<- &glue->retval
+    add     r3, rSELF, #offThread_retval  @ r3<- &self->retval
     ldmia   r2, {r0-r1}                 @ r0/r1 <- vAA/vAA+1
     stmia   r3, {r0-r1}                 @ retval<- r0/r1
     b       common_returnFromMethod
@@ -656,7 +655,7 @@
 /* File: armv5te/OP_RETURN_OBJECT.S */
 /* File: armv5te/OP_RETURN.S */
     /*
-     * Return a 32-bit value.  Copies the return value into the "glue"
+     * Return a 32-bit value.  Copies the return value into the "thread"
      * structure, then jumps to the return handler.
      *
      * for: return, return-object
@@ -664,7 +663,7 @@
     /* op vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     GET_VREG(r0, r2)                    @ r0<- vAA
-    str     r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA
+    str     r0, [rSELF, #offThread_retval] @ retval.i <- vAA
     b       common_returnFromMethod
 
 
@@ -790,7 +789,7 @@
 /* File: armv5te/OP_CONST_STRING.S */
     /* const/string vAA, String@BBBB */
     FETCH(r1, 1)                        @ r1<- BBBB
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]  @ r2<- self->methodClassDex
     mov     r9, rINST, lsr #8           @ r9<- AA
     ldr     r2, [r2, #offDvmDex_pResStrings]   @ r2<- dvmDex->pResStrings
     ldr     r0, [r2, r1, lsl #2]        @ r0<- pResStrings[BBBB]
@@ -808,7 +807,7 @@
     /* const/string vAA, String@BBBBBBBB */
     FETCH(r0, 1)                        @ r0<- bbbb (low)
     FETCH(r1, 2)                        @ r1<- BBBB (high)
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]  @ r2<- self->methodClassDex
     mov     r9, rINST, lsr #8           @ r9<- AA
     ldr     r2, [r2, #offDvmDex_pResStrings]   @ r2<- dvmDex->pResStrings
     orr     r1, r0, r1, lsl #16         @ r1<- BBBBbbbb
@@ -826,7 +825,7 @@
 /* File: armv5te/OP_CONST_CLASS.S */
     /* const/class vAA, Class@BBBB */
     FETCH(r1, 1)                        @ r1<- BBBB
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]  @ r2<- self->methodClassDex
     mov     r9, rINST, lsr #8           @ r9<- AA
     ldr     r2, [r2, #offDvmDex_pResClasses]   @ r2<- dvmDex->pResClasses
     ldr     r0, [r2, r1, lsl #2]        @ r0<- pResClasses[BBBB]
@@ -847,18 +846,12 @@
     /* monitor-enter vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     GET_VREG(r1, r2)                    @ r1<- vAA (object)
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    mov     r0, rSELF                   @ r0<- self
     cmp     r1, #0                      @ null object?
-    EXPORT_PC()                         @ need for precise GC, MONITOR_TRACKING
+    EXPORT_PC()                         @ need for precise GC
     beq     common_errNullObject        @ null object, throw an exception
     FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
     bl      dvmLockObject               @ call(self, obj)
-#ifdef WITH_DEADLOCK_PREDICTION /* implies WITH_MONITOR_TRACKING */
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
-    ldr     r1, [r0, #offThread_exception] @ check for exception
-    cmp     r1, #0
-    bne     common_exceptionThrown      @ exception raised, bail out
-#endif
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     GOTO_OPCODE(ip)                     @ jump to next instruction
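monitor-enter now passes rSELF straight into dvmLockObject instead of reloading glue->self first, and the WITH_DEADLOCK_PREDICTION re-check of the exception slot is dropped along with it. The handler's logic, sketched in C:

    /* sketch of the handler above */
    Object* obj = (Object*) GET_VREG(vAA);
    EXPORT_PC();                      /* needed for precise GC            */
    if (obj == NULL)
        goto errNullObject;           /* common_errNullObject in the asm  */
    dvmLockObject(self, obj);         /* asm comment: call(self, obj)     */
    /* advance to the next instruction */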
 
@@ -879,7 +872,7 @@
     GET_VREG(r1, r2)                    @ r1<- vAA (object)
     cmp     r1, #0                      @ null object?
     beq     1f                          @ yes
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    mov     r0, rSELF                   @ r0<- self
     bl      dvmUnlockObject             @ r0<- success for unlock(self, obj)
     cmp     r0, #0                      @ failed?
     FETCH_ADVANCE_INST(1)               @ before throw: advance rPC, load rINST
@@ -901,7 +894,7 @@
     mov     r3, rINST, lsr #8           @ r3<- AA
     FETCH(r2, 1)                        @ r2<- BBBB
     GET_VREG(r9, r3)                    @ r9<- object
-    ldr     r0, [rGLUE, #offGlue_methodClassDex]    @ r0<- pDvmDex
+    ldr     r0, [rSELF, #offThread_methodClassDex]    @ r0<- pDvmDex
     cmp     r9, #0                      @ is object null?
     ldr     r0, [r0, #offDvmDex_pResClasses]    @ r0<- pDvmDex->pResClasses
     beq     .LOP_CHECK_CAST_okay            @ null obj, cast always succeeds
@@ -933,7 +926,7 @@
     GET_VREG(r0, r3)                    @ r0<- vB (object)
     and     r9, r9, #15                 @ r9<- A
     cmp     r0, #0                      @ is object null?
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- pDvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- pDvmDex
     beq     .LOP_INSTANCE_OF_store           @ null obj, not an instance, store r0
     FETCH(r3, 1)                        @ r3<- CCCC
     ldr     r2, [r2, #offDvmDex_pResClasses]    @ r2<- pDvmDex->pResClasses
@@ -973,7 +966,7 @@
      * Create a new instance of a class.
      */
     /* new-instance vAA, class@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
     ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
@@ -1003,12 +996,12 @@
     /* new-array vA, vB, class@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
     FETCH(r2, 1)                        @ r2<- CCCC
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     GET_VREG(r1, r0)                    @ r1<- vB (array length)
     ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
     cmp     r1, #0                      @ check length
     ldr     r0, [r3, r2, lsl #2]        @ r0<- resolved class
-    bmi     common_errNegativeArraySize @ negative length, bail
+    bmi     common_errNegativeArraySize @ negative length, bail - len in r1
     cmp     r0, #0                      @ already resolved?
     EXPORT_PC()                         @ req'd for resolve, alloc
     bne     .LOP_NEW_ARRAY_finish          @ resolved, continue
@@ -1025,7 +1018,7 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
     EXPORT_PC()                         @ need for resolve and alloc
@@ -1033,7 +1026,7 @@
     mov     r10, rINST, lsr #8          @ r10<- AA or BA
     cmp     r0, #0                      @ already resolved?
     bne     .LOP_FILLED_NEW_ARRAY_continue        @ yes, continue on
-8:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+8:  ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     mov     r2, #0                      @ r2<- false
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveClass             @ r0<- call(clazz, ref)
@@ -1053,7 +1046,7 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
     EXPORT_PC()                         @ need for resolve and alloc
@@ -1061,7 +1054,7 @@
     mov     r10, rINST, lsr #8          @ r10<- AA or BA
     cmp     r0, #0                      @ already resolved?
     bne     .LOP_FILLED_NEW_ARRAY_RANGE_continue        @ yes, continue on
-8:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+8:  ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     mov     r2, #0                      @ r2<- false
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveClass             @ r0<- call(clazz, ref)
@@ -1099,12 +1092,11 @@
     /* throw vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     GET_VREG(r1, r2)                    @ r1<- vAA (exception object)
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
     EXPORT_PC()                         @ exception handler can throw
     cmp     r1, #0                      @ null object?
     beq     common_errNullObject        @ yes, throw an NPE instead
     @ bypass dvmSetException, just store it
-    str     r1, [r0, #offThread_exception]  @ thread->exception<- obj
+    str     r1, [rSELF, #offThread_exception]  @ thread->exception<- obj
     b       common_exceptionThrown
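throw vAA keeps bypassing the dvmSetException helper and now stores the object directly through rSELF; in C terms the handler is just:

    /* sketch of the handler above */
    EXPORT_PC();                       /* the exception path can itself throw  */
    if (excObj == NULL)
        goto errNullObject;            /* throwing null raises an NPE instead  */
    self->exception = excObj;          /* the str to offThread_exception       */
    goto exceptionThrown;              /* common_exceptionThrown               */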
 
 /* ------------------------------ */
@@ -2410,14 +2402,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
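Every iget/iput (and sget/sput) handler in this file follows the same resolve-on-first-use shape; the patch only changes where the DvmDex pointer and the current method come from (self rather than glue). In rough C, with names taken from the asm comments and the prototype of dvmResolveInstField assumed:

    /* sketch of the fast path / slow path split */
    InstField* field = pDvmDex->pResFields[ref];     /* pDvmDex = self->methodClassDex */
    if (field == NULL) {
        EXPORT_PC();                                 /* resolution can throw           */
        field = dvmResolveInstField(self->method->clazz, ref);
        if (field == NULL)
            goto exceptionThrown;                    /* common_exceptionThrown         */
    }
    /* ...then load or store the instance field through the object pointer... */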
@@ -2434,14 +2426,14 @@
      */
     /* iget-wide vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_WIDE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2461,14 +2453,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_OBJECT_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2490,14 +2482,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_BOOLEAN_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2519,14 +2511,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_BYTE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2548,14 +2540,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_CHAR_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2577,14 +2569,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_SHORT_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2604,14 +2596,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2625,14 +2617,14 @@
 /* File: armv5te/OP_IPUT_WIDE.S */
     /* iput-wide vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_WIDE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2651,14 +2643,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_OBJECT_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2679,14 +2671,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_BOOLEAN_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2708,14 +2700,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_BYTE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2737,14 +2729,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_CHAR_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2766,14 +2758,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_SHORT_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2792,7 +2784,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2815,7 +2807,7 @@
      * 64-bit SGET handler.
      */
     /* sget-wide vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2846,7 +2838,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2873,7 +2865,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2900,7 +2892,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2927,7 +2919,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2954,7 +2946,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2980,7 +2972,7 @@
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -3003,7 +2995,7 @@
      * 64-bit SPUT handler.
      */
     /* sput-wide vAA, field@BBBB */
-    ldr     r0, [rGLUE, #offGlue_methodClassDex]  @ r0<- DvmDex
+    ldr     r0, [rSELF, #offThread_methodClassDex]  @ r0<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields
     mov     r9, rINST, lsr #8           @ r9<- AA
@@ -3033,13 +3025,13 @@
      * for: sput-object, sput-object-volatile
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_SPUT_OBJECT_finish          @ no, continue
-    ldr     r9, [rGLUE, #offGlue_method]    @ r9<- current method
+    ldr     r9, [rSELF, #offThread_method]    @ r9<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r9, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -3059,7 +3051,7 @@
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -3086,7 +3078,7 @@
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -3113,7 +3105,7 @@
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -3140,7 +3132,7 @@
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -3167,7 +3159,7 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
@@ -3178,7 +3170,7 @@
     cmp     r0, #0                      @ already resolved?
     EXPORT_PC()                         @ must export for invoke
     bne     .LOP_INVOKE_VIRTUAL_continue        @ yes, continue on
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     mov     r2, #METHOD_VIRTUAL         @ resolver method type
     bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
@@ -3198,7 +3190,7 @@
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     .if     (!0)
     and     r10, r10, #15               @ r10<- D (or stays CCCC)
     .endif
@@ -3207,7 +3199,7 @@
     GET_VREG(r2, r10)                   @ r2<- "this" ptr
     ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
     cmp     r2, #0                      @ null "this"?
-    ldr     r9, [rGLUE, #offGlue_method] @ r9<- current method
+    ldr     r9, [rSELF, #offThread_method] @ r9<- current method
     beq     common_errNullObject        @ null "this", throw exception
     cmp     r0, #0                      @ already resolved?
     ldr     r9, [r9, #offMethod_clazz]  @ r9<- method->clazz
@@ -3231,7 +3223,7 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
@@ -3259,14 +3251,14 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
     ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
     cmp     r0, #0                      @ already resolved?
     EXPORT_PC()                         @ must export for invoke
     bne     common_invokeMethodNoRange @ yes, continue on
-0:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+0:  ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     mov     r2, #METHOD_STATIC          @ resolver method type
     bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
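Method resolution in the invoke handlers is the same lazy pattern, parameterized by the resolver type (METHOD_STATIC here, METHOD_VIRTUAL in the hunks above). A hedged C rendering of the sequence, with names per the asm comments and the dvmResolveMethod prototype assumed:

    /* sketch */
    Method* methodToCall = pDvmDex->pResMethods[ref];
    EXPORT_PC();                                     /* must export for invoke */
    if (methodToCall == NULL) {
        methodToCall = dvmResolveMethod(self->method->clazz, ref, METHOD_STATIC);
        if (methodToCall == NULL)
            goto exceptionThrown;
    }
    /* fall through to common_invokeMethodRange / common_invokeMethodNoRange */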
@@ -3292,9 +3284,9 @@
     .endif
     EXPORT_PC()                         @ must export for invoke
     GET_VREG(r0, r2)                    @ r0<- first arg ("this")
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- methodClassDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- methodClassDex
     cmp     r0, #0                      @ null obj?
-    ldr     r2, [rGLUE, #offGlue_method]  @ r2<- method
+    ldr     r2, [rSELF, #offThread_method]  @ r2<- method
     beq     common_errNullObject        @ yes, fail
     ldr     r0, [r0, #offObject_clazz]  @ r0<- thisPtr->clazz
     bl      dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
@@ -3322,7 +3314,7 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
@@ -3333,7 +3325,7 @@
     cmp     r0, #0                      @ already resolved?
     EXPORT_PC()                         @ must export for invoke
     bne     .LOP_INVOKE_VIRTUAL_RANGE_continue        @ yes, continue on
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     mov     r2, #METHOD_VIRTUAL         @ resolver method type
     bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
@@ -3355,7 +3347,7 @@
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     .if     (!1)
     and     r10, r10, #15               @ r10<- D (or stays CCCC)
     .endif
@@ -3364,7 +3356,7 @@
     GET_VREG(r2, r10)                   @ r2<- "this" ptr
     ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
     cmp     r2, #0                      @ null "this"?
-    ldr     r9, [rGLUE, #offGlue_method] @ r9<- current method
+    ldr     r9, [rSELF, #offThread_method] @ r9<- current method
     beq     common_errNullObject        @ null "this", throw exception
     cmp     r0, #0                      @ already resolved?
     ldr     r9, [r9, #offMethod_clazz]  @ r9<- method->clazz
@@ -3390,7 +3382,7 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
@@ -3420,14 +3412,14 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
     ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
     cmp     r0, #0                      @ already resolved?
     EXPORT_PC()                         @ must export for invoke
     bne     common_invokeMethodRange @ yes, continue on
-0:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+0:  ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     mov     r2, #METHOD_STATIC          @ resolver method type
     bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
@@ -3455,9 +3447,9 @@
     .endif
     EXPORT_PC()                         @ must export for invoke
     GET_VREG(r0, r2)                    @ r0<- first arg ("this")
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- methodClassDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- methodClassDex
     cmp     r0, #0                      @ null obj?
-    ldr     r2, [rGLUE, #offGlue_method]  @ r2<- method
+    ldr     r2, [rSELF, #offThread_method]  @ r2<- method
     beq     common_errNullObject        @ yes, fail
     ldr     r0, [r0, #offObject_clazz]  @ r0<- thisPtr->clazz
     bl      dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
@@ -7415,14 +7407,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_VOLATILE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -7443,14 +7435,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_VOLATILE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -7470,7 +7462,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -7497,7 +7489,7 @@
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -7525,14 +7517,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_OBJECT_VOLATILE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -7551,14 +7543,14 @@
      */
     /* iget-wide vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_WIDE_VOLATILE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -7574,14 +7566,14 @@
 /* File: armv5te/OP_IPUT_WIDE.S */
     /* iput-wide vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_WIDE_VOLATILE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -7599,7 +7591,7 @@
      * 64-bit SGET handler.
      */
     /* sget-wide vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -7629,7 +7621,7 @@
      * 64-bit SPUT handler.
      */
     /* sput-wide vAA, field@BBBB */
-    ldr     r0, [rGLUE, #offGlue_methodClassDex]  @ r0<- DvmDex
+    ldr     r0, [rSELF, #offThread_methodClassDex]  @ r0<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields
     mov     r9, rINST, lsr #8           @ r9<- AA
@@ -7668,7 +7660,7 @@
      * exception is indicated by AA, with some detail provided by BBBB.
      */
     /* op AA, ref@BBBB */
-    ldr     r0, [rGLUE, #offGlue_method]    @ r0<- glue->method
+    ldr     r0, [rSELF, #offThread_method]    @ r0<- self->method
     FETCH(r2, 1)                        @ r2<- BBBB
     EXPORT_PC()                         @ export the PC
     mov     r1, rINST, lsr #8           @ r1<- AA
@@ -7691,11 +7683,11 @@
      */
     /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */
     FETCH(r10, 1)                       @ r10<- BBBB
-    add     r1, rGLUE, #offGlue_retval  @ r1<- &glue->retval
+    add     r1, rSELF, #offThread_retval  @ r1<- &self->retval
     EXPORT_PC()                         @ can throw
     sub     sp, sp, #8                  @ make room for arg, +64 bit align
     mov     r0, rINST, lsr #12          @ r0<- B
-    str     r1, [sp]                    @ push &glue->retval
+    str     r1, [sp]                    @ push &self->retval
     bl      .LOP_EXECUTE_INLINE_continue        @ make call; will return after
     add     sp, sp, #8                  @ pop stack
     cmp     r0, #0                      @ test boolean result of inline
@@ -7721,11 +7713,11 @@
      */
     /* [opt] execute-inline/range {vCCCC..v(CCCC+AA-1)}, inline@BBBB */
     FETCH(r10, 1)                       @ r10<- BBBB
-    add     r1, rGLUE, #offGlue_retval  @ r1<- &glue->retval
+    add     r1, rSELF, #offThread_retval  @ r1<- &self->retval
     EXPORT_PC()                         @ can throw
     sub     sp, sp, #8                  @ make room for arg, +64 bit align
     mov     r0, rINST, lsr #8           @ r0<- AA
-    str     r1, [sp]                    @ push &glue->retval
+    str     r1, [sp]                    @ push &self->retval
     bl      .LOP_EXECUTE_INLINE_RANGE_continue        @ make call; will return after
     add     sp, sp, #8                  @ pop stack
     cmp     r0, #0                      @ test boolean result of inline
@@ -7736,12 +7728,23 @@
 
 /* ------------------------------ */
     .balign 64
-.L_OP_INVOKE_DIRECT_EMPTY: /* 0xf0 */
-/* File: armv5te/OP_INVOKE_DIRECT_EMPTY.S */
+.L_OP_INVOKE_OBJECT_INIT_RANGE: /* 0xf0 */
+/* File: armv5te/OP_INVOKE_OBJECT_INIT_RANGE.S */
     /*
-     * invoke-direct-empty is a no-op in a "standard" interpreter.
+     * Invoke Object.<init> on an object.  In practice we know that
+     * Object's nullary constructor doesn't do anything, so we just
+     * skip it (we know a debugger isn't active).
      */
-    FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
+    FETCH(r1, 2)                  @ r1<- CCCC
+    GET_VREG(r0, r1)                    @ r0<- "this" ptr
+    cmp     r0, #0                      @ check for NULL
+    beq     common_errNullObject        @ export PC and throw NPE
+    ldr     r1, [r0, #offObject_clazz]  @ r1<- obj->clazz
+    ldr     r2, [r1, #offClassObject_accessFlags] @ r2<- clazz->accessFlags
+    tst     r2, #CLASS_ISFINALIZABLE    @ is this class finalizable?
+    beq     1f                          @ nope, done
+    bl      dvmSetFinalizable           @ call dvmSetFinalizable(obj)
+1:  FETCH_ADVANCE_INST(2+1)       @ advance to next instr, load rINST
     GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
     GOTO_OPCODE(ip)                     @ execute it
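+    @ Rough pseudo-C of the handler above (names taken from the code, flow
+    @ assumed, not verbatim source):
+    @   Object* obj = GET_VREG(CCCC);
+    @   if (obj == NULL) throw NullPointerException;
+    @   if (obj->clazz->accessFlags & CLASS_ISFINALIZABLE)
+    @       dvmSetFinalizable(obj);   /* Object.<init> itself is a no-op */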
 
@@ -7863,7 +7866,7 @@
     beq     common_errNullObject        @ object was null
     and     r2, r2, #15
     GET_VREG(r0, r2)                    @ r0<- fp[A]
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
     str     r0, [r3, r1]                @ obj.field (always 32 bits)<- r0
     cmp     r0, #0
@@ -7935,7 +7938,7 @@
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     .if     (!0)
     and     r10, r10, #15               @ r10<- D (or stays CCCC)
     .endif
@@ -7963,7 +7966,7 @@
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     .if     (!1)
     and     r10, r10, #15               @ r10<- D (or stays CCCC)
     .endif
@@ -7991,14 +7994,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_OBJECT_VOLATILE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -8018,7 +8021,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -8045,13 +8048,13 @@
      * for: sput-object, sput-object-volatile
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_SPUT_OBJECT_VOLATILE_finish          @ no, continue
-    ldr     r9, [rGLUE, #offGlue_method]    @ r9<- current method
+    ldr     r9, [rSELF, #offThread_method]    @ r9<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r9, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8065,10 +8068,3110 @@
     .balign 64
 .L_OP_DISPATCH_FF: /* 0xff */
 /* File: armv5te/OP_DISPATCH_FF.S */
+    mov     ip, rINST, lsr #8           @ ip<- extended opcode
+    add     ip, ip, #256                @ add offset for extended opcodes
+    GOTO_OPCODE(ip)                     @ go to proper extended handler
+
+
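+    @ Reading of the dispatch above (inferred from the code, not normative):
+    @ for an 0xff instruction the extended opcode sits in the high byte of
+    @ the first code unit, so the handler index is (rINST >> 8) + 0x100,
+    @ which lands on one of the jumbo handlers laid out below (0x100 and up).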
+/* ------------------------------ */
+    .balign 64
+.L_OP_CONST_CLASS_JUMBO: /* 0x100 */
+/* File: armv5te/OP_CONST_CLASS_JUMBO.S */
+    /* const-class/jumbo vBBBB, Class@AAAAAAAA */
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- self->methodClassDex

+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResClasses]   @ r2<- dvmDex->pResClasses
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    FETCH(r9, 3)                        @ r9<- BBBB
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResClasses[AAAAaaaa]
+    cmp     r0, #0                      @ not yet resolved?
+    beq     .LOP_CONST_CLASS_JUMBO_resolve
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vBBBB<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
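+    @ Code-unit layout assumed from the fetches above (sketch, not a spec):
+    @   unit 0:  low byte 0xff, extended opcode in the high byte
+    @   unit 1:  aaaa  (low 16 bits of the class index)
+    @   unit 2:  AAAA  (high 16 bits of the class index)
+    @   unit 3:  BBBB  (destination register vBBBB)
+    @ The 32-bit index is rebuilt with "orr r1, r0, r1, lsl #16" and used to
+    @ index dvmDex->pResClasses before falling back to the resolve path.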
+/* ------------------------------ */
+    .balign 64
+.L_OP_CHECK_CAST_JUMBO: /* 0x101 */
+/* File: armv5te/OP_CHECK_CAST_JUMBO.S */
+    /*
+     * Check to see if a cast from one class to another is allowed.
+     */
+    /* check-cast/jumbo vBBBB, class@AAAAAAAA */
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r3, 3)                        @ r3<- BBBB
+    orr     r2, r0, r2, lsl #16         @ r2<- AAAAaaaa
+    GET_VREG(r9, r3)                    @ r9<- object
+    ldr     r0, [rSELF, #offThread_methodClassDex]    @ r0<- pDvmDex
+    cmp     r9, #0                      @ is object null?
+    ldr     r0, [r0, #offDvmDex_pResClasses]    @ r0<- pDvmDex->pResClasses
+    beq     .LOP_CHECK_CAST_JUMBO_okay            @ null obj, cast always succeeds
+    ldr     r1, [r0, r2, lsl #2]        @ r1<- resolved class
+    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
+    cmp     r1, #0                      @ have we resolved this before?
+    beq     .LOP_CHECK_CAST_JUMBO_resolve         @ not resolved, do it now
+.LOP_CHECK_CAST_JUMBO_resolved:
+    cmp     r0, r1                      @ same class (trivial success)?
+    bne     .LOP_CHECK_CAST_JUMBO_fullcheck       @ no, do full check
+    b       .LOP_CHECK_CAST_JUMBO_okay            @ yes, finish up
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INSTANCE_OF_JUMBO: /* 0x102 */
+/* File: armv5te/OP_INSTANCE_OF_JUMBO.S */
+    /*
+     * Check to see if an object reference is an instance of a class.
+     *
+     * Most common situation is a non-null object, being compared against
+     * an already-resolved class.
+     *
+     * TODO: convert most of this into a common subroutine, shared with
+     *       OP_INSTANCE_OF.S.
+     */
+    /* instance-of/jumbo vBBBB, vCCCC, class@AAAAAAAA */
+    FETCH(r3, 4)                        @ r3<- vCCCC
+    FETCH(r9, 3)                        @ r9<- vBBBB
+    GET_VREG(r0, r3)                    @ r0<- vCCCC (object)
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- pDvmDex
+    cmp     r0, #0                      @ is object null?
+    beq     .LOP_INSTANCE_OF_JUMBO_store           @ null obj, not an instance, store r0
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r3, 2)                        @ r3<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResClasses]    @ r2<- pDvmDex->pResClasses
+    orr     r3, r1, r3, lsl #16         @ r3<- AAAAaaaa
+    ldr     r1, [r2, r3, lsl #2]        @ r1<- resolved class
+    ldr     r0, [r0, #offObject_clazz]  @ r0<- obj->clazz
+    cmp     r1, #0                      @ have we resolved this before?
+    beq     .LOP_INSTANCE_OF_JUMBO_resolve         @ not resolved, do it now
+    b       .LOP_INSTANCE_OF_JUMBO_resolved        @ resolved, continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_NEW_INSTANCE_JUMBO: /* 0x103 */
+/* File: armv5te/OP_NEW_INSTANCE_JUMBO.S */
+    /*
+     * Create a new instance of a class.
+     */
+    /* new-instance/jumbo vBBBB, class@AAAAAAAA */
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
+    EXPORT_PC()                         @ req'd for init, resolve, alloc
+    cmp     r0, #0                      @ already resolved?
+    beq     .LOP_NEW_INSTANCE_JUMBO_resolve         @ no, resolve it now
+.LOP_NEW_INSTANCE_JUMBO_resolved:   @ r0=class
+    ldrb    r1, [r0, #offClassObject_status]    @ r1<- ClassStatus enum
+    cmp     r1, #CLASS_INITIALIZED      @ has class been initialized?
+    bne     .LOP_NEW_INSTANCE_JUMBO_needinit        @ no, init class now
+.LOP_NEW_INSTANCE_JUMBO_initialized: @ r0=class
+    mov     r1, #ALLOC_DONT_TRACK       @ flags for alloc call
+    bl      dvmAllocObject              @ r0<- new object
+    b       .LOP_NEW_INSTANCE_JUMBO_finish          @ continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_NEW_ARRAY_JUMBO: /* 0x104 */
+/* File: armv5te/OP_NEW_ARRAY_JUMBO.S */
+    /*
+     * Allocate an array of objects, specified with the array class
+     * and a count.
+     *
+     * The verifier guarantees that this is an array class, so we don't
+     * check for it here.
+     */
+    /* new-array/jumbo vBBBB, vCCCC, class@AAAAAAAA */
+    FETCH(r2, 1)                        @ r2<- aaaa (lo)
+    FETCH(r3, 2)                        @ r3<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- vCCCC
+    orr     r2, r2, r3, lsl #16         @ r2<- AAAAaaaa
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    GET_VREG(r1, r0)                    @ r1<- vCCCC (array length)
+    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
+    cmp     r1, #0                      @ check length
+    ldr     r0, [r3, r2, lsl #2]        @ r0<- resolved class
+    bmi     common_errNegativeArraySize @ negative length, bail - len in r1
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ req'd for resolve, alloc
+    bne     .LOP_NEW_ARRAY_JUMBO_finish          @ resolved, continue
+    b       .LOP_NEW_ARRAY_JUMBO_resolve         @ do resolve now
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_FILLED_NEW_ARRAY_JUMBO: /* 0x105 */
+/* File: armv5te/OP_FILLED_NEW_ARRAY_JUMBO.S */
+    /*
+     * Create a new array with elements filled from registers.
+     *
+     * TODO: convert most of this into a common subroutine, shared with
+     *       OP_FILLED_NEW_ARRAY.S.
+     */
+    /* filled-new-array/jumbo {vCCCC..v(CCCC+BBBB-1)}, type@AAAAAAAA */
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
+    EXPORT_PC()                         @ need for resolve and alloc
+    cmp     r0, #0                      @ already resolved?
+    bne     .LOP_FILLED_NEW_ARRAY_JUMBO_continue        @ yes, continue on
+8:  ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- call(clazz, ref)
+    cmp     r0, #0                      @ got null?
+    beq     common_exceptionThrown      @ yes, handle exception
+    b       .LOP_FILLED_NEW_ARRAY_JUMBO_continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_JUMBO: /* 0x106 */
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_JUMBO_resolved        @ resolved, continue
+
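+    @ The remaining iget/iput/sget/sput jumbo handlers below follow this
+    @ same shape: index dvmDex->pResFields with the 32-bit AAAAaaaa ref,
+    @ and on a miss export the PC and call dvmResolveInstField or
+    @ dvmResolveStaticField before branching to a *_finish label
+    @ (presumably emitted later in this file) that performs the actual
+    @ load or store.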
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_WIDE_JUMBO: /* 0x107 */
+/* File: armv5te/OP_IGET_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit instance field get.
+     */
+    /* iget-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_WIDE_JUMBO_finish          @ no, already resolved
+    ldr     r2, [rSELF, #offThread_method] @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_WIDE_JUMBO_resolved        @ resolved, continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_OBJECT_JUMBO: /* 0x108 */
+/* File: armv5te/OP_IGET_OBJECT_JUMBO.S */
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_OBJECT_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_OBJECT_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_BOOLEAN_JUMBO: /* 0x109 */
+/* File: armv5te/OP_IGET_BOOLEAN_JUMBO.S */
+@include "armv5te/OP_IGET_JUMBO.S" { "load":"ldrb", "sqnum":"1" }
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_BOOLEAN_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_BOOLEAN_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_BYTE_JUMBO: /* 0x10a */
+/* File: armv5te/OP_IGET_BYTE_JUMBO.S */
+@include "armv5te/OP_IGET_JUMBO.S" { "load":"ldrsb", "sqnum":"2" }
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_BYTE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_BYTE_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_CHAR_JUMBO: /* 0x10b */
+/* File: armv5te/OP_IGET_CHAR_JUMBO.S */
+@include "armv5te/OP_IGET_JUMBO.S" { "load":"ldrh", "sqnum":"3" }
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_CHAR_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_CHAR_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_SHORT_JUMBO: /* 0x10c */
+/* File: armv5te/OP_IGET_SHORT_JUMBO.S */
+@include "armv5te/OP_IGET_JUMBO.S" { "load":"ldrsh", "sqnum":"4" }
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_SHORT_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_SHORT_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_JUMBO: /* 0x10d */
+/* File: armv5te/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_JUMBO_resolved        @ resolved, continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_WIDE_JUMBO: /* 0x10e */
+/* File: armv5te/OP_IPUT_WIDE_JUMBO.S */
+    /* iput-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_WIDE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_WIDE_JUMBO_resolved        @ resolved, continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_OBJECT_JUMBO: /* 0x10f */
+/* File: armv5te/OP_IPUT_OBJECT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     */
+    /* iput-object/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_OBJECT_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_OBJECT_JUMBO_resolved        @ resolved, continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_BOOLEAN_JUMBO: /* 0x110 */
+/* File: armv5te/OP_IPUT_BOOLEAN_JUMBO.S */
+@include "armv5te/OP_IPUT_JUMBO.S" { "store":"strb", "sqnum":"1" }
+/* File: armv5te/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_BOOLEAN_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_BOOLEAN_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_BYTE_JUMBO: /* 0x111 */
+/* File: armv5te/OP_IPUT_BYTE_JUMBO.S */
+@include "armv5te/OP_IPUT_JUMBO.S" { "store":"strb", "sqnum":"2" }
+/* File: armv5te/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_BYTE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_BYTE_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_CHAR_JUMBO: /* 0x112 */
+/* File: armv5te/OP_IPUT_CHAR_JUMBO.S */
+@include "armv5te/OP_IPUT_JUMBO.S" { "store":"strh", "sqnum":"3" }
+/* File: armv5te/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_CHAR_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_CHAR_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_SHORT_JUMBO: /* 0x113 */
+/* File: armv5te/OP_IPUT_SHORT_JUMBO.S */
+@include "armv5te/OP_IPUT_JUMBO.S" { "store":"strh", "sqnum":"4" }
+/* File: armv5te/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_SHORT_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_SHORT_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_JUMBO: /* 0x114 */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_WIDE_JUMBO: /* 0x115 */
+/* File: armv5te/OP_SGET_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit SGET handler.
+     */
+    /* sget-wide/jumbo vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_WIDE_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_WIDE_JUMBO_finish:
+    FETCH(r9, 3)                        @ r9<- BBBB
+    .if 0
+    add     r0, r0, #offStaticField_value @ r0<- pointer to data
+    bl      dvmQuasiAtomicRead64        @ r0/r1<- contents of field
+    .else
+    ldrd    r0, [r0, #offStaticField_value] @ r0/r1<- field value (aligned)
+    .endif
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BBBB]
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    stmia   r9, {r0-r1}                 @ vBBBB/vBBBB+1<- r0/r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
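+    @ Note on the ".if 0" guard above: the template appears to switch
+    @ between dvmQuasiAtomicRead64 (for the volatile variant) and a plain
+    @ aligned ldrd; the volatile behaviour is assumed from the matching
+    @ *_VOLATILE handlers elsewhere, since only the non-volatile expansion
+    @ is visible in this hunk.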
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_OBJECT_JUMBO: /* 0x116 */
+/* File: armv5te/OP_SGET_OBJECT_JUMBO.S */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_OBJECT_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_OBJECT_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_BOOLEAN_JUMBO: /* 0x117 */
+/* File: armv5te/OP_SGET_BOOLEAN_JUMBO.S */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_BOOLEAN_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_BOOLEAN_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_BYTE_JUMBO: /* 0x118 */
+/* File: armv5te/OP_SGET_BYTE_JUMBO.S */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_BYTE_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_BYTE_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_CHAR_JUMBO: /* 0x119 */
+/* File: armv5te/OP_SGET_CHAR_JUMBO.S */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_CHAR_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_CHAR_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_SHORT_JUMBO: /* 0x11a */
+/* File: armv5te/OP_SGET_SHORT_JUMBO.S */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_SHORT_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_SHORT_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_JUMBO: /* 0x11b */
+/* File: armv5te/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str     r1, [r0, #offStaticField_value] @ field<- vBBBB
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_WIDE_JUMBO: /* 0x11c */
+/* File: armv5te/OP_SPUT_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit SPUT handler.
+     */
+    /* sput-wide/jumbo vBBBB, field@AAAAAAAA */
+    ldr     r0, [rSELF, #offThread_methodClassDex]  @ r0<- DvmDex
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    ldr     r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    FETCH(r9, 3)                        @ r9<- BBBB
+    ldr     r2, [r0, r1, lsl #2]        @ r2<- resolved StaticField ptr
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BBBB]
+    cmp     r2, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_WIDE_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_WIDE_JUMBO_finish: @ field ptr in r2, BBBB in r9
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vBBBB/vBBBB+1
+    GET_INST_OPCODE(r10)                @ extract opcode from rINST
+    .if 0
+    add     r2, r2, #offStaticField_value @ r2<- pointer to data
+    bl      dvmQuasiAtomicSwap64        @ stores r0/r1 into addr r2
+    .else
+    strd    r0, [r2, #offStaticField_value] @ field<- vBBBB/vBBBB+1
+    .endif
+    GOTO_OPCODE(r10)                    @ jump to next instruction
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_OBJECT_JUMBO: /* 0x11d */
+/* File: armv5te/OP_SPUT_OBJECT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler for objects
+     */
+    /* sput-object/jumbo vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_SPUT_OBJECT_JUMBO_finish          @ no, continue
+    ldr     r9, [rSELF, #offThread_method]    @ r9<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r9, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_OBJECT_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_BOOLEAN_JUMBO: /* 0x11e */
+/* File: armv5te/OP_SPUT_BOOLEAN_JUMBO.S */
+/* File: armv5te/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_BOOLEAN_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_BOOLEAN_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str     r1, [r0, #offStaticField_value] @ field<- vBBBB
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_BYTE_JUMBO: /* 0x11f */
+/* File: armv5te/OP_SPUT_BYTE_JUMBO.S */
+/* File: armv5te/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_BYTE_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_BYTE_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str     r1, [r0, #offStaticField_value] @ field<- vBBBB
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_CHAR_JUMBO: /* 0x120 */
+/* File: armv5te/OP_SPUT_CHAR_JUMBO.S */
+/* File: armv5te/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_CHAR_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_CHAR_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str     r1, [r0, #offStaticField_value] @ field<- vBBBB
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_SHORT_JUMBO: /* 0x121 */
+/* File: armv5te/OP_SPUT_SHORT_JUMBO.S */
+/* File: armv5te/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_SHORT_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_SHORT_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str     r1, [r0, #offStaticField_value] @ field<- vBBBB
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_VIRTUAL_JUMBO: /* 0x122 */
+/* File: armv5te/OP_INVOKE_VIRTUAL_JUMBO.S */
+    /*
+     * Handle a virtual method call.
+     */
+    /* invoke-virtual/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ must export for invoke
+    bne     .LOP_INVOKE_VIRTUAL_JUMBO_continue        @ yes, continue on
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    mov     r2, #METHOD_VIRTUAL         @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    bne     .LOP_INVOKE_VIRTUAL_JUMBO_continue        @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
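+    @ Pseudo-C sketch of the resolve path above (helper names as they
+    @ appear in the code, surrounding flow assumed):
+    @   Method* base = dvmDex->pResMethods[AAAAaaaa];
+    @   if (base == NULL)
+    @       base = dvmResolveMethod(curMethod->clazz, ref, METHOD_VIRTUAL);
+    @   if (base == NULL) goto exceptionThrown;
+    @   /* _continue (not in this hunk) selects the concrete method from
+    @      the receiver's vtable */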
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_SUPER_JUMBO: /* 0x123 */
+/* File: armv5te/OP_INVOKE_SUPER_JUMBO.S */
+    /*
+     * Handle a "super" method call.
+     */
+    /* invoke-super/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    FETCH(r10, 4)                       @ r10<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    GET_VREG(r2, r10)                   @ r2<- "this" ptr
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
+    cmp     r2, #0                      @ null "this"?
+    ldr     r9, [rSELF, #offThread_method] @ r9<- current method
+    beq     common_errNullObject        @ null "this", throw exception
+    cmp     r0, #0                      @ already resolved?
+    ldr     r9, [r9, #offMethod_clazz]  @ r9<- method->clazz
+    EXPORT_PC()                         @ must export for invoke
+    bne     .LOP_INVOKE_SUPER_JUMBO_continue        @ resolved, continue on
+    b       .LOP_INVOKE_SUPER_JUMBO_resolve         @ do resolve now
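+
+    /*
+     * Note that the "super" call needs the current method's declaring
+     * class (loaded into r9 above): the continuation/resolve code uses it
+     * to locate the superclass version of the target method.  Only the
+     * null check on "this" is done here.
+     */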
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_DIRECT_JUMBO: /* 0x124 */
+/* File: armv5te/OP_INVOKE_DIRECT_JUMBO.S */
+    /*
+     * Handle a direct method call.
+     *
+     * (We could defer the "is 'this' pointer null" test to the common
+     * method invocation code, and use a flag to indicate that static
+     * calls don't count.  If we do this as part of copying the arguments
+     * out we could avoid loading the first arg twice.)
+     *
+     */
+    /* invoke-direct/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    FETCH(r10, 4)                       @ r10<- CCCC
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ must export for invoke
+    GET_VREG(r2, r10)                   @ r2<- "this" ptr
+    beq     .LOP_INVOKE_DIRECT_JUMBO_resolve         @ not resolved, do it now
+.LOP_INVOKE_DIRECT_JUMBO_finish:
+    cmp     r2, #0                      @ null "this" ref?
+    bne     common_invokeMethodJumbo    @ no, continue on
+    b       common_errNullObject        @ yes, throw exception
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_STATIC_JUMBO: /* 0x125 */
+/* File: armv5te/OP_INVOKE_STATIC_JUMBO.S */
+    /*
+     * Handle a static method call.
+     */
+    /* invoke-static/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ must export for invoke
+    bne     common_invokeMethodJumbo    @ yes, continue on
+0:  ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    mov     r2, #METHOD_STATIC          @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    bne     common_invokeMethodJumbo    @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_INTERFACE_JUMBO: /* 0x126 */
+/* File: armv5te/OP_INVOKE_INTERFACE_JUMBO.S */
+    /*
+     * Handle an interface method call.
+     */
+    /* invoke-interface/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    FETCH(r2, 4)                        @ r2<- CCCC
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    EXPORT_PC()                         @ must export for invoke
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    GET_VREG(r0, r2)                    @ r0<- first arg ("this")
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- methodClassDex
+    cmp     r0, #0                      @ null obj?
+    ldr     r2, [rSELF, #offThread_method]  @ r2<- method
+    beq     common_errNullObject        @ yes, fail
+    ldr     r0, [r0, #offObject_clazz]  @ r0<- thisPtr->clazz
+    bl      dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
+    cmp     r0, #0                      @ failed?
+    beq     common_exceptionThrown      @ yes, handle exception
+    b       common_invokeMethodJumbo    @ jump to common handler
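+
+    /*
+     * Unlike the other invoke handlers, this one never consults
+     * pResMethods: the target depends on the receiver's concrete class,
+     * so it always calls dvmFindInterfaceMethodInCache with
+     * thisPtr->clazz, the method reference, the current method, and the
+     * DEX, and lets the helper do the (cached) lookup.
+     */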
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_27FF: /* 0x127 */
+/* File: armv5te/OP_UNUSED_27FF.S */
 /* File: armv5te/unused.S */
     bl      common_abort
 
 
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_28FF: /* 0x128 */
+/* File: armv5te/OP_UNUSED_28FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
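+
+    /*
+     * All of the unassigned extended opcodes (0x27ff through 0xf1ff)
+     * share this stub; reaching one of them is invalid bytecode, so the
+     * handler simply calls common_abort.
+     */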
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_29FF: /* 0x129 */
+/* File: armv5te/OP_UNUSED_29FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2AFF: /* 0x12a */
+/* File: armv5te/OP_UNUSED_2AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2BFF: /* 0x12b */
+/* File: armv5te/OP_UNUSED_2BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2CFF: /* 0x12c */
+/* File: armv5te/OP_UNUSED_2CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2DFF: /* 0x12d */
+/* File: armv5te/OP_UNUSED_2DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2EFF: /* 0x12e */
+/* File: armv5te/OP_UNUSED_2EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2FFF: /* 0x12f */
+/* File: armv5te/OP_UNUSED_2FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_30FF: /* 0x130 */
+/* File: armv5te/OP_UNUSED_30FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_31FF: /* 0x131 */
+/* File: armv5te/OP_UNUSED_31FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_32FF: /* 0x132 */
+/* File: armv5te/OP_UNUSED_32FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_33FF: /* 0x133 */
+/* File: armv5te/OP_UNUSED_33FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_34FF: /* 0x134 */
+/* File: armv5te/OP_UNUSED_34FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_35FF: /* 0x135 */
+/* File: armv5te/OP_UNUSED_35FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_36FF: /* 0x136 */
+/* File: armv5te/OP_UNUSED_36FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_37FF: /* 0x137 */
+/* File: armv5te/OP_UNUSED_37FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_38FF: /* 0x138 */
+/* File: armv5te/OP_UNUSED_38FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_39FF: /* 0x139 */
+/* File: armv5te/OP_UNUSED_39FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3AFF: /* 0x13a */
+/* File: armv5te/OP_UNUSED_3AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3BFF: /* 0x13b */
+/* File: armv5te/OP_UNUSED_3BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3CFF: /* 0x13c */
+/* File: armv5te/OP_UNUSED_3CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3DFF: /* 0x13d */
+/* File: armv5te/OP_UNUSED_3DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3EFF: /* 0x13e */
+/* File: armv5te/OP_UNUSED_3EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3FFF: /* 0x13f */
+/* File: armv5te/OP_UNUSED_3FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_40FF: /* 0x140 */
+/* File: armv5te/OP_UNUSED_40FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_41FF: /* 0x141 */
+/* File: armv5te/OP_UNUSED_41FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_42FF: /* 0x142 */
+/* File: armv5te/OP_UNUSED_42FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_43FF: /* 0x143 */
+/* File: armv5te/OP_UNUSED_43FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_44FF: /* 0x144 */
+/* File: armv5te/OP_UNUSED_44FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_45FF: /* 0x145 */
+/* File: armv5te/OP_UNUSED_45FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_46FF: /* 0x146 */
+/* File: armv5te/OP_UNUSED_46FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_47FF: /* 0x147 */
+/* File: armv5te/OP_UNUSED_47FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_48FF: /* 0x148 */
+/* File: armv5te/OP_UNUSED_48FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_49FF: /* 0x149 */
+/* File: armv5te/OP_UNUSED_49FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4AFF: /* 0x14a */
+/* File: armv5te/OP_UNUSED_4AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4BFF: /* 0x14b */
+/* File: armv5te/OP_UNUSED_4BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4CFF: /* 0x14c */
+/* File: armv5te/OP_UNUSED_4CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4DFF: /* 0x14d */
+/* File: armv5te/OP_UNUSED_4DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4EFF: /* 0x14e */
+/* File: armv5te/OP_UNUSED_4EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4FFF: /* 0x14f */
+/* File: armv5te/OP_UNUSED_4FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_50FF: /* 0x150 */
+/* File: armv5te/OP_UNUSED_50FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_51FF: /* 0x151 */
+/* File: armv5te/OP_UNUSED_51FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_52FF: /* 0x152 */
+/* File: armv5te/OP_UNUSED_52FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_53FF: /* 0x153 */
+/* File: armv5te/OP_UNUSED_53FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_54FF: /* 0x154 */
+/* File: armv5te/OP_UNUSED_54FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_55FF: /* 0x155 */
+/* File: armv5te/OP_UNUSED_55FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_56FF: /* 0x156 */
+/* File: armv5te/OP_UNUSED_56FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_57FF: /* 0x157 */
+/* File: armv5te/OP_UNUSED_57FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_58FF: /* 0x158 */
+/* File: armv5te/OP_UNUSED_58FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_59FF: /* 0x159 */
+/* File: armv5te/OP_UNUSED_59FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5AFF: /* 0x15a */
+/* File: armv5te/OP_UNUSED_5AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5BFF: /* 0x15b */
+/* File: armv5te/OP_UNUSED_5BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5CFF: /* 0x15c */
+/* File: armv5te/OP_UNUSED_5CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5DFF: /* 0x15d */
+/* File: armv5te/OP_UNUSED_5DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5EFF: /* 0x15e */
+/* File: armv5te/OP_UNUSED_5EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5FFF: /* 0x15f */
+/* File: armv5te/OP_UNUSED_5FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_60FF: /* 0x160 */
+/* File: armv5te/OP_UNUSED_60FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_61FF: /* 0x161 */
+/* File: armv5te/OP_UNUSED_61FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_62FF: /* 0x162 */
+/* File: armv5te/OP_UNUSED_62FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_63FF: /* 0x163 */
+/* File: armv5te/OP_UNUSED_63FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_64FF: /* 0x164 */
+/* File: armv5te/OP_UNUSED_64FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_65FF: /* 0x165 */
+/* File: armv5te/OP_UNUSED_65FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_66FF: /* 0x166 */
+/* File: armv5te/OP_UNUSED_66FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_67FF: /* 0x167 */
+/* File: armv5te/OP_UNUSED_67FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_68FF: /* 0x168 */
+/* File: armv5te/OP_UNUSED_68FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_69FF: /* 0x169 */
+/* File: armv5te/OP_UNUSED_69FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6AFF: /* 0x16a */
+/* File: armv5te/OP_UNUSED_6AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6BFF: /* 0x16b */
+/* File: armv5te/OP_UNUSED_6BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6CFF: /* 0x16c */
+/* File: armv5te/OP_UNUSED_6CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6DFF: /* 0x16d */
+/* File: armv5te/OP_UNUSED_6DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6EFF: /* 0x16e */
+/* File: armv5te/OP_UNUSED_6EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6FFF: /* 0x16f */
+/* File: armv5te/OP_UNUSED_6FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_70FF: /* 0x170 */
+/* File: armv5te/OP_UNUSED_70FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_71FF: /* 0x171 */
+/* File: armv5te/OP_UNUSED_71FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_72FF: /* 0x172 */
+/* File: armv5te/OP_UNUSED_72FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_73FF: /* 0x173 */
+/* File: armv5te/OP_UNUSED_73FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_74FF: /* 0x174 */
+/* File: armv5te/OP_UNUSED_74FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_75FF: /* 0x175 */
+/* File: armv5te/OP_UNUSED_75FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_76FF: /* 0x176 */
+/* File: armv5te/OP_UNUSED_76FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_77FF: /* 0x177 */
+/* File: armv5te/OP_UNUSED_77FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_78FF: /* 0x178 */
+/* File: armv5te/OP_UNUSED_78FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_79FF: /* 0x179 */
+/* File: armv5te/OP_UNUSED_79FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7AFF: /* 0x17a */
+/* File: armv5te/OP_UNUSED_7AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7BFF: /* 0x17b */
+/* File: armv5te/OP_UNUSED_7BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7CFF: /* 0x17c */
+/* File: armv5te/OP_UNUSED_7CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7DFF: /* 0x17d */
+/* File: armv5te/OP_UNUSED_7DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7EFF: /* 0x17e */
+/* File: armv5te/OP_UNUSED_7EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7FFF: /* 0x17f */
+/* File: armv5te/OP_UNUSED_7FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_80FF: /* 0x180 */
+/* File: armv5te/OP_UNUSED_80FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_81FF: /* 0x181 */
+/* File: armv5te/OP_UNUSED_81FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_82FF: /* 0x182 */
+/* File: armv5te/OP_UNUSED_82FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_83FF: /* 0x183 */
+/* File: armv5te/OP_UNUSED_83FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_84FF: /* 0x184 */
+/* File: armv5te/OP_UNUSED_84FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_85FF: /* 0x185 */
+/* File: armv5te/OP_UNUSED_85FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_86FF: /* 0x186 */
+/* File: armv5te/OP_UNUSED_86FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_87FF: /* 0x187 */
+/* File: armv5te/OP_UNUSED_87FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_88FF: /* 0x188 */
+/* File: armv5te/OP_UNUSED_88FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_89FF: /* 0x189 */
+/* File: armv5te/OP_UNUSED_89FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8AFF: /* 0x18a */
+/* File: armv5te/OP_UNUSED_8AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8BFF: /* 0x18b */
+/* File: armv5te/OP_UNUSED_8BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8CFF: /* 0x18c */
+/* File: armv5te/OP_UNUSED_8CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8DFF: /* 0x18d */
+/* File: armv5te/OP_UNUSED_8DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8EFF: /* 0x18e */
+/* File: armv5te/OP_UNUSED_8EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8FFF: /* 0x18f */
+/* File: armv5te/OP_UNUSED_8FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_90FF: /* 0x190 */
+/* File: armv5te/OP_UNUSED_90FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_91FF: /* 0x191 */
+/* File: armv5te/OP_UNUSED_91FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_92FF: /* 0x192 */
+/* File: armv5te/OP_UNUSED_92FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_93FF: /* 0x193 */
+/* File: armv5te/OP_UNUSED_93FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_94FF: /* 0x194 */
+/* File: armv5te/OP_UNUSED_94FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_95FF: /* 0x195 */
+/* File: armv5te/OP_UNUSED_95FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_96FF: /* 0x196 */
+/* File: armv5te/OP_UNUSED_96FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_97FF: /* 0x197 */
+/* File: armv5te/OP_UNUSED_97FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_98FF: /* 0x198 */
+/* File: armv5te/OP_UNUSED_98FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_99FF: /* 0x199 */
+/* File: armv5te/OP_UNUSED_99FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9AFF: /* 0x19a */
+/* File: armv5te/OP_UNUSED_9AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9BFF: /* 0x19b */
+/* File: armv5te/OP_UNUSED_9BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9CFF: /* 0x19c */
+/* File: armv5te/OP_UNUSED_9CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9DFF: /* 0x19d */
+/* File: armv5te/OP_UNUSED_9DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9EFF: /* 0x19e */
+/* File: armv5te/OP_UNUSED_9EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9FFF: /* 0x19f */
+/* File: armv5te/OP_UNUSED_9FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A0FF: /* 0x1a0 */
+/* File: armv5te/OP_UNUSED_A0FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A1FF: /* 0x1a1 */
+/* File: armv5te/OP_UNUSED_A1FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A2FF: /* 0x1a2 */
+/* File: armv5te/OP_UNUSED_A2FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A3FF: /* 0x1a3 */
+/* File: armv5te/OP_UNUSED_A3FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A4FF: /* 0x1a4 */
+/* File: armv5te/OP_UNUSED_A4FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A5FF: /* 0x1a5 */
+/* File: armv5te/OP_UNUSED_A5FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A6FF: /* 0x1a6 */
+/* File: armv5te/OP_UNUSED_A6FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A7FF: /* 0x1a7 */
+/* File: armv5te/OP_UNUSED_A7FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A8FF: /* 0x1a8 */
+/* File: armv5te/OP_UNUSED_A8FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A9FF: /* 0x1a9 */
+/* File: armv5te/OP_UNUSED_A9FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_AAFF: /* 0x1aa */
+/* File: armv5te/OP_UNUSED_AAFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_ABFF: /* 0x1ab */
+/* File: armv5te/OP_UNUSED_ABFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_ACFF: /* 0x1ac */
+/* File: armv5te/OP_UNUSED_ACFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_ADFF: /* 0x1ad */
+/* File: armv5te/OP_UNUSED_ADFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_AEFF: /* 0x1ae */
+/* File: armv5te/OP_UNUSED_AEFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_AFFF: /* 0x1af */
+/* File: armv5te/OP_UNUSED_AFFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B0FF: /* 0x1b0 */
+/* File: armv5te/OP_UNUSED_B0FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B1FF: /* 0x1b1 */
+/* File: armv5te/OP_UNUSED_B1FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B2FF: /* 0x1b2 */
+/* File: armv5te/OP_UNUSED_B2FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B3FF: /* 0x1b3 */
+/* File: armv5te/OP_UNUSED_B3FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B4FF: /* 0x1b4 */
+/* File: armv5te/OP_UNUSED_B4FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B5FF: /* 0x1b5 */
+/* File: armv5te/OP_UNUSED_B5FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B6FF: /* 0x1b6 */
+/* File: armv5te/OP_UNUSED_B6FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B7FF: /* 0x1b7 */
+/* File: armv5te/OP_UNUSED_B7FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B8FF: /* 0x1b8 */
+/* File: armv5te/OP_UNUSED_B8FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B9FF: /* 0x1b9 */
+/* File: armv5te/OP_UNUSED_B9FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BAFF: /* 0x1ba */
+/* File: armv5te/OP_UNUSED_BAFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BBFF: /* 0x1bb */
+/* File: armv5te/OP_UNUSED_BBFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BCFF: /* 0x1bc */
+/* File: armv5te/OP_UNUSED_BCFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BDFF: /* 0x1bd */
+/* File: armv5te/OP_UNUSED_BDFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BEFF: /* 0x1be */
+/* File: armv5te/OP_UNUSED_BEFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BFFF: /* 0x1bf */
+/* File: armv5te/OP_UNUSED_BFFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C0FF: /* 0x1c0 */
+/* File: armv5te/OP_UNUSED_C0FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C1FF: /* 0x1c1 */
+/* File: armv5te/OP_UNUSED_C1FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C2FF: /* 0x1c2 */
+/* File: armv5te/OP_UNUSED_C2FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C3FF: /* 0x1c3 */
+/* File: armv5te/OP_UNUSED_C3FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C4FF: /* 0x1c4 */
+/* File: armv5te/OP_UNUSED_C4FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C5FF: /* 0x1c5 */
+/* File: armv5te/OP_UNUSED_C5FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C6FF: /* 0x1c6 */
+/* File: armv5te/OP_UNUSED_C6FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C7FF: /* 0x1c7 */
+/* File: armv5te/OP_UNUSED_C7FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C8FF: /* 0x1c8 */
+/* File: armv5te/OP_UNUSED_C8FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C9FF: /* 0x1c9 */
+/* File: armv5te/OP_UNUSED_C9FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CAFF: /* 0x1ca */
+/* File: armv5te/OP_UNUSED_CAFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CBFF: /* 0x1cb */
+/* File: armv5te/OP_UNUSED_CBFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CCFF: /* 0x1cc */
+/* File: armv5te/OP_UNUSED_CCFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CDFF: /* 0x1cd */
+/* File: armv5te/OP_UNUSED_CDFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CEFF: /* 0x1ce */
+/* File: armv5te/OP_UNUSED_CEFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CFFF: /* 0x1cf */
+/* File: armv5te/OP_UNUSED_CFFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D0FF: /* 0x1d0 */
+/* File: armv5te/OP_UNUSED_D0FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D1FF: /* 0x1d1 */
+/* File: armv5te/OP_UNUSED_D1FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D2FF: /* 0x1d2 */
+/* File: armv5te/OP_UNUSED_D2FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D3FF: /* 0x1d3 */
+/* File: armv5te/OP_UNUSED_D3FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D4FF: /* 0x1d4 */
+/* File: armv5te/OP_UNUSED_D4FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D5FF: /* 0x1d5 */
+/* File: armv5te/OP_UNUSED_D5FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D6FF: /* 0x1d6 */
+/* File: armv5te/OP_UNUSED_D6FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D7FF: /* 0x1d7 */
+/* File: armv5te/OP_UNUSED_D7FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D8FF: /* 0x1d8 */
+/* File: armv5te/OP_UNUSED_D8FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D9FF: /* 0x1d9 */
+/* File: armv5te/OP_UNUSED_D9FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DAFF: /* 0x1da */
+/* File: armv5te/OP_UNUSED_DAFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DBFF: /* 0x1db */
+/* File: armv5te/OP_UNUSED_DBFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DCFF: /* 0x1dc */
+/* File: armv5te/OP_UNUSED_DCFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DDFF: /* 0x1dd */
+/* File: armv5te/OP_UNUSED_DDFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DEFF: /* 0x1de */
+/* File: armv5te/OP_UNUSED_DEFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DFFF: /* 0x1df */
+/* File: armv5te/OP_UNUSED_DFFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E0FF: /* 0x1e0 */
+/* File: armv5te/OP_UNUSED_E0FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E1FF: /* 0x1e1 */
+/* File: armv5te/OP_UNUSED_E1FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E2FF: /* 0x1e2 */
+/* File: armv5te/OP_UNUSED_E2FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E3FF: /* 0x1e3 */
+/* File: armv5te/OP_UNUSED_E3FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E4FF: /* 0x1e4 */
+/* File: armv5te/OP_UNUSED_E4FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E5FF: /* 0x1e5 */
+/* File: armv5te/OP_UNUSED_E5FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E6FF: /* 0x1e6 */
+/* File: armv5te/OP_UNUSED_E6FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E7FF: /* 0x1e7 */
+/* File: armv5te/OP_UNUSED_E7FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E8FF: /* 0x1e8 */
+/* File: armv5te/OP_UNUSED_E8FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E9FF: /* 0x1e9 */
+/* File: armv5te/OP_UNUSED_E9FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EAFF: /* 0x1ea */
+/* File: armv5te/OP_UNUSED_EAFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EBFF: /* 0x1eb */
+/* File: armv5te/OP_UNUSED_EBFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_ECFF: /* 0x1ec */
+/* File: armv5te/OP_UNUSED_ECFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EDFF: /* 0x1ed */
+/* File: armv5te/OP_UNUSED_EDFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EEFF: /* 0x1ee */
+/* File: armv5te/OP_UNUSED_EEFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EFFF: /* 0x1ef */
+/* File: armv5te/OP_UNUSED_EFFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_F0FF: /* 0x1f0 */
+/* File: armv5te/OP_UNUSED_F0FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_F1FF: /* 0x1f1 */
+/* File: armv5te/OP_UNUSED_F1FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_OBJECT_INIT_JUMBO: /* 0x1f2 */
+/* File: armv5te/OP_INVOKE_OBJECT_INIT_JUMBO.S */
+/* File: armv5te/OP_INVOKE_OBJECT_INIT_RANGE.S */
+    /*
+     * Invoke Object.<init> on an object.  In practice we know that
+     * Object's nullary constructor doesn't do anything, so we just
+     * skip it (we know a debugger isn't active).
+     */
+    FETCH(r1, 4)                  @ r1<- CCCC
+    GET_VREG(r0, r1)                    @ r0<- "this" ptr
+    cmp     r0, #0                      @ check for NULL
+    beq     common_errNullObject        @ export PC and throw NPE
+    ldr     r1, [r0, #offObject_clazz]  @ r1<- obj->clazz
+    ldr     r2, [r1, #offClassObject_accessFlags] @ r2<- clazz->accessFlags
+    tst     r2, #CLASS_ISFINALIZABLE    @ is this class finalizable?
+    beq     1f                          @ nope, done
+    bl      dvmSetFinalizable           @ call dvmSetFinalizable(obj)
+1:  FETCH_ADVANCE_INST(4+1)       @ advance to next instr, load rINST
+    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
+    GOTO_OPCODE(ip)                     @ execute it
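+
+    /*
+     * The only real work here is the finalizable check: if the class has
+     * CLASS_ISFINALIZABLE set we call dvmSetFinalizable(obj); otherwise
+     * the constructor call is elided and we simply step over the 5-unit
+     * jumbo instruction (FETCH_ADVANCE_INST(4+1)).
+     */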
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_VOLATILE_JUMBO: /* 0x1f3 */
+/* File: armv5te/OP_IGET_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_VOLATILE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_VOLATILE_JUMBO_resolved        @ resolved, continue
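+
+    /*
+     * The slow path (label 8) exports the PC because dvmResolveInstField
+     * can throw, resolves the field using the current method's class, and
+     * then rejoins the fast path at the _resolved label.
+     */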
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_WIDE_VOLATILE_JUMBO: /* 0x1f4 */
+/* File: armv5te/OP_IGET_WIDE_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_IGET_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit instance field get.
+     */
+    /* iget-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_WIDE_VOLATILE_JUMBO_finish          @ no, already resolved
+    ldr     r2, [rSELF, #offThread_method] @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_WIDE_VOLATILE_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_OBJECT_VOLATILE_JUMBO: /* 0x1f5 */
+/* File: armv5te/OP_IGET_OBJECT_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_IGET_OBJECT_JUMBO.S */
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_OBJECT_VOLATILE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_OBJECT_VOLATILE_JUMBO_resolved        @ resolved, continue
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_VOLATILE_JUMBO: /* 0x1f6 */
+/* File: armv5te/OP_IPUT_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_VOLATILE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_VOLATILE_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_WIDE_VOLATILE_JUMBO: /* 0x1f7 */
+/* File: armv5te/OP_IPUT_WIDE_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_IPUT_WIDE_JUMBO.S */
+    /* iput-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_WIDE_VOLATILE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_WIDE_VOLATILE_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_OBJECT_VOLATILE_JUMBO: /* 0x1f8 */
+/* File: armv5te/OP_IPUT_OBJECT_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_IPUT_OBJECT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     */
+    /* iput-object/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_OBJECT_VOLATILE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_OBJECT_VOLATILE_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_VOLATILE_JUMBO: /* 0x1f9 */
+/* File: armv5te/OP_SGET_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_VOLATILE_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_VOLATILE_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    SMP_DMB                            @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
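+
+    /*
+     * The volatile variants differ from the plain jumbo handlers
+     * essentially only in the barrier: an SMP_DMB after the load
+     * ("acquiring load") here, and an SMP_DMB before the store
+     * ("releasing store") in the SPUT case; the non-volatile handlers
+     * mark the same slot with "@ no-op".
+     */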
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_WIDE_VOLATILE_JUMBO: /* 0x1fa */
+/* File: armv5te/OP_SGET_WIDE_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_SGET_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit SGET handler.
+     */
+    /* sget-wide/jumbo vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_WIDE_VOLATILE_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_WIDE_VOLATILE_JUMBO_finish:
+    FETCH(r9, 3)                        @ r9<- BBBB
+    .if 1
+    add     r0, r0, #offStaticField_value @ r0<- pointer to data
+    bl      dvmQuasiAtomicRead64        @ r0/r1<- contents of field
+    .else
+    ldrd    r0, [r0, #offStaticField_value] @ r0/r1<- field value (aligned)
+    .endif
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BBBB]
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    stmia   r9, {r0-r1}                 @ vBBBB/vBBBB+1<- r0/r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
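+
+    /*
+     * 64-bit volatile accesses are routed through dvmQuasiAtomicRead64
+     * (and dvmQuasiAtomicSwap64 on the store side) with a pointer to the
+     * field data rather than a plain ldrd/strd; the ".if 1" selects that
+     * path here, presumably because a bare ldrd/strd is not guaranteed to
+     * be atomic on this target.
+     */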
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_OBJECT_VOLATILE_JUMBO: /* 0x1fb */
+/* File: armv5te/OP_SGET_OBJECT_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_SGET_OBJECT_JUMBO.S */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_OBJECT_VOLATILE_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_OBJECT_VOLATILE_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    SMP_DMB                            @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_VOLATILE_JUMBO: /* 0x1fc */
+/* File: armv5te/OP_SPUT_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_VOLATILE_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_VOLATILE_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SMP_DMB                            @ releasing store
+    str     r1, [r0, #offStaticField_value] @ field<- vBBBB
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_WIDE_VOLATILE_JUMBO: /* 0x1fd */
+/* File: armv5te/OP_SPUT_WIDE_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_SPUT_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit SPUT handler.
+     */
+    /* sput-wide/jumbo vBBBB, field@AAAAAAAA */
+    ldr     r0, [rSELF, #offThread_methodClassDex]  @ r0<- DvmDex
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    ldr     r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    FETCH(r9, 3)                        @ r9<- BBBB
+    ldr     r2, [r0, r1, lsl #2]        @ r2<- resolved StaticField ptr
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BBBB]
+    cmp     r2, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_WIDE_VOLATILE_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_WIDE_VOLATILE_JUMBO_finish: @ field ptr in r2, BBBB in r9
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vBBBB/vBBBB+1
+    GET_INST_OPCODE(r10)                @ extract opcode from rINST
+    .if 1
+    add     r2, r2, #offStaticField_value @ r2<- pointer to data
+    bl      dvmQuasiAtomicSwap64        @ stores r0/r1 into addr r2
+    .else
+    strd    r0, [r2, #offStaticField_value] @ field<- vBBBB/vBBBB+1
+    .endif
+    GOTO_OPCODE(r10)                    @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_OBJECT_VOLATILE_JUMBO: /* 0x1fe */
+/* File: armv5te/OP_SPUT_OBJECT_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_SPUT_OBJECT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler for objects
+     */
+    /* sput-object/jumbo vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_SPUT_OBJECT_VOLATILE_JUMBO_finish          @ no, continue
+    ldr     r9, [rSELF, #offThread_method]    @ r9<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r9, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_OBJECT_VOLATILE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_THROW_VERIFICATION_ERROR_JUMBO: /* 0x1ff */
+/* File: armv5te/OP_THROW_VERIFICATION_ERROR_JUMBO.S */
+    /*
+     * Handle a jumbo throw-verification-error instruction.  This throws an
+     * exception for an error discovered during verification.  The
+     * exception is indicated by BBBB, with some detail provided by AAAAAAAA.
+     */
+    /* exop BBBB, Class@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    ldr     r0, [rSELF, #offThread_method]    @ r0<- self->method
+    orr     r2, r1, r2, lsl #16         @ r2<- AAAAaaaa
+    EXPORT_PC()                         @ export the PC
+    FETCH(r1, 3)                        @ r1<- BBBB
+    bl      dvmThrowVerificationError   @ always throws
+    b       common_exceptionThrown      @ handle exception
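Every jumbo handler above assembles its 32-bit reference index the same way: the low and high 16-bit code units are fetched separately and merged with orr rX, lo, hi, lsl #16. The equivalent in C, purely illustrative:

    #include <stdint.h>

    /* AAAAaaaa: merge the two 16-bit code units of a jumbo reference index. */
    static inline uint32_t jumboIndex(uint16_t lo /* aaaa */, uint16_t hi /* AAAA */)
    {
        return (uint32_t)lo | ((uint32_t)hi << 16);
    }

For example, jumboIndex(0x1234, 0x0005) is 0x00051234.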
 
     .balign 64
     .size   dvmAsmInstructionStart, .-dvmAsmInstructionStart
@@ -8095,7 +11198,7 @@
      */
 .LOP_CONST_STRING_resolve:
     EXPORT_PC()
-    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+    ldr     r0, [rSELF, #offThread_method] @ r0<- self->method
     ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveString            @ r0<- String reference
     cmp     r0, #0                      @ failed?
@@ -8114,7 +11217,7 @@
      */
 .LOP_CONST_STRING_JUMBO_resolve:
     EXPORT_PC()
-    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+    ldr     r0, [rSELF, #offThread_method] @ r0<- self->method
     ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveString            @ r0<- String reference
     cmp     r0, #0                      @ failed?
@@ -8133,7 +11236,7 @@
      */
 .LOP_CONST_CLASS_resolve:
     EXPORT_PC()
-    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+    ldr     r0, [rSELF, #offThread_method] @ r0<- self->method
     mov     r2, #1                      @ r2<- true
     ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveClass             @ r0<- Class reference
@@ -8173,7 +11276,7 @@
      */
 .LOP_CHECK_CAST_resolve:
     EXPORT_PC()                         @ resolve() could throw
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     mov     r1, r2                      @ r1<- BBBB
     mov     r2, #0                      @ r2<- false
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
@@ -8226,7 +11329,7 @@
      */
 .LOP_INSTANCE_OF_resolve:
     EXPORT_PC()                         @ resolve() could throw
-    ldr     r0, [rGLUE, #offGlue_method]    @ r0<- glue->method
+    ldr     r0, [rSELF, #offThread_method]    @ r0<- self->method
     mov     r1, r3                      @ r1<- BBBB
     mov     r2, #1                      @ r2<- true
     ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
@@ -8270,7 +11373,7 @@
      *  r1 holds BBBB
      */
 .LOP_NEW_INSTANCE_resolve:
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     mov     r2, #0                      @ r2<- false
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
@@ -8278,9 +11381,6 @@
     bne     .LOP_NEW_INSTANCE_resolved        @ no, continue
     b       common_exceptionThrown      @ yes, handle exception
 
-.LstrInstantiationErrorPtr:
-    .word   .LstrInstantiationError
-
 /* continuation for OP_NEW_ARRAY */
 
 
@@ -8291,7 +11391,7 @@
      *  r2 holds class ref CCCC
      */
 .LOP_NEW_ARRAY_resolve:
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     mov     r9, r1                      @ r9<- length (save)
     mov     r1, r2                      @ r1<- CCCC
     mov     r2, #0                      @ r2<- false
@@ -8346,8 +11446,8 @@
     beq     common_exceptionThrown      @ alloc failed, handle exception
 
     FETCH(r1, 2)                        @ r1<- FEDC or CCCC
-    str     r0, [rGLUE, #offGlue_retval]      @ retval.l <- new array
-    str     rINST, [rGLUE, #offGlue_retval+4] @ retval.h <- type
+    str     r0, [rSELF, #offThread_retval]      @ retval.l <- new array
+    str     rINST, [rSELF, #offThread_retval+4] @ retval.h <- type
     add     r0, r0, #offArrayObject_contents @ r0<- newArray->contents
     subs    r9, r9, #1                  @ length--, check for neg
     FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
@@ -8379,9 +11479,9 @@
     .endif
 
 2:
-    ldr     r0, [rGLUE, #offGlue_retval]     @ r0<- object
-    ldr     r1, [rGLUE, #offGlue_retval+4]   @ r1<- type
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r0, [rSELF, #offThread_retval]     @ r0<- object
+    ldr     r1, [rSELF, #offThread_retval+4]   @ r1<- type
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     GET_INST_OPCODE(ip)                      @ ip<- opcode from rINST
     cmp     r1, #'I'                         @ Is int array?
     strneb  r2, [r2, r0, lsr #GC_CARD_SHIFT] @ Mark card based on object head
@@ -8392,16 +11492,13 @@
      * mode of filled-new-array.
      */
 .LOP_FILLED_NEW_ARRAY_notimpl:
-    ldr     r0, .L_strInternalError
-    ldr     r1, .L_strFilledNewArrayNotImpl
-    bl      dvmThrowException
+    ldr     r0, .L_strFilledNewArrayNotImpl
+    bl      dvmThrowInternalError
     b       common_exceptionThrown
 
     .if     (!0)                 @ define in one or the other, not both
 .L_strFilledNewArrayNotImpl:
     .word   .LstrFilledNewArrayNotImpl
-.L_strInternalError:
-    .word   .LstrInternalError
     .endif
 
 /* continuation for OP_FILLED_NEW_ARRAY_RANGE */
@@ -8430,8 +11527,8 @@
     beq     common_exceptionThrown      @ alloc failed, handle exception
 
     FETCH(r1, 2)                        @ r1<- FEDC or CCCC
-    str     r0, [rGLUE, #offGlue_retval]      @ retval.l <- new array
-    str     rINST, [rGLUE, #offGlue_retval+4] @ retval.h <- type
+    str     r0, [rSELF, #offThread_retval]      @ retval.l <- new array
+    str     rINST, [rSELF, #offThread_retval+4] @ retval.h <- type
     add     r0, r0, #offArrayObject_contents @ r0<- newArray->contents
     subs    r9, r9, #1                  @ length--, check for neg
     FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
@@ -8463,9 +11560,9 @@
     .endif
 
 2:
-    ldr     r0, [rGLUE, #offGlue_retval]     @ r0<- object
-    ldr     r1, [rGLUE, #offGlue_retval+4]   @ r1<- type
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r0, [rSELF, #offThread_retval]     @ r0<- object
+    ldr     r1, [rSELF, #offThread_retval+4]   @ r1<- type
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     GET_INST_OPCODE(ip)                      @ ip<- opcode from rINST
     cmp     r1, #'I'                         @ Is int array?
     strneb  r2, [r2, r0, lsr #GC_CARD_SHIFT] @ Mark card based on object head
@@ -8476,16 +11573,13 @@
      * mode of filled-new-array.
      */
 .LOP_FILLED_NEW_ARRAY_RANGE_notimpl:
-    ldr     r0, .L_strInternalError
-    ldr     r1, .L_strFilledNewArrayNotImpl
-    bl      dvmThrowException
+    ldr     r0, .L_strFilledNewArrayNotImpl
+    bl      dvmThrowInternalError
     b       common_exceptionThrown
 
     .if     (!1)                 @ define in one or the other, not both
 .L_strFilledNewArrayNotImpl:
     .word   .LstrFilledNewArrayNotImpl
-.L_strInternalError:
-    .word   .LstrInternalError
     .endif
 
 /* continuation for OP_CMPL_FLOAT */
@@ -8701,7 +11795,7 @@
     beq     .LOP_APUT_OBJECT_throw           @ no
     mov     r1, rINST                   @ r1<- arrayObj
     FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
-    ldr     r2, [rGLUE, #offGlue_cardTable]     @ get biased CT base
+    ldr     r2, [rSELF, #offThread_cardTable]     @ get biased CT base
     add     r10, #offArrayObject_contents   @ r0<- pointer to slot
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     str     r9, [r10]                   @ vBB[vCC]<- vAA
@@ -8931,7 +12025,7 @@
     and     r1, r1, #15                 @ r1<- A
     cmp     r9, #0                      @ check object for null
     GET_VREG(r0, r1)                    @ r0<- fp[A]
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     beq     common_errNullObject        @ object was null
     FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
@@ -9032,7 +12126,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9049,7 +12143,7 @@
      * Returns StaticField pointer in r0.
      */
 .LOP_SGET_WIDE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9064,7 +12158,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_OBJECT_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9079,7 +12173,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_BOOLEAN_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9094,7 +12188,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_BYTE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9109,7 +12203,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_CHAR_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9124,7 +12218,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_SHORT_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9139,7 +12233,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SPUT_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9157,7 +12251,7 @@
      * Returns StaticField pointer in r2.
      */
 .LOP_SPUT_WIDE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9171,7 +12265,7 @@
     mov     r2, rINST, lsr #8           @ r2<- AA
     FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
     GET_VREG(r1, r2)                    @ r1<- fp[AA]
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     ldr     r9, [r0, #offField_clazz]   @ r9<- field->clazz
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     @ no-op                             @ releasing store
@@ -9187,7 +12281,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SPUT_BOOLEAN_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9202,7 +12296,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SPUT_BYTE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9217,7 +12311,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SPUT_CHAR_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9232,7 +12326,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SPUT_SHORT_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9299,7 +12393,7 @@
      *  r10 = "this" register
      */
 .LOP_INVOKE_DIRECT_resolve:
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     mov     r2, #METHOD_DIRECT          @ resolver method type
     bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
@@ -9367,7 +12461,7 @@
      *  r10 = "this" register
      */
 .LOP_INVOKE_DIRECT_RANGE_resolve:
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     mov     r2, #METHOD_DIRECT          @ resolver method type
     bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
@@ -9564,7 +12658,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_VOLATILE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9579,7 +12673,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SPUT_VOLATILE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9667,7 +12761,7 @@
      * Returns StaticField pointer in r0.
      */
 .LOP_SGET_WIDE_VOLATILE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9685,7 +12779,7 @@
      * Returns StaticField pointer in r2.
      */
 .LOP_SPUT_WIDE_VOLATILE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9772,7 +12866,7 @@
     and     r1, r1, #15                 @ r1<- A
     cmp     r9, #0                      @ check object for null
     GET_VREG(r0, r1)                    @ r0<- fp[A]
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     beq     common_errNullObject        @ object was null
     FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
@@ -9789,7 +12883,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_OBJECT_VOLATILE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9802,7 +12896,7 @@
     mov     r2, rINST, lsr #8           @ r2<- AA
     FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
     GET_VREG(r1, r2)                    @ r1<- fp[AA]
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     ldr     r9, [r0, #offField_clazz]   @ r9<- field->clazz
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     SMP_DMB                            @ releasing store
@@ -9811,10 +12905,8455 @@
     strneb  r2, [r2, r9, lsr #GC_CARD_SHIFT]  @ mark card based on obj head
     GOTO_OPCODE(ip)                     @ jump to next instruction
 
+/* continuation for OP_CONST_CLASS_JUMBO */
+
+    /*
+     * Continuation if the Class has not yet been resolved.
+     *  r1: AAAAAAAA (Class ref)
+     *  r9: target register
+     */
+.LOP_CONST_CLASS_JUMBO_resolve:
+    EXPORT_PC()
+    ldr     r0, [rSELF, #offThread_method] @ r0<- self->method
+    mov     r2, #1                      @ r2<- true
+    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- Class reference
+    cmp     r0, #0                      @ failed?
+    beq     common_exceptionThrown      @ yup, handle the exception
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vBBBB<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_CHECK_CAST_JUMBO */
+
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  r0 holds obj->clazz
+     *  r1 holds desired class resolved from AAAAAAAA
+     *  r9 holds object
+     */
+.LOP_CHECK_CAST_JUMBO_fullcheck:
+    mov     r10, r1                     @ avoid ClassObject getting clobbered
+    bl      dvmInstanceofNonTrivial     @ r0<- boolean result
+    cmp     r0, #0                      @ failed?
+    bne     .LOP_CHECK_CAST_JUMBO_okay            @ no, success
+
+    @ A cast has failed.  We need to throw a ClassCastException.
+    EXPORT_PC()                         @ about to throw
+    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz (actual class)
+    mov     r1, r10                     @ r1<- desired class
+    bl      dvmThrowClassCastException
+    b       common_exceptionThrown
+
+    /*
+     * Advance PC and get the next opcode.
+     */
+.LOP_CHECK_CAST_JUMBO_okay:
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  r2 holds AAAAAAAA
+     *  r9 holds object
+     */
+.LOP_CHECK_CAST_JUMBO_resolve:
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    mov     r1, r2                      @ r1<- AAAAAAAA
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
+    cmp     r0, #0                      @ got null?
+    beq     common_exceptionThrown      @ yes, handle exception
+    mov     r1, r0                      @ r1<- class resolved from AAAAAAAA
+    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
+    b       .LOP_CHECK_CAST_JUMBO_resolved        @ pick up where we left off
+
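Putting the check-cast/jumbo fast path and the continuation above together, the logic is: a null reference always passes, an exact class match passes trivially, anything else goes through dvmInstanceofNonTrivial, and a failed cast throws ClassCastException. A condensed C sketch with stand-in Object/ClassObject types (the null and trivial checks actually happen in the fast path before the fragment above):

    #include <stddef.h>

    typedef struct ClassObject ClassObject;          /* opaque stand-in */
    typedef struct { ClassObject *clazz; } Object;   /* simplified */

    /* Assumed to match the runtime helpers called by the handler. */
    extern int  dvmInstanceofNonTrivial(const ClassObject *actual,
                                        const ClassObject *desired);
    extern void dvmThrowClassCastException(ClassObject *actual,
                                           ClassObject *desired);

    /* Returns 0 if the cast succeeds, -1 with an exception pending if not. */
    static int checkCastJumbo(Object *obj, ClassObject *desired)
    {
        if (obj == NULL)
            return 0;                       /* null always passes */
        if (obj->clazz == desired)
            return 0;                       /* trivial success */
        if (dvmInstanceofNonTrivial(obj->clazz, desired))
            return 0;
        dvmThrowClassCastException(obj->clazz, desired);
        return -1;
    }
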
+/* continuation for OP_INSTANCE_OF_JUMBO */
+
+    /*
+     * Class resolved, determine type of check necessary.  This is common.
+     *  r0 holds obj->clazz
+     *  r1 holds class resolved from AAAAAAAA
+     *  r9 holds BBBB
+     */
+.LOP_INSTANCE_OF_JUMBO_resolved:
+    cmp     r0, r1                      @ same class (trivial success)?
+    beq     .LOP_INSTANCE_OF_JUMBO_trivial         @ yes, trivial finish
+    @ fall through to OP_INSTANCE_OF_JUMBO_fullcheck
+
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  r0 holds obj->clazz
+     *  r1 holds class resolved from AAAAAAAA
+     *  r9 holds BBBB
+     */
+.LOP_INSTANCE_OF_JUMBO_fullcheck:
+    bl      dvmInstanceofNonTrivial     @ r0<- boolean result
+    @ fall through to OP_INSTANCE_OF_JUMBO_store
+
+    /*
+     * r0 holds boolean result
+     * r9 holds BBBB
+     */
+.LOP_INSTANCE_OF_JUMBO_store:
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r9)                    @ vBBBB<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * Trivial test succeeded, save and bail.
+     *  r9 holds BBBB
+     */
+.LOP_INSTANCE_OF_JUMBO_trivial:
+    mov     r0, #1                      @ indicate success
+    @ could branch to .LOP_INSTANCE_OF_JUMBO_store, but copying the tail here is faster and cheaper
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r9)                    @ vBBBB<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  r3 holds AAAAAAAA
+     *  r9 holds BBBB
+     */
+
+.LOP_INSTANCE_OF_JUMBO_resolve:
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [rSELF, #offThread_method]    @ r0<- self->method
+    mov     r1, r3                      @ r1<- AAAAAAAA
+    mov     r2, #1                      @ r2<- true
+    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
+    cmp     r0, #0                      @ got null?
+    beq     common_exceptionThrown      @ yes, handle exception
+    FETCH(r3, 4)                        @ r3<- CCCC
+    mov     r1, r0                      @ r1<- class resolved from AAAAAAAA
+    GET_VREG(r0, r3)                    @ r0<- vCCCC (object)
+    ldr     r0, [r0, #offObject_clazz]  @ r0<- obj->clazz
+    b       .LOP_INSTANCE_OF_JUMBO_resolved        @ pick up where we left off
+
+/* continuation for OP_NEW_INSTANCE_JUMBO */
+
+    .balign 32                          @ minimize cache lines
+.LOP_NEW_INSTANCE_JUMBO_finish: @ r0=new object
+    FETCH(r3, 3)                        @ r3<- BBBB
+    cmp     r0, #0                      @ failed?
+    beq     common_exceptionThrown      @ yes, handle the exception
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r3)                    @ vBBBB<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * Class initialization required.
+     *
+     *  r0 holds class object
+     */
+.LOP_NEW_INSTANCE_JUMBO_needinit:
+    mov     r9, r0                      @ save r0
+    bl      dvmInitClass                @ initialize class
+    cmp     r0, #0                      @ check boolean result
+    mov     r0, r9                      @ restore r0
+    bne     .LOP_NEW_INSTANCE_JUMBO_initialized     @ success, continue
+    b       common_exceptionThrown      @ failed, deal with init exception
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  r1 holds AAAAAAAA
+     */
+.LOP_NEW_INSTANCE_JUMBO_resolve:
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
+    cmp     r0, #0                      @ got null?
+    bne     .LOP_NEW_INSTANCE_JUMBO_resolved        @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
+/* continuation for OP_NEW_ARRAY_JUMBO */
+
+
+    /*
+     * Resolve class.  (This is an uncommon case.)
+     *
+     *  r1 holds array length
+     *  r2 holds class ref AAAAAAAA
+     */
+.LOP_NEW_ARRAY_JUMBO_resolve:
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    mov     r9, r1                      @ r9<- length (save)
+    mov     r1, r2                      @ r1<- AAAAAAAA
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- call(clazz, ref)
+    cmp     r0, #0                      @ got null?
+    mov     r1, r9                      @ r1<- length (restore)
+    beq     common_exceptionThrown      @ yes, handle exception
+    @ fall through to OP_NEW_ARRAY_JUMBO_finish
+
+    /*
+     * Finish allocation.
+     *
+     *  r0 holds class
+     *  r1 holds array length
+     */
+.LOP_NEW_ARRAY_JUMBO_finish:
+    mov     r2, #ALLOC_DONT_TRACK       @ don't track in local refs table
+    bl      dvmAllocArrayByClass        @ r0<- call(clazz, length, flags)
+    cmp     r0, #0                      @ failed?
+    FETCH(r2, 3)                        @ r2<- BBBB
+    beq     common_exceptionThrown      @ yes, handle the exception
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r2)                    @ vBBBB<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_FILLED_NEW_ARRAY_JUMBO */
+
+    /*
+     * On entry:
+     *  r0 holds array class
+     */
+.LOP_FILLED_NEW_ARRAY_JUMBO_continue:
+    ldr     r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor
+    mov     r2, #ALLOC_DONT_TRACK       @ r2<- alloc flags
+    ldrb    rINST, [r3, #1]             @ rINST<- descriptor[1]
+    FETCH(r1, 3)                        @ r1<- BBBB (length)
+    cmp     rINST, #'I'                 @ array of ints?
+    cmpne   rINST, #'L'                 @ array of objects?
+    cmpne   rINST, #'['                 @ array of arrays?
+    mov     r9, r1                      @ save length in r9
+    bne     .LOP_FILLED_NEW_ARRAY_JUMBO_notimpl         @ no, not handled yet
+    bl      dvmAllocArrayByClass        @ r0<- call(arClass, length, flags)
+    cmp     r0, #0                      @ null return?
+    beq     common_exceptionThrown      @ alloc failed, handle exception
+
+    FETCH(r1, 4)                        @ r1<- CCCC
+    str     r0, [rSELF, #offThread_retval]      @ retval.l <- new array
+    str     rINST, [rSELF, #offThread_retval+4] @ retval.h <- type
+    add     r0, r0, #offArrayObject_contents @ r0<- newArray->contents
+    subs    r9, r9, #1                  @ length--, check for neg
+    FETCH_ADVANCE_INST(5)               @ advance to next instr, load rINST
+    bmi     2f                          @ was zero, bail
+
+    @ copy values from registers into the array
+    @ r0=array, r1=CCCC, r9=BBBB (length)
+    add     r2, rFP, r1, lsl #2         @ r2<- &fp[CCCC]
+1:  ldr     r3, [r2], #4                @ r3<- *r2++
+    subs    r9, r9, #1                  @ count--
+    str     r3, [r0], #4                @ *contents++ = vX
+    bpl     1b
+
+2:  ldr     r0, [rSELF, #offThread_retval]     @ r0<- object
+    ldr     r1, [rSELF, #offThread_retval+4]   @ r1<- type
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
+    GET_INST_OPCODE(ip)                      @ ip<- opcode from rINST
+    cmp     r1, #'I'                         @ Is int array?
+    strneb  r2, [r2, r0, lsr #GC_CARD_SHIFT] @ Mark card based on object head
+    GOTO_OPCODE(ip)                          @ execute it
+
+    /*
+     * Throw an exception indicating that we have not implemented this
+     * mode of filled-new-array.
+     */
+.LOP_FILLED_NEW_ARRAY_JUMBO_notimpl:
+    ldr     r0, .L_strFilledNewArrayNotImpl
+    bl      dvmThrowInternalError
+    b       common_exceptionThrown
+
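The copy loop in the filled-new-array/jumbo continuation above transfers BBBB consecutive vregs starting at vCCCC into the fresh array, then dirties the GC card for the array unless it only holds ints. A simplified C rendering (a GC_CARD_SHIFT of 7 is assumed, and cardTable stands for the biased base kept in offThread_cardTable; the handler stores the base's low byte as the dirty value):

    #include <stdint.h>

    #define GC_CARD_SHIFT 7   /* assumed; 128-byte cards */

    static void filledNewArrayCopy(uint32_t *contents, const uint32_t *fp,
                                   uint32_t vCCCC, uint32_t length,
                                   char typeDescriptor, uint8_t *cardTable,
                                   const void *arrayObj)
    {
        /* copy vCCCC..vCCCC+length-1 into the array contents */
        for (uint32_t i = 0; i < length; i++)
            contents[i] = fp[vCCCC + i];

        /* mark the card unless the elements are plain ints */
        if (typeDescriptor != 'I')
            cardTable[(uintptr_t)arrayObj >> GC_CARD_SHIFT] =
                (uint8_t)(uintptr_t)cardTable;
    }
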
+/* continuation for OP_IGET_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_JUMBO_finish:
+    @bl      common_squeak0
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
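All of the iget/jumbo continuations that follow reduce to the same load once the field is resolved and the object is non-null: read at obj plus the field's byte offset. As a sketch, with a stand-in InstField struct:

    #include <stdint.h>

    typedef struct { int byteOffset; } InstField;    /* simplified stand-in */

    /* vBBBB <- obj.field: once resolved, an iget is just a load at
     * obj + field->byteOffset (the null check is done by the handler). */
    static uint32_t igetJumbo(const void *obj, const InstField *field)
    {
        return *(const uint32_t *)((const char *)obj + field->byteOffset);
    }
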
+/* continuation for OP_IGET_WIDE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_WIDE_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_WIDE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_WIDE_JUMBO_finish:
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    .if     0
+    add     r0, r9, r3                  @ r0<- address of field
+    bl      dvmQuasiAtomicRead64        @ r0/r1<- contents of field
+    .else
+    ldrd    r0, [r9, r3]                @ r0/r1<- obj.field (64-bit align ok)
+    .endif
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    add     r3, rFP, r2, lsl #2         @ r3<- &fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r3, {r0-r1}                 @ fp[BBBB]<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IGET_OBJECT_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_OBJECT_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_OBJECT_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_OBJECT_JUMBO_finish:
+    @bl      common_squeak0
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IGET_BOOLEAN_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_BOOLEAN_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_BOOLEAN_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_BOOLEAN_JUMBO_finish:
+    @bl      common_squeak1
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IGET_BYTE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_BYTE_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_BYTE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_BYTE_JUMBO_finish:
+    @bl      common_squeak2
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IGET_CHAR_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_CHAR_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_CHAR_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_CHAR_JUMBO_finish:
+    @bl      common_squeak3
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IGET_SHORT_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_SHORT_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_SHORT_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_SHORT_JUMBO_finish:
+    @bl      common_squeak4
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IPUT_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_JUMBO_finish:
+    @bl      common_squeak0
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IPUT_WIDE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_WIDE_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_WIDE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_WIDE_JUMBO_finish:
+    cmp     r9, #0                      @ check object for null
+    FETCH(r2, 3)                        @ r2<- BBBB
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    ldmia   r2, {r0-r1}                 @ r0/r1<- fp[BBBB]
+    GET_INST_OPCODE(r10)                @ extract opcode from rINST
+    .if     0
+    add     r2, r9, r3                  @ r2<- target address
+    bl      dvmQuasiAtomicSwap64        @ stores r0/r1 into addr r2
+    .else
+    strd    r0, [r9, r3]                @ obj.field (64 bits, aligned)<- r0/r1
+    .endif
+    GOTO_OPCODE(r10)                    @ jump to next instruction
+
+/* continuation for OP_IPUT_OBJECT_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_OBJECT_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_OBJECT_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_OBJECT_JUMBO_finish:
+    @bl      common_squeak0
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str     r0, [r9, r3]                @ obj.field (32 bits)<- r0
+    cmp     r0, #0                      @ stored a null reference?
+    strneb  r2, [r2, r9, lsr #GC_CARD_SHIFT]  @ mark card if not
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IPUT_BOOLEAN_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_BOOLEAN_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_BOOLEAN_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_BOOLEAN_JUMBO_finish:
+    @bl      common_squeak1
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IPUT_BYTE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_BYTE_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_BYTE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_BYTE_JUMBO_finish:
+    @bl      common_squeak2
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IPUT_CHAR_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_CHAR_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_CHAR_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_CHAR_JUMBO_finish:
+    @bl      common_squeak3
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IPUT_SHORT_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_SHORT_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_SHORT_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_SHORT_JUMBO_finish:
+    @bl      common_squeak4
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_SGET_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
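The sget/sput jumbo resolve continuations here all share one slow path: export the PC (resolution can throw), resolve the field against the current method's declaring class, and bail to common_exceptionThrown on a NULL result. Roughly, in C, with stand-in Method/StaticField types:

    #include <stdint.h>
    #include <stddef.h>

    typedef struct StaticField StaticField;          /* opaque stand-in */
    typedef struct ClassObject ClassObject;          /* opaque stand-in */
    typedef struct { ClassObject *clazz; } Method;   /* simplified */

    extern StaticField *dvmResolveStaticField(ClassObject *referrer,
                                              uint32_t fieldIdx);
    extern void exportPC(void);   /* stand-in for the EXPORT_PC() macro */

    /* Shared slow path: resolve a static field on a resolved-table miss.
     * NULL means an exception is pending (common_exceptionThrown). */
    static StaticField *resolveStaticFieldSlow(const Method *curMethod,
                                               uint32_t fieldIdx)
    {
        exportPC();                          /* resolution can throw */
        return dvmResolveStaticField(curMethod->clazz, fieldIdx);
    }
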
+/* continuation for OP_SGET_WIDE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     *
+     * Returns StaticField pointer in r0.
+     */
+.LOP_SGET_WIDE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_WIDE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_OBJECT_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_OBJECT_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_OBJECT_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_BOOLEAN_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_BOOLEAN_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_BOOLEAN_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_BYTE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_BYTE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_BYTE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_CHAR_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_CHAR_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_CHAR_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_SHORT_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_SHORT_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_SHORT_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SPUT_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_WIDE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     *  r9: &fp[BBBB]
+     *
+     * Returns StaticField pointer in r2.
+     */
+.LOP_SPUT_WIDE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    mov     r2, r0                      @ copy to r2
+    bne     .LOP_SPUT_WIDE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_OBJECT_JUMBO */
+
+.LOP_SPUT_OBJECT_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
+    ldr     r9, [r0, #offField_clazz]   @ r9<- field->clazz
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str     r1, [r0, #offStaticField_value]  @ field<- vBBBB
+    cmp     r1, #0                      @ stored a null object?
+    strneb  r2, [r2, r9, lsr #GC_CARD_SHIFT]  @ mark card based on obj head
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
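
The _finish block above is the reference-store variant: after writing the object into the static field it conditionally dirties a GC card, keyed off the address of the object that now holds the reference (here the field's declaring class), and skips the mark when the stored value is null. A rough C picture of that barrier; CARD_SHIFT and the byte value written are assumptions modelled on the strneb above, not the VM's actual constants:

    #include <stdint.h>
    #include <stddef.h>

    #define CARD_SHIFT 7   /* assumed stand-in for GC_CARD_SHIFT */

    struct Object;
    struct StaticField { struct Object *value; };

    static void sputObjectWithBarrier(struct StaticField *field,
                                      struct Object *newValue,
                                      uint8_t *cardTableBase,
                                      const void *holdingObject)
    {
        field->value = newValue;                 /* the (releasing) store */
        if (newValue != NULL) {                  /* cmp r1, #0 / strneb */
            /* strneb r2, [r2, r9, lsr #GC_CARD_SHIFT]: index by the holding
               object's address, write the table base's low byte as the mark. */
            cardTableBase[(uintptr_t)holdingObject >> CARD_SHIFT] =
                (uint8_t)(uintptr_t)cardTableBase;
        }
    }
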
+/* continuation for OP_SPUT_BOOLEAN_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SPUT_BOOLEAN_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_BOOLEAN_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_BYTE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SPUT_BYTE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_BYTE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_CHAR_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SPUT_CHAR_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_CHAR_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_SHORT_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SPUT_SHORT_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_SHORT_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_INVOKE_VIRTUAL_JUMBO */
+
+    /*
+     * At this point:
+     *  r0 = resolved base method
+     */
+.LOP_INVOKE_VIRTUAL_JUMBO_continue:
+    FETCH(r10, 4)                       @ r10<- CCCC
+    GET_VREG(r1, r10)                   @ r1<- "this" ptr
+    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
+    cmp     r1, #0                      @ is "this" null?
+    beq     common_errNullObject        @ null "this", throw exception
+    ldr     r3, [r1, #offObject_clazz]  @ r3<- thisPtr->clazz
+    ldr     r3, [r3, #offClassObject_vtable]    @ r3<- thisPtr->clazz->vtable
+    ldr     r0, [r3, r2, lsl #2]        @ r0<- vtable[methodIndex]
+    bl      common_invokeMethodJumbo    @ continue on
+
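
The virtual-invoke continuation is a plain vtable dispatch: null-check "this", then index the receiver class's vtable with the resolved base method's methodIndex and hand the result to the common invoke path. A hedged C sketch with simplified, hypothetical struct layouts:

    #include <stddef.h>

    struct Method;
    struct ClassObject { struct Method **vtable; };
    struct Object      { struct ClassObject *clazz; };
    struct Method      { unsigned short methodIndex; };

    static void throwNullPointerException(void) { /* common_errNullObject */ }
    static void invokeMethod(struct Method *m, struct Object *thisPtr)
    { (void)m; (void)thisPtr; /* common_invokeMethodJumbo */ }

    /* Mirrors .LOP_INVOKE_VIRTUAL_JUMBO_continue. */
    static void invokeVirtualContinue(struct Method *baseMethod,
                                      struct Object *thisPtr)
    {
        if (thisPtr == NULL) {
            throwNullPointerException();
            return;
        }
        struct Method *target = thisPtr->clazz->vtable[baseMethod->methodIndex];
        invokeMethod(target, thisPtr);
    }
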
+/* continuation for OP_INVOKE_SUPER_JUMBO */
+
+    /*
+     * At this point:
+     *  r0 = resolved base method
+     *  r9 = method->clazz
+     */
+.LOP_INVOKE_SUPER_JUMBO_continue:
+    ldr     r1, [r9, #offClassObject_super]     @ r1<- method->clazz->super
+    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
+    ldr     r3, [r1, #offClassObject_vtableCount]   @ r3<- super->vtableCount
+    EXPORT_PC()                         @ must export for invoke
+    cmp     r2, r3                      @ compare (methodIndex, vtableCount)
+    bcs     .LOP_INVOKE_SUPER_JUMBO_nsm             @ method not present in superclass
+    ldr     r1, [r1, #offClassObject_vtable]    @ r1<- ...clazz->super->vtable
+    ldr     r0, [r1, r2, lsl #2]        @ r0<- vtable[methodIndex]
+    bl      common_invokeMethodJumbo    @ continue on
+
+.LOP_INVOKE_SUPER_JUMBO_resolve:
+    mov     r0, r9                      @ r0<- method->clazz
+    mov     r2, #METHOD_VIRTUAL         @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    bne     .LOP_INVOKE_SUPER_JUMBO_continue        @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
+    /*
+     * Throw a NoSuchMethodError with the method name as the message.
+     *  r0 = resolved base method
+     */
+.LOP_INVOKE_SUPER_JUMBO_nsm:
+    ldr     r1, [r0, #offMethod_name]   @ r1<- method name
+    b       common_errNoSuchMethod
+
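
invoke-super differs only in where the table comes from: the lookup goes through the caller's superclass's vtable, and methodIndex is range-checked against that table's vtableCount first, branching to the NoSuchMethodError path (with the method name as the message) when it is out of range. A minimal sketch, again with hypothetical layouts:

    #include <stddef.h>

    struct Method;
    struct ClassObject {
        struct ClassObject *super;
        struct Method     **vtable;
        int                 vtableCount;
    };
    struct Method { const char *name; unsigned short methodIndex; };

    static void throwNoSuchMethodError(const char *name) { (void)name; }
    static void invokeMethod(struct Method *m) { (void)m; }

    /* Mirrors .LOP_INVOKE_SUPER_JUMBO_continue and _nsm. */
    static void invokeSuperContinue(struct Method *baseMethod,
                                    struct ClassObject *callerClass)
    {
        struct ClassObject *super = callerClass->super;
        if (baseMethod->methodIndex >= super->vtableCount) {
            throwNoSuchMethodError(baseMethod->name);   /* _nsm */
            return;
        }
        invokeMethod(super->vtable[baseMethod->methodIndex]);
    }
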
+/* continuation for OP_INVOKE_DIRECT_JUMBO */
+
+    /*
+     * On entry:
+     *  r1 = reference (CCCC)
+     *  r10 = "this" register
+     */
+.LOP_INVOKE_DIRECT_JUMBO_resolve:
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    mov     r2, #METHOD_DIRECT          @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    GET_VREG(r2, r10)                   @ r2<- "this" ptr (reload)
+    bne     .LOP_INVOKE_DIRECT_JUMBO_finish          @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
+/* continuation for OP_IGET_VOLATILE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_VOLATILE_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_VOLATILE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_VOLATILE_JUMBO_finish:
+    @bl      common_squeak0
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    SMP_DMB                            @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IGET_WIDE_VOLATILE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_WIDE_VOLATILE_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_WIDE_VOLATILE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_WIDE_VOLATILE_JUMBO_finish:
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    .if     1
+    add     r0, r9, r3                  @ r0<- address of field
+    bl      dvmQuasiAtomicRead64        @ r0/r1<- contents of field
+    .else
+    ldrd    r0, [r9, r3]                @ r0/r1<- obj.field (64-bit align ok)
+    .endif
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    add     r3, rFP, r2, lsl #2         @ r3<- &fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r3, {r0-r1}                 @ fp[BBBB]<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
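
ARMv5 has no single-copy-atomic 64-bit load or store, so the wide-volatile handlers take the `.if 1` branch and call dvmQuasiAtomicRead64 / dvmQuasiAtomicSwap64 instead of using ldrd/strd directly. The intended semantics are roughly those of a 64-bit atomic access; a C11 sketch of that intent (the real routines' signatures and implementation are not shown here and may differ):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Approximate semantics only; the names are hypothetical. */
    static int64_t wideVolatileRead(const void *obj, size_t byteOffset)
    {
        const _Atomic int64_t *addr =
            (const _Atomic int64_t *)((const char *)obj + byteOffset);
        return atomic_load_explicit(addr, memory_order_seq_cst);
    }

    static void wideVolatileWrite(void *obj, size_t byteOffset, int64_t value)
    {
        _Atomic int64_t *addr = (_Atomic int64_t *)((char *)obj + byteOffset);
        atomic_exchange_explicit(addr, value, memory_order_seq_cst);
    }
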
+/* continuation for OP_IGET_OBJECT_VOLATILE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_OBJECT_VOLATILE_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_OBJECT_VOLATILE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_OBJECT_VOLATILE_JUMBO_finish:
+    @bl      common_squeak0
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    SMP_DMB                            @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IPUT_VOLATILE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_VOLATILE_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_VOLATILE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_VOLATILE_JUMBO_finish:
+    @bl      common_squeak0
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SMP_DMB                            @ releasing store
+    str  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
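
Note the barrier placement in the two volatile paths: the iget variant issues SMP_DMB after the load (an acquiring load), while this iput variant issues it before the store (a releasing store). In C11 terms these correspond roughly to memory_order_acquire and memory_order_release; a hedged sketch with hypothetical helpers:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stddef.h>

    static int32_t igetVolatile32(const void *obj, size_t byteOffset)
    {
        const _Atomic int32_t *addr =
            (const _Atomic int32_t *)((const char *)obj + byteOffset);
        /* ldr ... ; SMP_DMB  ->  load-acquire */
        return atomic_load_explicit(addr, memory_order_acquire);
    }

    static void iputVolatile32(void *obj, size_t byteOffset, int32_t value)
    {
        _Atomic int32_t *addr = (_Atomic int32_t *)((char *)obj + byteOffset);
        /* SMP_DMB ; str ...  ->  store-release */
        atomic_store_explicit(addr, value, memory_order_release);
    }
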
+/* continuation for OP_IPUT_WIDE_VOLATILE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_WIDE_VOLATILE_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_WIDE_VOLATILE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_WIDE_VOLATILE_JUMBO_finish:
+    cmp     r9, #0                      @ check object for null
+    FETCH(r2, 3)                        @ r2<- BBBB
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    ldmia   r2, {r0-r1}                 @ r0/r1<- fp[BBBB]
+    GET_INST_OPCODE(r10)                @ extract opcode from rINST
+    .if     1
+    add     r2, r9, r3                  @ r2<- target address
+    bl      dvmQuasiAtomicSwap64        @ stores r0/r1 into addr r2
+    .else
+    strd    r0, [r9, r3]                @ obj.field (64 bits, aligned)<- r0/r1
+    .endif
+    GOTO_OPCODE(r10)                    @ jump to next instruction
+
+/* continuation for OP_IPUT_OBJECT_VOLATILE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_OBJECT_VOLATILE_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_OBJECT_VOLATILE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_OBJECT_VOLATILE_JUMBO_finish:
+    @bl      common_squeak0
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SMP_DMB                            @ releasing store
+    str     r0, [r9, r3]                @ obj.field (32 bits)<- r0
+    cmp     r0, #0                      @ stored a null reference?
+    strneb  r2, [r2, r9, lsr #GC_CARD_SHIFT]  @ mark card if not
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_SGET_VOLATILE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_VOLATILE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_VOLATILE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_WIDE_VOLATILE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     *
+     * Returns StaticField pointer in r0.
+     */
+.LOP_SGET_WIDE_VOLATILE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_WIDE_VOLATILE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_OBJECT_VOLATILE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_OBJECT_VOLATILE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_OBJECT_VOLATILE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_VOLATILE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SPUT_VOLATILE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_VOLATILE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_WIDE_VOLATILE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     *  r9: &fp[BBBB]
+     *
+     * Returns StaticField pointer in r2.
+     */
+.LOP_SPUT_WIDE_VOLATILE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    mov     r2, r0                      @ copy to r2
+    bne     .LOP_SPUT_WIDE_VOLATILE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_OBJECT_VOLATILE_JUMBO */
+
+.LOP_SPUT_OBJECT_VOLATILE_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
+    ldr     r9, [r0, #offField_clazz]   @ r9<- field->clazz
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SMP_DMB                            @ releasing store
+    str     r1, [r0, #offStaticField_value]  @ field<- vBBBB
+    cmp     r1, #0                      @ stored a null object?
+    strneb  r2, [r2, r9, lsr #GC_CARD_SHIFT]  @ mark card based on obj head
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
     .size   dvmAsmSisterStart, .-dvmAsmSisterStart
     .global dvmAsmSisterEnd
 dvmAsmSisterEnd:
 
+
+    .global dvmAsmAltInstructionStart
+    .type   dvmAsmAltInstructionStart, %function
+dvmAsmAltInstructionStart:
+    .text
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NOP: /* 0x00 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (0 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
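
Each of the alt stubs that follow is stamped out from the same template: compute the address of the real handler as dvmAsmInstructionStart + opcode * 64 (each handler is padded to 64 bytes by the .balign 64), park it in lr, and tail-call dvmCheckInst so the checker can run and then continue into the real handler through lr. A rough C rendering of that dispatch idea, with a hypothetical checker standing in for dvmCheckInst:

    #include <stdint.h>

    #define HANDLER_SIZE 64   /* matches the .balign 64 padding of each handler */

    typedef void (*Handler)(const uint16_t *pc, void *self);

    extern const char dvmAsmInstructionStart[];   /* base of the real handler table */

    static void checkBeforeHandler(const uint16_t *pc, void *self)
    { (void)pc; (void)self; /* stand-in for whatever dvmCheckInst does */ }

    /* What one .L_ALT_OP_* stub amounts to: run the checker, then transfer to
       the real handler for this opcode (the assembly does it as a tail call,
       with the handler address preloaded into lr). */
    static void altStub(unsigned opcode, const uint16_t *pc, void *self)
    {
        Handler real =
            (Handler)(uintptr_t)(dvmAsmInstructionStart + opcode * HANDLER_SIZE);
        checkBeforeHandler(pc, self);
        real(pc, self);
    }
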
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE: /* 0x01 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (1 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_FROM16: /* 0x02 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (2 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_16: /* 0x03 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (3 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_WIDE: /* 0x04 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (4 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_WIDE_FROM16: /* 0x05 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (5 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_WIDE_16: /* 0x06 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (6 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_OBJECT: /* 0x07 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (7 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_OBJECT_FROM16: /* 0x08 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (8 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_OBJECT_16: /* 0x09 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (9 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_RESULT: /* 0x0a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (10 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_RESULT_WIDE: /* 0x0b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (11 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_RESULT_OBJECT: /* 0x0c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (12 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_EXCEPTION: /* 0x0d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (13 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_RETURN_VOID: /* 0x0e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (14 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_RETURN: /* 0x0f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (15 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_RETURN_WIDE: /* 0x10 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (16 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_RETURN_OBJECT: /* 0x11 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (17 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_4: /* 0x12 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (18 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_16: /* 0x13 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (19 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST: /* 0x14 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (20 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_HIGH16: /* 0x15 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (21 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_WIDE_16: /* 0x16 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (22 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_WIDE_32: /* 0x17 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (23 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_WIDE: /* 0x18 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (24 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_WIDE_HIGH16: /* 0x19 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (25 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_STRING: /* 0x1a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (26 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_STRING_JUMBO: /* 0x1b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (27 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_CLASS: /* 0x1c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (28 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MONITOR_ENTER: /* 0x1d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (29 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MONITOR_EXIT: /* 0x1e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (30 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CHECK_CAST: /* 0x1f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (31 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INSTANCE_OF: /* 0x20 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (32 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ARRAY_LENGTH: /* 0x21 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (33 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEW_INSTANCE: /* 0x22 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (34 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEW_ARRAY: /* 0x23 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (35 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_FILLED_NEW_ARRAY: /* 0x24 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (36 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (37 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_FILL_ARRAY_DATA: /* 0x26 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (38 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_THROW: /* 0x27 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (39 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_GOTO: /* 0x28 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (40 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_GOTO_16: /* 0x29 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (41 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_GOTO_32: /* 0x2a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (42 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_PACKED_SWITCH: /* 0x2b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (43 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPARSE_SWITCH: /* 0x2c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (44 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CMPL_FLOAT: /* 0x2d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (45 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CMPG_FLOAT: /* 0x2e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (46 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CMPL_DOUBLE: /* 0x2f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (47 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CMPG_DOUBLE: /* 0x30 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (48 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CMP_LONG: /* 0x31 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (49 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_EQ: /* 0x32 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (50 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_NE: /* 0x33 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (51 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_LT: /* 0x34 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (52 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_GE: /* 0x35 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (53 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_GT: /* 0x36 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (54 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_LE: /* 0x37 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (55 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_EQZ: /* 0x38 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (56 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_NEZ: /* 0x39 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (57 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_LTZ: /* 0x3a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (58 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_GEZ: /* 0x3b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (59 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_GTZ: /* 0x3c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (60 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_LEZ: /* 0x3d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (61 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3E: /* 0x3e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (62 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3F: /* 0x3f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (63 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_40: /* 0x40 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (64 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_41: /* 0x41 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (65 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_42: /* 0x42 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (66 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_43: /* 0x43 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (67 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AGET: /* 0x44 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (68 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AGET_WIDE: /* 0x45 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (69 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AGET_OBJECT: /* 0x46 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (70 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AGET_BOOLEAN: /* 0x47 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (71 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AGET_BYTE: /* 0x48 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (72 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AGET_CHAR: /* 0x49 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (73 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AGET_SHORT: /* 0x4a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (74 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_APUT: /* 0x4b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (75 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_APUT_WIDE: /* 0x4c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (76 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_APUT_OBJECT: /* 0x4d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (77 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_APUT_BOOLEAN: /* 0x4e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (78 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_APUT_BYTE: /* 0x4f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (79 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_APUT_CHAR: /* 0x50 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (80 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_APUT_SHORT: /* 0x51 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (81 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET: /* 0x52 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (82 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_WIDE: /* 0x53 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (83 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_OBJECT: /* 0x54 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (84 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_BOOLEAN: /* 0x55 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (85 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_BYTE: /* 0x56 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (86 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_CHAR: /* 0x57 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (87 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_SHORT: /* 0x58 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (88 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT: /* 0x59 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (89 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_WIDE: /* 0x5a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (90 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_OBJECT: /* 0x5b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (91 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_BOOLEAN: /* 0x5c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (92 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_BYTE: /* 0x5d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (93 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_CHAR: /* 0x5e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (94 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_SHORT: /* 0x5f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (95 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET: /* 0x60 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (96 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_WIDE: /* 0x61 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (97 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_OBJECT: /* 0x62 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (98 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_BOOLEAN: /* 0x63 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (99 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_BYTE: /* 0x64 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (100 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_CHAR: /* 0x65 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (101 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_SHORT: /* 0x66 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (102 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT: /* 0x67 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (103 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_WIDE: /* 0x68 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (104 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_OBJECT: /* 0x69 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (105 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_BOOLEAN: /* 0x6a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (106 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_BYTE: /* 0x6b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (107 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_CHAR: /* 0x6c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (108 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_SHORT: /* 0x6d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (109 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_VIRTUAL: /* 0x6e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (110 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_SUPER: /* 0x6f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (111 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_DIRECT: /* 0x70 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (112 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_STATIC: /* 0x71 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (113 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_INTERFACE: /* 0x72 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (114 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_73: /* 0x73 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (115 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (116 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_SUPER_RANGE: /* 0x75 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (117 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_DIRECT_RANGE: /* 0x76 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (118 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_STATIC_RANGE: /* 0x77 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (119 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (120 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_79: /* 0x79 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (121 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_7A: /* 0x7a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (122 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEG_INT: /* 0x7b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (123 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NOT_INT: /* 0x7c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (124 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEG_LONG: /* 0x7d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (125 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NOT_LONG: /* 0x7e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (126 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEG_FLOAT: /* 0x7f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (127 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEG_DOUBLE: /* 0x80 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (128 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INT_TO_LONG: /* 0x81 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (129 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INT_TO_FLOAT: /* 0x82 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (130 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INT_TO_DOUBLE: /* 0x83 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (131 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_LONG_TO_INT: /* 0x84 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (132 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_LONG_TO_FLOAT: /* 0x85 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (133 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_LONG_TO_DOUBLE: /* 0x86 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (134 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_FLOAT_TO_INT: /* 0x87 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (135 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_FLOAT_TO_LONG: /* 0x88 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (136 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_FLOAT_TO_DOUBLE: /* 0x89 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (137 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DOUBLE_TO_INT: /* 0x8a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (138 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DOUBLE_TO_LONG: /* 0x8b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (139 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DOUBLE_TO_FLOAT: /* 0x8c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (140 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INT_TO_BYTE: /* 0x8d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (141 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INT_TO_CHAR: /* 0x8e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (142 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INT_TO_SHORT: /* 0x8f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (143 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_INT: /* 0x90 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (144 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_INT: /* 0x91 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (145 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_INT: /* 0x92 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (146 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_INT: /* 0x93 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (147 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_INT: /* 0x94 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (148 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AND_INT: /* 0x95 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (149 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_OR_INT: /* 0x96 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (150 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_XOR_INT: /* 0x97 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (151 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHL_INT: /* 0x98 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (152 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHR_INT: /* 0x99 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (153 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_USHR_INT: /* 0x9a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (154 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_LONG: /* 0x9b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (155 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_LONG: /* 0x9c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (156 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_LONG: /* 0x9d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (157 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_LONG: /* 0x9e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (158 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_LONG: /* 0x9f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (159 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AND_LONG: /* 0xa0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (160 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_OR_LONG: /* 0xa1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (161 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_XOR_LONG: /* 0xa2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (162 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHL_LONG: /* 0xa3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (163 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHR_LONG: /* 0xa4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (164 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_USHR_LONG: /* 0xa5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (165 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_FLOAT: /* 0xa6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (166 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_FLOAT: /* 0xa7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (167 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_FLOAT: /* 0xa8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (168 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_FLOAT: /* 0xa9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (169 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_FLOAT: /* 0xaa */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (170 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_DOUBLE: /* 0xab */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (171 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_DOUBLE: /* 0xac */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (172 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_DOUBLE: /* 0xad */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (173 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_DOUBLE: /* 0xae */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (174 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_DOUBLE: /* 0xaf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (175 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_INT_2ADDR: /* 0xb0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (176 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_INT_2ADDR: /* 0xb1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (177 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_INT_2ADDR: /* 0xb2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (178 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_INT_2ADDR: /* 0xb3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (179 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_INT_2ADDR: /* 0xb4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (180 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AND_INT_2ADDR: /* 0xb5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (181 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_OR_INT_2ADDR: /* 0xb6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (182 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_XOR_INT_2ADDR: /* 0xb7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (183 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHL_INT_2ADDR: /* 0xb8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (184 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHR_INT_2ADDR: /* 0xb9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (185 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_USHR_INT_2ADDR: /* 0xba */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (186 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_LONG_2ADDR: /* 0xbb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (187 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_LONG_2ADDR: /* 0xbc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (188 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_LONG_2ADDR: /* 0xbd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (189 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_LONG_2ADDR: /* 0xbe */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (190 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_LONG_2ADDR: /* 0xbf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (191 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AND_LONG_2ADDR: /* 0xc0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (192 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_OR_LONG_2ADDR: /* 0xc1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (193 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_XOR_LONG_2ADDR: /* 0xc2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (194 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHL_LONG_2ADDR: /* 0xc3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (195 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHR_LONG_2ADDR: /* 0xc4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (196 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_USHR_LONG_2ADDR: /* 0xc5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (197 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_FLOAT_2ADDR: /* 0xc6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (198 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_FLOAT_2ADDR: /* 0xc7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (199 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_FLOAT_2ADDR: /* 0xc8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (200 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_FLOAT_2ADDR: /* 0xc9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (201 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_FLOAT_2ADDR: /* 0xca */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (202 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_DOUBLE_2ADDR: /* 0xcb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (203 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_DOUBLE_2ADDR: /* 0xcc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (204 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_DOUBLE_2ADDR: /* 0xcd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (205 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_DOUBLE_2ADDR: /* 0xce */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (206 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_DOUBLE_2ADDR: /* 0xcf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (207 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_INT_LIT16: /* 0xd0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (208 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_RSUB_INT: /* 0xd1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (209 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_INT_LIT16: /* 0xd2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (210 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_INT_LIT16: /* 0xd3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (211 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_INT_LIT16: /* 0xd4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (212 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AND_INT_LIT16: /* 0xd5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (213 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_OR_INT_LIT16: /* 0xd6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (214 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_XOR_INT_LIT16: /* 0xd7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (215 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_INT_LIT8: /* 0xd8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (216 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_RSUB_INT_LIT8: /* 0xd9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (217 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_INT_LIT8: /* 0xda */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (218 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_INT_LIT8: /* 0xdb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (219 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_INT_LIT8: /* 0xdc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (220 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AND_INT_LIT8: /* 0xdd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (221 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_OR_INT_LIT8: /* 0xde */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (222 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_XOR_INT_LIT8: /* 0xdf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (223 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHL_INT_LIT8: /* 0xe0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (224 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHR_INT_LIT8: /* 0xe1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (225 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_USHR_INT_LIT8: /* 0xe2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (226 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_VOLATILE: /* 0xe3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (227 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_VOLATILE: /* 0xe4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (228 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_VOLATILE: /* 0xe5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (229 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_VOLATILE: /* 0xe6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (230 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_OBJECT_VOLATILE: /* 0xe7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (231 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_WIDE_VOLATILE: /* 0xe8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (232 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_WIDE_VOLATILE: /* 0xe9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (233 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_WIDE_VOLATILE: /* 0xea */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (234 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_WIDE_VOLATILE: /* 0xeb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (235 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_BREAKPOINT: /* 0xec */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (236 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_THROW_VERIFICATION_ERROR: /* 0xed */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (237 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_EXECUTE_INLINE: /* 0xee */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (238 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_EXECUTE_INLINE_RANGE: /* 0xef */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (239 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_OBJECT_INIT_RANGE: /* 0xf0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (240 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_RETURN_VOID_BARRIER: /* 0xf1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (241 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_QUICK: /* 0xf2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (242 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_WIDE_QUICK: /* 0xf3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (243 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_OBJECT_QUICK: /* 0xf4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (244 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_QUICK: /* 0xf5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (245 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_WIDE_QUICK: /* 0xf6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (246 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_OBJECT_QUICK: /* 0xf7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (247 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (248 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (249 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_SUPER_QUICK: /* 0xfa */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (250 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (251 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_OBJECT_VOLATILE: /* 0xfc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (252 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_OBJECT_VOLATILE: /* 0xfd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (253 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_OBJECT_VOLATILE: /* 0xfe */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (254 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DISPATCH_FF: /* 0xff */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (255 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_CLASS_JUMBO: /* 0x100 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (256 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CHECK_CAST_JUMBO: /* 0x101 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (257 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INSTANCE_OF_JUMBO: /* 0x102 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (258 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEW_INSTANCE_JUMBO: /* 0x103 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (259 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEW_ARRAY_JUMBO: /* 0x104 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (260 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_FILLED_NEW_ARRAY_JUMBO: /* 0x105 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (261 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_JUMBO: /* 0x106 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (262 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_WIDE_JUMBO: /* 0x107 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (263 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_OBJECT_JUMBO: /* 0x108 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (264 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_BOOLEAN_JUMBO: /* 0x109 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (265 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_BYTE_JUMBO: /* 0x10a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (266 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_CHAR_JUMBO: /* 0x10b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (267 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_SHORT_JUMBO: /* 0x10c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (268 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_JUMBO: /* 0x10d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (269 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_WIDE_JUMBO: /* 0x10e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (270 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_OBJECT_JUMBO: /* 0x10f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (271 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_BOOLEAN_JUMBO: /* 0x110 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (272 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_BYTE_JUMBO: /* 0x111 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (273 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_CHAR_JUMBO: /* 0x112 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (274 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_SHORT_JUMBO: /* 0x113 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (275 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_JUMBO: /* 0x114 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (276 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_WIDE_JUMBO: /* 0x115 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (277 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_OBJECT_JUMBO: /* 0x116 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (278 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_BOOLEAN_JUMBO: /* 0x117 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (279 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_BYTE_JUMBO: /* 0x118 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (280 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_CHAR_JUMBO: /* 0x119 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (281 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_SHORT_JUMBO: /* 0x11a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (282 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_JUMBO: /* 0x11b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (283 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_WIDE_JUMBO: /* 0x11c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (284 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_OBJECT_JUMBO: /* 0x11d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (285 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_BOOLEAN_JUMBO: /* 0x11e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (286 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_BYTE_JUMBO: /* 0x11f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (287 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_CHAR_JUMBO: /* 0x120 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (288 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_SHORT_JUMBO: /* 0x121 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (289 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_VIRTUAL_JUMBO: /* 0x122 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (290 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_SUPER_JUMBO: /* 0x123 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (291 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_DIRECT_JUMBO: /* 0x124 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (292 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_STATIC_JUMBO: /* 0x125 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (293 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_INTERFACE_JUMBO: /* 0x126 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (294 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_27FF: /* 0x127 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (295 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_28FF: /* 0x128 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (296 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_29FF: /* 0x129 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (297 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_2AFF: /* 0x12a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (298 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_2BFF: /* 0x12b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (299 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_2CFF: /* 0x12c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (300 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_2DFF: /* 0x12d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (301 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_2EFF: /* 0x12e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (302 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_2FFF: /* 0x12f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (303 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_30FF: /* 0x130 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (304 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_31FF: /* 0x131 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (305 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_32FF: /* 0x132 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (306 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_33FF: /* 0x133 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (307 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_34FF: /* 0x134 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (308 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_35FF: /* 0x135 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (309 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_36FF: /* 0x136 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (310 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_37FF: /* 0x137 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (311 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_38FF: /* 0x138 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (312 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_39FF: /* 0x139 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (313 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3AFF: /* 0x13a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (314 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3BFF: /* 0x13b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (315 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3CFF: /* 0x13c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (316 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3DFF: /* 0x13d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (317 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3EFF: /* 0x13e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (318 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3FFF: /* 0x13f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (319 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_40FF: /* 0x140 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (320 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_41FF: /* 0x141 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (321 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_42FF: /* 0x142 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (322 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_43FF: /* 0x143 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (323 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_44FF: /* 0x144 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (324 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_45FF: /* 0x145 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (325 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_46FF: /* 0x146 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (326 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_47FF: /* 0x147 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (327 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_48FF: /* 0x148 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (328 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_49FF: /* 0x149 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (329 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_4AFF: /* 0x14a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (330 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_4BFF: /* 0x14b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (331 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_4CFF: /* 0x14c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (332 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_4DFF: /* 0x14d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (333 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_4EFF: /* 0x14e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (334 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_4FFF: /* 0x14f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (335 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_50FF: /* 0x150 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (336 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_51FF: /* 0x151 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (337 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_52FF: /* 0x152 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (338 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_53FF: /* 0x153 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (339 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_54FF: /* 0x154 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (340 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_55FF: /* 0x155 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (341 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_56FF: /* 0x156 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (342 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_57FF: /* 0x157 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (343 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_58FF: /* 0x158 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (344 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_59FF: /* 0x159 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (345 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_5AFF: /* 0x15a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (346 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_5BFF: /* 0x15b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (347 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_5CFF: /* 0x15c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (348 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_5DFF: /* 0x15d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (349 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_5EFF: /* 0x15e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (350 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_5FFF: /* 0x15f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (351 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_60FF: /* 0x160 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (352 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_61FF: /* 0x161 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (353 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_62FF: /* 0x162 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (354 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_63FF: /* 0x163 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (355 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_64FF: /* 0x164 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (356 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_65FF: /* 0x165 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (357 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_66FF: /* 0x166 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (358 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_67FF: /* 0x167 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (359 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_68FF: /* 0x168 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (360 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_69FF: /* 0x169 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (361 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_6AFF: /* 0x16a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (362 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_6BFF: /* 0x16b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (363 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_6CFF: /* 0x16c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (364 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_6DFF: /* 0x16d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (365 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_6EFF: /* 0x16e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (366 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_6FFF: /* 0x16f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (367 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_70FF: /* 0x170 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (368 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_71FF: /* 0x171 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (369 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_72FF: /* 0x172 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (370 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_73FF: /* 0x173 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (371 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_74FF: /* 0x174 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (372 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_75FF: /* 0x175 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (373 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_76FF: /* 0x176 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (374 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_77FF: /* 0x177 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (375 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_78FF: /* 0x178 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (376 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_79FF: /* 0x179 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (377 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_7AFF: /* 0x17a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (378 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_7BFF: /* 0x17b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (379 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_7CFF: /* 0x17c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (380 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_7DFF: /* 0x17d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (381 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_7EFF: /* 0x17e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (382 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_7FFF: /* 0x17f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (383 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_80FF: /* 0x180 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (384 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_81FF: /* 0x181 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (385 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_82FF: /* 0x182 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (386 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_83FF: /* 0x183 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (387 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_84FF: /* 0x184 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (388 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_85FF: /* 0x185 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (389 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_86FF: /* 0x186 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (390 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_87FF: /* 0x187 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (391 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_88FF: /* 0x188 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (392 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_89FF: /* 0x189 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (393 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_8AFF: /* 0x18a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (394 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_8BFF: /* 0x18b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (395 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_8CFF: /* 0x18c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (396 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_8DFF: /* 0x18d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (397 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_8EFF: /* 0x18e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (398 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_8FFF: /* 0x18f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (399 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_90FF: /* 0x190 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (400 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_91FF: /* 0x191 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (401 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_92FF: /* 0x192 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (402 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_93FF: /* 0x193 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (403 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_94FF: /* 0x194 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (404 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_95FF: /* 0x195 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (405 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_96FF: /* 0x196 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (406 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_97FF: /* 0x197 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (407 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_98FF: /* 0x198 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (408 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_99FF: /* 0x199 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (409 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_9AFF: /* 0x19a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (410 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_9BFF: /* 0x19b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (411 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_9CFF: /* 0x19c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (412 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_9DFF: /* 0x19d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (413 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_9EFF: /* 0x19e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (414 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_9FFF: /* 0x19f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (415 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A0FF: /* 0x1a0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (416 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A1FF: /* 0x1a1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (417 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A2FF: /* 0x1a2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (418 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A3FF: /* 0x1a3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (419 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A4FF: /* 0x1a4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (420 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A5FF: /* 0x1a5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (421 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A6FF: /* 0x1a6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (422 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A7FF: /* 0x1a7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (423 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A8FF: /* 0x1a8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (424 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A9FF: /* 0x1a9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (425 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_AAFF: /* 0x1aa */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (426 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_ABFF: /* 0x1ab */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (427 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_ACFF: /* 0x1ac */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (428 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_ADFF: /* 0x1ad */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (429 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_AEFF: /* 0x1ae */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (430 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_AFFF: /* 0x1af */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (431 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B0FF: /* 0x1b0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (432 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B1FF: /* 0x1b1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (433 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B2FF: /* 0x1b2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (434 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B3FF: /* 0x1b3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (435 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B4FF: /* 0x1b4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (436 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B5FF: /* 0x1b5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (437 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B6FF: /* 0x1b6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (438 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B7FF: /* 0x1b7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (439 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B8FF: /* 0x1b8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (440 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B9FF: /* 0x1b9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (441 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_BAFF: /* 0x1ba */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (442 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_BBFF: /* 0x1bb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (443 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_BCFF: /* 0x1bc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (444 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_BDFF: /* 0x1bd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (445 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_BEFF: /* 0x1be */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (446 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_BFFF: /* 0x1bf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (447 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C0FF: /* 0x1c0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (448 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C1FF: /* 0x1c1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (449 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C2FF: /* 0x1c2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (450 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C3FF: /* 0x1c3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (451 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C4FF: /* 0x1c4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (452 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C5FF: /* 0x1c5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (453 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C6FF: /* 0x1c6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (454 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C7FF: /* 0x1c7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (455 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C8FF: /* 0x1c8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (456 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C9FF: /* 0x1c9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (457 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_CAFF: /* 0x1ca */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (458 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_CBFF: /* 0x1cb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (459 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_CCFF: /* 0x1cc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (460 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_CDFF: /* 0x1cd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (461 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_CEFF: /* 0x1ce */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (462 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_CFFF: /* 0x1cf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (463 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D0FF: /* 0x1d0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (464 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D1FF: /* 0x1d1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (465 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D2FF: /* 0x1d2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (466 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D3FF: /* 0x1d3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (467 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D4FF: /* 0x1d4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (468 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D5FF: /* 0x1d5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (469 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D6FF: /* 0x1d6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (470 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D7FF: /* 0x1d7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (471 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D8FF: /* 0x1d8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (472 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D9FF: /* 0x1d9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (473 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_DAFF: /* 0x1da */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (474 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_DBFF: /* 0x1db */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (475 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_DCFF: /* 0x1dc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (476 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_DDFF: /* 0x1dd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (477 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_DEFF: /* 0x1de */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (478 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_DFFF: /* 0x1df */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (479 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E0FF: /* 0x1e0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (480 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E1FF: /* 0x1e1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (481 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E2FF: /* 0x1e2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (482 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E3FF: /* 0x1e3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (483 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E4FF: /* 0x1e4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (484 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E5FF: /* 0x1e5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (485 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E6FF: /* 0x1e6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (486 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E7FF: /* 0x1e7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (487 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E8FF: /* 0x1e8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (488 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E9FF: /* 0x1e9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (489 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_EAFF: /* 0x1ea */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (490 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_EBFF: /* 0x1eb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (491 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_ECFF: /* 0x1ec */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (492 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_EDFF: /* 0x1ed */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (493 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_EEFF: /* 0x1ee */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (494 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_EFFF: /* 0x1ef */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (495 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_F0FF: /* 0x1f0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (496 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_F1FF: /* 0x1f1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (497 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_OBJECT_INIT_JUMBO: /* 0x1f2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (498 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_VOLATILE_JUMBO: /* 0x1f3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (499 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_WIDE_VOLATILE_JUMBO: /* 0x1f4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (500 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_OBJECT_VOLATILE_JUMBO: /* 0x1f5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (501 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_VOLATILE_JUMBO: /* 0x1f6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (502 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_WIDE_VOLATILE_JUMBO: /* 0x1f7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (503 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_OBJECT_VOLATILE_JUMBO: /* 0x1f8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (504 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_VOLATILE_JUMBO: /* 0x1f9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (505 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_WIDE_VOLATILE_JUMBO: /* 0x1fa */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (506 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_OBJECT_VOLATILE_JUMBO: /* 0x1fb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (507 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_VOLATILE_JUMBO: /* 0x1fc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (508 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_WIDE_VOLATILE_JUMBO: /* 0x1fd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (509 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_OBJECT_VOLATILE_JUMBO: /* 0x1fe */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (510 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_THROW_VERIFICATION_ERROR_JUMBO: /* 0x1ff */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (511 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+    .balign 64
+    .size   dvmAsmAltInstructionStart, .-dvmAsmAltInstructionStart
+    .global dvmAsmAltInstructionEnd
+dvmAsmAltInstructionEnd:
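Each alt stub above occupies a fixed 64-byte slot, so the address of the matching real handler is simply dvmAsmInstructionStart plus the opcode number times 64; the stub parks that address in lr and tail-calls dvmCheckInst, which eventually "returns" into the real handler. A minimal C sketch of that fixed-stride lookup follows; kHandlerStride, InstHandler, realHandlerFor and altStub are illustrative names, not Dalvik definitions.

    #include <stdint.h>

    enum { kHandlerStride = 64 };            /* every handler is padded to 64 bytes */

    typedef void (*InstHandler)(const uint16_t *pc, void *self);

    /* Address of the real handler for an opcode, given the table base. */
    static InstHandler realHandlerFor(uintptr_t instructionStart, unsigned opcode) {
        return (InstHandler)(instructionStart + (uintptr_t)opcode * kHandlerStride);
    }

    /* What a stub does, expressed as a call sequence rather than a tail call. */
    static void altStub(const uint16_t *pc, void *self,
                        uintptr_t instructionStart, unsigned opcode,
                        void (*checkInst)(const uint16_t *pc, void *self)) {
        checkInst(pc, self);                                  /* dvmCheckInst stand-in */
        realHandlerFor(instructionStart, opcode)(pc, self);   /* real handler */
    }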
 /* File: armv5te/footer.S */
 
 /*
@@ -9832,71 +21371,64 @@
 #if defined(WITH_SELF_VERIFICATION)
     .global dvmJitToInterpPunt
 dvmJitToInterpPunt:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r2,#kSVSPunt                 @ r2<- interpreter entry point
     mov    r3, #0
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpSingleStep
 dvmJitToInterpSingleStep:
-    str    lr,[rGLUE,#offGlue_jitResumeNPC]
-    str    r1,[rGLUE,#offGlue_jitResumeDPC]
+    str    lr,[rSELF,#offThread_jitResumeNPC]
+    str    r1,[rSELF,#offThread_jitResumeDPC]
     mov    r2,#kSVSSingleStep           @ r2<- interpreter entry point
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpNoChainNoProfile
 dvmJitToInterpNoChainNoProfile:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC                       @ pass our target PC
     mov    r2,#kSVSNoProfile            @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpTraceSelectNoChain
 dvmJitToInterpTraceSelectNoChain:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC                       @ pass our target PC
     mov    r2,#kSVSTraceSelect          @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpTraceSelect
 dvmJitToInterpTraceSelect:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     ldr    r0,[lr, #-1]                 @ pass our target PC
     mov    r2,#kSVSTraceSelect          @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpBackwardBranch
 dvmJitToInterpBackwardBranch:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     ldr    r0,[lr, #-1]                 @ pass our target PC
     mov    r2,#kSVSBackwardBranch       @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpNormal
 dvmJitToInterpNormal:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     ldr    r0,[lr, #-1]                 @ pass our target PC
     mov    r2,#kSVSNormal               @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpNoChain
 dvmJitToInterpNoChain:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC                       @ pass our target PC
     mov    r2,#kSVSNoChain              @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 #else
 /*
@@ -9908,7 +21440,6 @@
  */
     .global dvmJitToInterpPunt
 dvmJitToInterpPunt:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    rPC, r0
 #if defined(WITH_JIT_TUNING)
     mov    r0,lr
@@ -9916,8 +21447,8 @@
 #endif
     EXPORT_PC()
     mov    r0, #0
-    str    r0, [r10, #offThread_inJitCodeCache] @ Back to the interp land
-    adrl   rIBASE, dvmAsmInstructionStart
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     FETCH_INST()
     GET_INST_OPCODE(ip)
     GOTO_OPCODE(ip)
@@ -9931,17 +21462,17 @@
  */
     .global dvmJitToInterpSingleStep
 dvmJitToInterpSingleStep:
-    str    lr,[rGLUE,#offGlue_jitResumeNPC]
-    str    r1,[rGLUE,#offGlue_jitResumeDPC]
+    str    lr,[rSELF,#offThread_jitResumeNPC]
+    str    r1,[rSELF,#offThread_jitResumeDPC]
     mov    r1,#kInterpEntryInstr
     @ enum is 4 byte in aapcs-EABI
-    str    r1, [rGLUE, #offGlue_entryPoint]
+    str    r1, [rSELF, #offThread_entryPoint]
     mov    rPC,r0
     EXPORT_PC()
 
-    adrl   rIBASE, dvmAsmInstructionStart
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     mov    r2,#kJitSingleStep     @ Ask for single step and then revert
-    str    r2,[rGLUE,#offGlue_jitState]
+    str    r2,[rSELF,#offThread_jitState]
     mov    r1,#1                  @ set changeInterp to bail to debug interp
     b      common_gotoBail
 
@@ -9954,10 +21485,9 @@
 #if defined(WITH_JIT_TUNING)
     bl     dvmBumpNoChain
 #endif
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC
-    bl     dvmJitGetCodeAddr        @ Is there a translation?
-    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl     dvmJitGetTraceAddr       @ Is there a translation?
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     mov    r1, rPC                  @ arg1 of translation may need this
     mov    lr, #0                   @  in case target is HANDLER_INTERPRET
     cmp    r0,#0                    @ !0 means translation exists
@@ -9972,12 +21502,11 @@
     .global dvmJitToInterpTraceSelect
 dvmJitToInterpTraceSelect:
     ldr    rPC,[lr, #-1]           @ get our target PC
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     add    rINST,lr,#-5            @ save start of chain branch
     add    rINST, #-4              @  .. which is 9 bytes back
     mov    r0,rPC
-    bl     dvmJitGetCodeAddr       @ Is there a translation?
-    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl     dvmJitGetTraceAddr      @ Is there a translation?
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     cmp    r0,#0
     beq    2f
     mov    r1,rINST
@@ -9990,7 +21519,7 @@
 
 /* No translation, so request one if profiling isn't disabled*/
 2:
-    adrl   rIBASE, dvmAsmInstructionStart
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     GET_JIT_PROF_TABLE(r0)
     FETCH_INST()
     cmp    r0, #0
@@ -10016,15 +21545,14 @@
     .global dvmJitToInterpNormal
 dvmJitToInterpNormal:
     ldr    rPC,[lr, #-1]           @ get our target PC
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     add    rINST,lr,#-5            @ save start of chain branch
     add    rINST,#-4               @ .. which is 9 bytes back
 #if defined(WITH_JIT_TUNING)
     bl     dvmBumpNormal
 #endif
     mov    r0,rPC
-    bl     dvmJitGetCodeAddr        @ Is there a translation?
-    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl     dvmJitGetTraceAddr      @ Is there a translation?
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     cmp    r0,#0
     beq    toInterpreter            @ go if not, otherwise do chain
     mov    r1,rINST
@@ -10044,16 +21572,15 @@
 #if defined(WITH_JIT_TUNING)
     bl     dvmBumpNoChain
 #endif
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC
-    bl     dvmJitGetCodeAddr        @ Is there a translation?
-    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl     dvmJitGetTraceAddr       @ Is there a translation?
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     mov    r1, rPC                  @ arg1 of translation may need this
     mov    lr, #0                   @  in case target is HANDLER_INTERPRET
     cmp    r0,#0
     bxne   r0                       @ continue native execution if so
     EXPORT_PC()
-    adrl   rIBASE, dvmAsmInstructionStart
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     FETCH_INST()
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     GOTO_OPCODE(ip)                     @ jump to next instruction
@@ -10067,10 +21594,9 @@
 #if defined(WITH_JIT_TUNING)
     bl     dvmBumpNoChain
 #endif
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC
-    bl     dvmJitGetCodeAddr        @ Is there a translation?
-    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl     dvmJitGetTraceAddr       @ Is there a translation?
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     mov    r1, rPC                  @ arg1 of translation may need this
     mov    lr, #0                   @  in case target is HANDLER_INTERPRET
     cmp    r0,#0
@@ -10079,13 +21605,13 @@
 
 /*
  * No translation, restore interpreter regs and start interpreting.
- * rGLUE & rFP were preserved in the translated code, and rPC has
+ * rSELF & rFP were preserved in the translated code, and rPC has
  * already been restored by the time we get here.  We'll need to set
  * up rIBASE & rINST, and load the address of the JitTable into r0.
  */
 toInterpreter:
     EXPORT_PC()
-    adrl   rIBASE, dvmAsmInstructionStart
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     FETCH_INST()
     GET_JIT_PROF_TABLE(r0)
     @ NOTE: intended fallthrough
@@ -10117,13 +21643,13 @@
  * is already a native translation in place (and, if so,
  * jump to it now).
  */
+
     GET_JIT_THRESHOLD(r1)
-    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
     strb    r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ reset counter
     EXPORT_PC()
     mov     r0,rPC
-    bl      dvmJitGetCodeAddr           @ r0<- dvmJitGetCodeAddr(rPC)
-    str     r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl      dvmJitGetTraceAddr          @ r0<- dvmJitGetTraceAddr(rPC)
+    str     r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     mov     r1, rPC                     @ arg1 of translation may need this
     mov     lr, #0                      @  in case target is HANDLER_INTERPRET
     cmp     r0,#0
@@ -10144,9 +21670,8 @@
     cmp     r0, r10                     @ special case?
     bne     jitSVShadowRunStart         @ set up self verification shadow space
     @ Need to clear the inJitCodeCache flag
-    ldr    r10, [rGLUE, #offGlue_self]  @ r10 <- glue->self
     mov    r3, #0                       @ 0 means not in the JIT code cache
-    str    r3, [r10, #offThread_inJitCodeCache] @ back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ back to the interp land
     GET_INST_OPCODE(ip)
     GOTO_OPCODE(ip)
     /* no return */
@@ -10157,9 +21682,10 @@
  *  r2 is jit state, e.g. kJitTSelectRequest or kJitTSelectRequestHot
  */
 common_selectTrace:
-    str     r2,[rGLUE,#offGlue_jitState]
+
+    str     r2,[rSELF,#offThread_jitState]
     mov     r2,#kInterpEntryInstr       @ normal entry reason
-    str     r2,[rGLUE,#offGlue_entryPoint]
+    str     r2,[rSELF,#offThread_entryPoint]
     mov     r1,#1                       @ set changeInterp
     b       common_gotoBail
 
@@ -10168,42 +21694,41 @@
  * Save PC and registers to shadow memory for self verification mode
  * before jumping to native translation.
  * On entry:
- *    rPC, rFP, rGLUE: the values that they should contain
+ *    rPC, rFP, rSELF: the values that they should contain
  *    r10: the address of the target translation.
  */
 jitSVShadowRunStart:
     mov     r0,rPC                      @ r0<- program counter
     mov     r1,rFP                      @ r1<- frame pointer
-    mov     r2,rGLUE                    @ r2<- InterpState pointer
+    mov     r2,rSELF                    @ r2<- self (Thread) pointer
     mov     r3,r10                      @ r3<- target translation
     bl      dvmSelfVerificationSaveState @ save registers to shadow space
     ldr     rFP,[r0,#offShadowSpace_shadowFP] @ rFP<- fp in shadow space
-    add     rGLUE,r0,#offShadowSpace_interpState @ rGLUE<- rGLUE in shadow space
     bx      r10                         @ jump to the translation
 
 /*
- * Restore PC, registers, and interpState to original values
+ * Restore PC, registers, and interpreter state to original values
  * before jumping back to the interpreter.
  */
 jitSVShadowRunEnd:
     mov    r1,rFP                        @ pass ending fp
+    mov    r3,rSELF                      @ pass self ptr for convenience
     bl     dvmSelfVerificationRestoreState @ restore pc and fp values
-    ldr    rPC,[r0,#offShadowSpace_startPC] @ restore PC
-    ldr    rFP,[r0,#offShadowSpace_fp]   @ restore FP
-    ldr    rGLUE,[r0,#offShadowSpace_glue] @ restore InterpState
+    ldr    rPC,[rSELF,#offThread_pc]     @ restore PC
+    ldr    rFP,[rSELF,#offThread_fp]     @ restore FP
     ldr    r1,[r0,#offShadowSpace_svState] @ get self verification state
     cmp    r1,#0                         @ check for punt condition
     beq    1f
     mov    r2,#kJitSelfVerification      @ ask for self verification
-    str    r2,[rGLUE,#offGlue_jitState]
+    str    r2,[rSELF,#offThread_jitState]
     mov    r2,#kInterpEntryInstr         @ normal entry reason
-    str    r2,[rGLUE,#offGlue_entryPoint]
+    str    r2,[rSELF,#offThread_entryPoint]
     mov    r1,#1                         @ set changeInterp
     b      common_gotoBail
 
 1:                                       @ exit to interpreter without check
     EXPORT_PC()
-    adrl   rIBASE, dvmAsmInstructionStart
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     FETCH_INST()
     GET_INST_OPCODE(ip)
     GOTO_OPCODE(ip)
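For orientation, the shadow-run epilogue above now restores rPC and rFP from the Thread structure (offThread_pc, offThread_fp) rather than from the shadow space, and bails back through the debug interpreter when the self-verification state is non-zero. A rough C outline of that control flow; ShadowSpace, ThreadLite and the enum values are simplified stand-ins, not the real Dalvik definitions.

    #include <stdint.h>

    enum { kJitSelfVerification = 4, kInterpEntryInstr = 0 };   /* illustrative values */

    typedef struct { int svState; } ShadowSpace;     /* offShadowSpace_svState */

    typedef struct {
        const uint16_t *pc;                          /* offThread_pc */
        uint32_t       *fp;                          /* offThread_fp */
        int             jitState;                    /* offThread_jitState */
        int             entryPoint;                  /* offThread_entryPoint */
    } ThreadLite;

    /* Returns 1 if the interpreter should bail out and switch (changeInterp). */
    static int shadowRunEnd(const ShadowSpace *shadow, ThreadLite *self,
                            const uint16_t **pc, uint32_t **fp) {
        *pc = self->pc;              /* restore PC from the thread, not the shadow */
        *fp = self->fp;
        if (shadow->svState != 0) {  /* punt condition */
            self->jitState   = kJitSelfVerification;
            self->entryPoint = kInterpEntryInstr;
            return 1;                /* common_gotoBail with changeInterp = true */
        }
        return 0;                    /* resume at the restored PC at full speed */
    }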
@@ -10258,48 +21783,20 @@
  *  r9 is trampoline PC adjustment *in bytes*
  */
 common_periodicChecks:
-    ldr     r3, [rGLUE, #offGlue_pSelfSuspendCount] @ r3<- &suspendCount
-
-    ldr     r1, [rGLUE, #offGlue_pDebuggerActive]   @ r1<- &debuggerActive
-    ldr     r2, [rGLUE, #offGlue_pActiveProfilers]  @ r2<- &activeProfilers
-
-    ldr     ip, [r3]                    @ ip<- suspendCount (int)
-
-    cmp     r1, #0                      @ debugger enabled?
-#if defined(WORKAROUND_CORTEX_A9_745320)
-    /* Don't use conditional loads if the HW defect exists */
-    beq     101f
-    ldrb    r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
-101:
-#else
-    ldrneb  r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
-#endif
-    ldr     r2, [r2]                    @ r2<- activeProfilers (int)
-    orrnes  ip, ip, r1                  @ ip<- suspendCount | debuggerActive
-    /*
-     * Don't switch the interpreter in the libdvm_traceview build even if the
-     * profiler is active.
-     * The code here is opted for less intrusion instead of performance.
-     * That is, *pActiveProfilers is still loaded into r2 even though it is not
-     * used when WITH_INLINE_PROFILING is defined.
-     */
-#if !defined(WITH_INLINE_PROFILING)
-    orrs    ip, ip, r2                  @ ip<- suspend|debugger|profiler; set Z
-#endif
-
-
-    bxeq    lr                          @ all zero, return
-
+/* TUNING - make this a direct load when interpBreak is moved to Thread */
+    ldr     r1, [rSELF, #offThread_pInterpBreak] @ r1<- &interpBreak
+    /* speculatively load the thread-specific suspend count */
+    ldr     ip, [rSELF, #offThread_suspendCount]
+    ldr     r1, [r1]                                @ r1<- interpBreak
+    cmp     r1, #0                                  @ anything unusual?
+    bxeq    lr                                      @ return if not
     /*
      * One or more interesting events have happened.  Figure out what.
      *
-     * If debugging or profiling are compiled in, we need to disambiguate.
-     *
      * r0 still holds the reentry type.
      */
-    ldr     ip, [r3]                    @ ip<- suspendCount (int)
     cmp     ip, #0                      @ want suspend?
-    beq     1f                          @ no, must be debugger/profiler
+    beq     3f                          @ no, must be something else
 
     stmfd   sp!, {r0, lr}               @ preserve r0 and lr
 #if defined(WITH_JIT)
@@ -10307,77 +21804,86 @@
      * Refresh the Jit's cached copy of profile table pointer.  This pointer
      * doubles as the Jit's on/off switch.
      */
-    ldr     r3, [rGLUE, #offGlue_ppJitProfTable] @ r3<-&gDvmJit.pJitProfTable
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    ldr     r3, [rSELF, #offThread_ppJitProfTable] @ r3<-&gDvmJit.pJitProfTable
+    mov     r0, rSELF                  @ r0<- self
     ldr     r3, [r3] @ r3 <- pJitProfTable
     EXPORT_PC()                         @ need for precise GC
-    str     r3, [rGLUE, #offGlue_pJitProfTable] @ refresh Jit's on/off switch
+    str     r3, [rSELF, #offThread_pJitProfTable] @ refresh Jit's on/off switch
 #else
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    mov     r0, rSELF                   @ r0<- self
     EXPORT_PC()                         @ need for precise GC
 #endif
     bl      dvmCheckSuspendPending      @ do full check, suspend if necessary
     ldmfd   sp!, {r0, lr}               @ restore r0 and lr
 
     /*
-     * Reload the debugger/profiler enable flags.  We're checking to see
-     * if either of these got set while we were suspended.
-     *
-     * If WITH_INLINE_PROFILING is configured, don't check whether the profiler
-     * is enabled or not as the profiling will be done inline.
+     * Reload the interpBreak flags - they may have changed while we
+     * were suspended.
      */
-    ldr     r1, [rGLUE, #offGlue_pDebuggerActive]   @ r1<- &debuggerActive
-    cmp     r1, #0                      @ debugger enabled?
-#if defined(WORKAROUND_CORTEX_A9_745320)
-    /* Don't use conditional loads if the HW defect exists */
-    beq     101f
-    ldrb    r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
-101:
-#else
-    ldrneb  r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
-#endif
+/* TUNING - direct load when interpBreak is moved to Thread */
+    ldr     r1, [rSELF, #offThread_pInterpBreak]   @ r1<- &interpBreak
+    ldr     r1, [r1]                    @ r1<- interpBreak
+3:
+    /*
+     * TODO: this code is too fragile.  Need a general mechanism
+     * to identify what actions to take by submode.  Some profiling modes
+     * (instruction count) need to single-step, while method tracing
+     * may not.  Debugging with breakpoints can run unfettered, but
+ * source-level single-stepping requires Dalvik-level single-stepping.
+     * GC may require a one-shot action and then full-speed resumption.
+     */
+    ands    r1, #(kSubModeDebuggerActive | kSubModeEmulatorTrace | kSubModeInstCounting)
+    bxeq    lr                          @ nothing to do, return
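The sequence above collapses the old per-flag checks into a single interpBreak word: anything unusual is reflected there, and only the debugger, emulator-trace and instruction-counting submodes force the slow path. The same mask test in C (the bit positions for the kSubMode* constants are illustrative, not the real values):

    #include <stdbool.h>
    #include <stdint.h>

    enum {                               /* bit positions are illustrative */
        kSubModeDebuggerActive = 1u << 0,
        kSubModeEmulatorTrace  = 1u << 1,
        kSubModeInstCounting   = 1u << 2,
    };

    /* Mirrors the "ands ... ; bxeq lr" pair: nothing to do unless a relevant
     * submode bit is set in the freshly reloaded interpBreak word. */
    static bool submodeNeedsAttention(uint32_t interpBreak) {
        return (interpBreak & (kSubModeDebuggerActive |
                               kSubModeEmulatorTrace |
                               kSubModeInstCounting)) != 0;
    }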
 
-#if !defined(WITH_INLINE_PROFILING)
-    ldr     r2, [rGLUE, #offGlue_pActiveProfilers]  @ r2<- &activeProfilers
-    ldr     r2, [r2]                    @ r2<- activeProfilers (int)
-    orrs    r1, r1, r2
-#else
-    cmp     r1, #0                      @ only consult the debuggerActive flag
-#endif
-
-    beq     2f
-
-1:  @ debugger/profiler enabled, bail out; glue->entryPoint was set above
-    str     r0, [rGLUE, #offGlue_entryPoint]    @ store r0, need for debug/prof
+    @ debugger/profiler enabled, bail out; self->entryPoint was set above
+    str     r0, [rSELF, #offThread_entryPoint]  @ store r0, need for debug/prof
     add     rPC, rPC, r9                @ update rPC
     mov     r1, #1                      @ "want switch" = true
     b       common_gotoBail             @ side exit
 
-2:
-    bx      lr                          @ nothing to do, return
-
 
 /*
  * The equivalent of "goto bail", this calls through the "bail handler".
  *
- * State registers will be saved to the "glue" area before bailing.
+ * State registers will be saved to the "thread" area before bailing.
  *
  * On entry:
  *  r1 is "bool changeInterp", indicating if we want to switch to the
  *     other interpreter or just bail all the way out
  */
 common_gotoBail:
-    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
-    mov     r0, rGLUE                   @ r0<- glue ptr
-    b       dvmMterpStdBail             @ call(glue, changeInterp)
+    SAVE_PC_FP_TO_SELF()                @ export state to "thread"
+    mov     r0, rSELF                   @ r0<- self ptr
+    b       dvmMterpStdBail             @ call(self, changeInterp)
 
     @add     r1, r1, #1                  @ using (boolean+1)
-    @add     r0, rGLUE, #offGlue_jmpBuf  @ r0<- &glue->jmpBuf
+    @add     r0, rSELF, #offThread_jmpBuf @ r0<- &self->jmpBuf
     @bl      _longjmp                    @ does not return
     @bl      common_abort
 
 
 /*
+ * Common code for jumbo method invocation.
+ * NOTE: this adjusts rPC to account for the difference in instruction width.
+ * As a result, the savedPc in the stack frame will not be wholly accurate. So
+ * long as that is only used for source file line number calculations, we're
+ * okay.
+ *
+ * On entry:
+ *  r0 is "Method* methodToCall", the method we're trying to call
+ */
+common_invokeMethodJumbo:
+.LinvokeNewJumbo:
+    @ prepare to copy args to "outs" area of current frame
+    add     rPC, rPC, #4                @ adjust pc to make return consistent
+    FETCH(r2, 1)                        @ r2<- BBBB (arg count)
+    SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
+    cmp     r2, #0                      @ no args?
+    beq     .LinvokeArgsDone            @ if no args, skip the rest
+    FETCH(r1, 2)                        @ r1<- CCCC
+    b       .LinvokeRangeArgs           @ handle args like invoke range
+
+/*
  * Common code for method invocation with range.
  *
  * On entry:
@@ -10391,16 +21897,15 @@
     beq     .LinvokeArgsDone            @ if no args, skip the rest
     FETCH(r1, 2)                        @ r1<- CCCC
 
+.LinvokeRangeArgs:
     @ r0=methodToCall, r1=CCCC, r2=count, r10=outs
     @ (very few methods have > 10 args; could unroll for common cases)
     add     r3, rFP, r1, lsl #2         @ r3<- &fp[CCCC]
     sub     r10, r10, r2, lsl #2        @ r10<- "outs" area, for call args
-    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
 1:  ldr     r1, [r3], #4                @ val = *fp++
     subs    r2, r2, #1                  @ count--
     str     r1, [r10], #4               @ *outs++ = val
     bne     1b                          @ ...while count != 0
-    ldrh    r3, [r0, #offMethod_outsSize]   @ r3<- methodToCall->outsSize
     b       .LinvokeArgsDone
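The loop above (entered at .LinvokeRangeArgs, and shared with the jumbo invoke path) copies the argument vregs from the caller's frame into the outs area immediately below the new save area. A compact C rendering of that copy, with the frame layout reduced to bare pointers:

    #include <stdint.h>

    /* Copy 'count' argument registers, starting at vreg 'firstReg' (CCCC), from
     * the caller's frame into the outs area that sits below the stack save area. */
    static void copyRangeArgs(const uint32_t *callerFp, uint32_t *saveAreaBottom,
                              unsigned firstReg, unsigned count) {
        const uint32_t *src = &callerFp[firstReg];     /* r3<- &fp[CCCC] */
        uint32_t *outs = saveAreaBottom - count;       /* r10<- "outs" area */
        while (count-- != 0)
            *outs++ = *src++;                          /* val = *fp++; *outs++ = val */
    }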
 
 /*
@@ -10415,11 +21920,9 @@
     movs    r2, rINST, lsr #12          @ r2<- B (arg count) -- test for zero
     SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
     FETCH(r1, 2)                        @ r1<- GFED (load here to hide latency)
-    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
-    ldrh    r3, [r0, #offMethod_outsSize]  @ r3<- methodToCall->outsSize
     beq     .LinvokeArgsDone
 
-    @ r0=methodToCall, r1=GFED, r3=outSize, r2=count, r9=regSize, r10=outs
+    @ r0=methodToCall, r1=GFED, r2=count, r10=outs
 .LinvokeNonRange:
     rsb     r2, r2, #5                  @ r2<- 5-r2
     add     pc, pc, r2, lsl #4          @ computed goto, 4 instrs each
@@ -10446,7 +21949,9 @@
     str     r2, [r10, #-4]!             @ *--outs = vD
 0:  @ fall through to .LinvokeArgsDone
 
-.LinvokeArgsDone: @ r0=methodToCall, r3=outSize, r9=regSize
+.LinvokeArgsDone: @ r0=methodToCall
+    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
+    ldrh    r3, [r0, #offMethod_outsSize]  @ r3<- methodToCall->outsSize
     ldr     r2, [r0, #offMethod_insns]  @ r2<- method->insns
     ldr     rINST, [r0, #offMethod_clazz]  @ rINST<- method->clazz
     @ find space for the new stack frame, check for overflow
@@ -10454,13 +21959,15 @@
     sub     r1, r1, r9, lsl #2          @ r1<- newFp (old savearea - regsSize)
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- newSaveArea
 @    bl      common_dumpRegs
-    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
     sub     r3, r10, r3, lsl #2         @ r3<- bottom (newsave - outsSize)
     cmp     r3, r9                      @ bottom < interpStackEnd?
+    ldr     lr, [rSELF, #offThread_pInterpBreak]
     ldr     r3, [r0, #offMethod_accessFlags] @ r3<- methodToCall->accessFlags
     blo     .LstackOverflow             @ yes, this frame will overflow stack
 
     @ set up newSaveArea
+    ldr     lr, [lr]                    @ lr<- active submodes
 #ifdef EASY_GDB
     SAVEAREA_FROM_FP(ip, rFP)           @ ip<- stack save area
     str     ip, [r10, #offStackSaveArea_prevSave]
@@ -10471,13 +21978,14 @@
     mov     r9, #0
     str     r9, [r10, #offStackSaveArea_returnAddr]
 #endif
-#if defined(WITH_INLINE_PROFILING)
+    ands    lr, #kSubModeMethodTrace    @ method tracing?
+    beq     1f                          @ skip if not
     stmfd   sp!, {r0-r3}                @ preserve r0-r3
-    mov     r1, r6
-    @ r0=methodToCall, r1=rGlue
+    mov     r1, rSELF
+    @ r0=methodToCall, r1=rSELF
     bl      dvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
-#endif
+1:
     str     r0, [r10, #offStackSaveArea_method]
     tst     r3, #ACC_NATIVE
     bne     .LinvokeNative
@@ -10500,18 +22008,17 @@
     ldrh    r9, [r2]                        @ r9 <- load INST from new PC
     ldr     r3, [rINST, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
     mov     rPC, r2                         @ publish new rPC
-    ldr     r2, [rGLUE, #offGlue_self]      @ r2<- glue->self
 
-    @ Update "glue" values for the new method
-    @ r0=methodToCall, r1=newFp, r2=self, r3=newMethodClass, r9=newINST
-    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
-    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
+    @ Update state values for the new method
+    @ r0=methodToCall, r1=newFp, r3=newMethodClass, r9=newINST
+    str     r0, [rSELF, #offThread_method]    @ self->method = methodToCall
+    str     r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
 #if defined(WITH_JIT)
     GET_JIT_PROF_TABLE(r0)
     mov     rFP, r1                         @ fp = newFp
     GET_PREFETCHED_OPCODE(ip, r9)           @ extract prefetched opcode from r9
     mov     rINST, r9                       @ publish new rINST
-    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
     cmp     r0,#0
     bne     common_updateProfile
     GOTO_OPCODE(ip)                         @ jump to next instruction
@@ -10519,22 +22026,23 @@
     mov     rFP, r1                         @ fp = newFp
     GET_PREFETCHED_OPCODE(ip, r9)           @ extract prefetched opcode from r9
     mov     rINST, r9                       @ publish new rINST
-    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
     GOTO_OPCODE(ip)                         @ jump to next instruction
 #endif
 
 .LinvokeNative:
     @ Prep for the native call
     @ r0=methodToCall, r1=newFp, r10=newSaveArea
-    ldr     r3, [rGLUE, #offGlue_self]      @ r3<- glue->self
-    ldr     r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
-    str     r1, [r3, #offThread_curFrame]   @ self->curFrame = newFp
+    ldr     lr, [rSELF, #offThread_pInterpBreak]
+    ldr     r9, [rSELF, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
     str     r9, [r10, #offStackSaveArea_localRefCookie] @newFp->localRefCookie=top
-    mov     r9, r3                      @ r9<- glue->self (preserve)
+    ldr     lr, [lr]                    @ lr<- active submodes
 
     mov     r2, r0                      @ r2<- methodToCall
     mov     r0, r1                      @ r0<- newFp (points to args)
-    add     r1, rGLUE, #offGlue_retval  @ r1<- &retval
+    add     r1, rSELF, #offThread_retval  @ r1<- &retval
+    mov     r3, rSELF                   @ arg3<- self
 
 #ifdef ASSIST_DEBUGGER
     /* insert fake function header to help gdb find the stack frame */
@@ -10547,36 +22055,27 @@
 .Lskip:
 #endif
 
-#if defined(WITH_INLINE_PROFILING)
-    @ r2=JNIMethod, r6=rGLUE
-    stmfd   sp!, {r2,r6}
-#endif
-
+    ands    lr, #kSubModeMethodTrace    @ method tracing?
+    bne     330f                        @ hop if so
     mov     lr, pc                      @ set return addr
     ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
-
-#if defined(WITH_INLINE_PROFILING)
-    @ r0=JNIMethod, r1=rGLUE
-    ldmfd   sp!, {r0-r1}
-    bl      dvmFastNativeMethodTraceExit
-#endif
-
+220:
 #if defined(WITH_JIT)
-    ldr     r3, [rGLUE, #offGlue_ppJitProfTable] @ Refresh Jit's on/off status
+    ldr     r3, [rSELF, #offThread_ppJitProfTable] @ Refresh Jit's on/off status
 #endif
 
-    @ native return; r9=self, r10=newSaveArea
+    @ native return; r10=newSaveArea
     @ equivalent to dvmPopJniLocals
     ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved top
-    ldr     r1, [r9, #offThread_exception] @ check for exception
+    ldr     r1, [rSELF, #offThread_exception] @ check for exception
 #if defined(WITH_JIT)
     ldr     r3, [r3]                    @ r3 <- gDvmJit.pProfTable
 #endif
-    str     rFP, [r9, #offThread_curFrame]  @ self->curFrame = fp
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = fp
     cmp     r1, #0                      @ null?
-    str     r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top
+    str     r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
 #if defined(WITH_JIT)
-    str     r3, [rGLUE, #offGlue_pJitProfTable] @ refresh cached on/off switch
+    str     r3, [rSELF, #offThread_pJitProfTable] @ refresh cached on/off switch
 #endif
     bne     common_exceptionThrown      @ no, handle exception
 
@@ -10584,13 +22083,26 @@
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     GOTO_OPCODE(ip)                     @ jump to next instruction
 
+330:
+    @ r2=JNIMethod, r6=rSELF
+    stmfd   sp!, {r2,r6}
+
+    mov     lr, pc                      @ set return addr
+    ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+
+    @ r0=JNIMethod, r1=rSELF
+    ldmfd   sp!, {r0-r1}
+    bl      dvmFastNativeMethodTraceExit
+    b       220b
+
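The 330: block above is the method-tracing variant of the native call: it makes the same nativeFunc call as the fast path, then reports the exit to dvmFastNativeMethodTraceExit before rejoining the common return handling at 220:. Schematically, in C (the function-pointer and argument types are simplified placeholders):

    typedef void (*NativeBridge)(void *args, void *retval, void *method, void *self);
    typedef void (*TraceExitFn)(void *method, void *self);

    static void callNative(NativeBridge nativeFunc, void *args, void *retval,
                           void *method, void *self,
                           int methodTraceActive, TraceExitFn traceExit) {
        nativeFunc(args, retval, method, self);  /* methodToCall->nativeFunc */
        if (methodTraceActive)
            traceExit(method, self);             /* dvmFastNativeMethodTraceExit */
        /* both paths fall through to the common native-return code (220:) */
    }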
 .LstackOverflow:    @ r0=methodToCall
     mov     r1, r0                      @ r1<- methodToCall
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- self
+    mov     r0, rSELF                   @ r0<- self
     bl      dvmHandleStackOverflow
     b       common_exceptionThrown
 #ifdef ASSIST_DEBUGGER
     .fnend
+    .size   dalvik_mterp, .-dalvik_mterp
 #endif
 
 
@@ -10610,8 +22122,8 @@
     sub     sp, sp, #8                  @ space for args + pad
     FETCH(ip, 2)                        @ ip<- FEDC or CCCC
     mov     r2, r0                      @ A2<- methodToCall
-    mov     r0, rGLUE                   @ A0<- glue
-    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
+    mov     r0, rSELF                   @ A0<- self
+    SAVE_PC_FP_TO_SELF()                @ export state to "self"
     mov     r1, r9                      @ A1<- methodCallRange
     mov     r3, rINST, lsr #8           @ A3<- AA
     str     ip, [sp, #0]                @ A4<- ip
@@ -10633,19 +22145,21 @@
     mov     r9, #0
     bl      common_periodicChecks
 
-#if defined(WITH_INLINE_PROFILING)
+    ldr     lr, [rSELF, #offThread_pInterpBreak]
+    SAVEAREA_FROM_FP(r0, rFP)
+    ldr     lr, [lr]                    @ lr<- active submodes
+    ldr     r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc
+    ands    lr, #kSubModeMethodTrace    @ method tracing?
+    beq     333f
     stmfd   sp!, {r0-r3}                @ preserve r0-r3
-    mov     r0, r6
-    @ r0=rGlue
+    mov     r0, rSELF
+    @ r0=rSELF
     bl      dvmFastJavaMethodTraceExit
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
-#endif
-    SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
+333:
     ldr     rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame
-    ldr     r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc
     ldr     r2, [rFP, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                         @ r2<- method we're returning to
-    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
     cmp     r2, #0                      @ is this a break frame?
 #if defined(WORKAROUND_CORTEX_A9_745320)
     /* Don't use conditional loads if the HW defect exists */
@@ -10659,14 +22173,14 @@
     beq     common_gotoBail             @ break frame, bail out completely
 
     PREFETCH_ADVANCE_INST(rINST, r9, 3) @ advance r9, update new rINST
-    str     r2, [rGLUE, #offGlue_method]@ glue->method = newSave->method
+    str     r2, [rSELF, #offThread_method]@ self->method = newSave->method
     ldr     r1, [r10, #offClassObject_pDvmDex]   @ r1<- method->clazz->pDvmDex
-    str     rFP, [r3, #offThread_curFrame]  @ self->curFrame = fp
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = fp
 #if defined(WITH_JIT)
     ldr     r10, [r0, #offStackSaveArea_returnAddr] @ r10 = saveArea->returnAddr
     mov     rPC, r9                     @ publish new rPC
-    str     r1, [rGLUE, #offGlue_methodClassDex]
-    str     r10, [r3, #offThread_inJitCodeCache]  @ may return to JIT'ed land
+    str     r1, [rSELF, #offThread_methodClassDex]
+    str     r10, [rSELF, #offThread_inJitCodeCache]  @ may return to JIT'ed land
     cmp     r10, #0                      @ caller is compiled code
     blxne   r10
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
@@ -10674,7 +22188,7 @@
 #else
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     mov     rPC, r9                     @ publish new rPC
-    str     r1, [rGLUE, #offGlue_methodClassDex]
+    str     r1, [rSELF, #offThread_methodClassDex]
     GOTO_OPCODE(ip)                     @ jump to next instruction
 #endif
 
@@ -10683,8 +22197,8 @@
      */
      .if    0
 .LreturnOld:
-    SAVE_PC_FP_TO_GLUE()                @ export state
-    mov     r0, rGLUE                   @ arg to function
+    SAVE_PC_FP_TO_SELF()                @ export state
+    mov     r0, rSELF                   @ arg to function
     bl      dvmMterp_returnFromMethod
     b       common_resumeAfterGlueCall
     .endif
@@ -10707,13 +22221,12 @@
     mov     r9, #0
     bl      common_periodicChecks
 
-    ldr     r10, [rGLUE, #offGlue_self] @ r10<- glue->self
-    ldr     r9, [r10, #offThread_exception] @ r9<- self->exception
-    mov     r1, r10                     @ r1<- self
+    ldr     r9, [rSELF, #offThread_exception] @ r9<- self->exception
+    mov     r1, rSELF                   @ r1<- self
     mov     r0, r9                      @ r0<- exception
     bl      dvmAddTrackedAlloc          @ don't let the exception be GCed
     mov     r3, #0                      @ r3<- NULL
-    str     r3, [r10, #offThread_exception] @ self->exception = NULL
+    str     r3, [rSELF, #offThread_exception] @ self->exception = NULL
 
     /* set up args and a local for "&fp" */
     /* (str sp, [sp, #-4]!  would be perfect here, but is discouraged) */
@@ -10721,8 +22234,8 @@
     mov     ip, sp                      @ ip<- &fp
     mov     r3, #0                      @ r3<- false
     str     ip, [sp, #-4]!              @ *--sp = &fp
-    ldr     r1, [rGLUE, #offGlue_method] @ r1<- glue->method
-    mov     r0, r10                     @ r0<- self
+    ldr     r1, [rSELF, #offThread_method] @ r1<- self->method
+    mov     r0, rSELF                   @ r0<- self
     ldr     r1, [r1, #offMethod_insns]  @ r1<- method->insns
     mov     r2, r9                      @ r2<- exception
     sub     r1, rPC, r1                 @ r1<- pc - method->insns
@@ -10732,11 +22245,11 @@
     bl      dvmFindCatchBlock           @ call(self, relPc, exc, scan?, &fp)
 
     /* fix earlier stack overflow if necessary; may trash rFP */
-    ldrb    r1, [r10, #offThread_stackOverflowed]
+    ldrb    r1, [rSELF, #offThread_stackOverflowed]
     cmp     r1, #0                      @ did we overflow earlier?
     beq     1f                          @ no, skip ahead
     mov     rFP, r0                     @ save relPc result in rFP
-    mov     r0, r10                     @ r0<- self
+    mov     r0, rSELF                   @ r0<- self
     mov     r1, r9                      @ r1<- exception
     bl      dvmCleanupStackOverflow     @ call(self)
     mov     r0, rFP                     @ restore result
@@ -10751,30 +22264,30 @@
     /* adjust locals to match self->curFrame and updated PC */
     SAVEAREA_FROM_FP(r1, rFP)           @ r1<- new save area
     ldr     r1, [r1, #offStackSaveArea_method] @ r1<- new method
-    str     r1, [rGLUE, #offGlue_method]    @ glue->method = new method
+    str     r1, [rSELF, #offThread_method]  @ self->method = new method
     ldr     r2, [r1, #offMethod_clazz]      @ r2<- method->clazz
     ldr     r3, [r1, #offMethod_insns]      @ r3<- method->insns
     ldr     r2, [r2, #offClassObject_pDvmDex] @ r2<- method->clazz->pDvmDex
     add     rPC, r3, r0, asl #1             @ rPC<- method->insns + catchRelPc
-    str     r2, [rGLUE, #offGlue_methodClassDex] @ glue->pDvmDex = meth...
+    str     r2, [rSELF, #offThread_methodClassDex] @ self->pDvmDex = meth...
 
     /* release the tracked alloc on the exception */
     mov     r0, r9                      @ r0<- exception
-    mov     r1, r10                     @ r1<- self
+    mov     r1, rSELF                   @ r1<- self
     bl      dvmReleaseTrackedAlloc      @ release the exception
 
     /* restore the exception if the handler wants it */
     FETCH_INST()                        @ load rINST from rPC
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     cmp     ip, #OP_MOVE_EXCEPTION      @ is it "move-exception"?
-    streq   r9, [r10, #offThread_exception] @ yes, restore the exception
+    streq   r9, [rSELF, #offThread_exception] @ yes, restore the exception
     GOTO_OPCODE(ip)                     @ jump to next instruction
 
-.LnotCaughtLocally: @ r9=exception, r10=self
+.LnotCaughtLocally: @ r9=exception
     /* fix stack overflow if necessary */
-    ldrb    r1, [r10, #offThread_stackOverflowed]
+    ldrb    r1, [rSELF, #offThread_stackOverflowed]
     cmp     r1, #0                      @ did we overflow earlier?
-    movne   r0, r10                     @ if yes: r0<- self
+    movne   r0, rSELF                   @ if yes: r0<- self
     movne   r1, r9                      @ if yes: r1<- exception
     blne    dvmCleanupStackOverflow     @ if yes: call(self)
 
@@ -10783,14 +22296,14 @@
     /* call __android_log_print(prio, tag, format, ...) */
     /* "Exception %s from %s:%d not caught locally" */
     @ dvmLineNumFromPC(method, pc - method->insns)
-    ldr     r0, [rGLUE, #offGlue_method]
+    ldr     r0, [rSELF, #offThread_method]
     ldr     r1, [r0, #offMethod_insns]
     sub     r1, rPC, r1
     asr     r1, r1, #1
     bl      dvmLineNumFromPC
     str     r0, [sp, #-4]!
     @ dvmGetMethodSourceFile(method)
-    ldr     r0, [rGLUE, #offGlue_method]
+    ldr     r0, [rSELF, #offThread_method]
     bl      dvmGetMethodSourceFile
     str     r0, [sp, #-4]!
     @ exception->clazz->descriptor
@@ -10802,9 +22315,9 @@
     mov     r0, #3                      @ LOG_DEBUG
     bl      __android_log_print
 #endif
-    str     r9, [r10, #offThread_exception] @ restore exception
+    str     r9, [rSELF, #offThread_exception] @ restore exception
     mov     r0, r9                      @ r0<- exception
-    mov     r1, r10                     @ r1<- self
+    mov     r1, rSELF                   @ r1<- self
     bl      dvmReleaseTrackedAlloc      @ release the exception
     mov     r1, #0                      @ "want switch" = false
     b       common_gotoBail             @ bail out
@@ -10815,8 +22328,8 @@
      */
     .if     0
 .LexceptionOld:
-    SAVE_PC_FP_TO_GLUE()                @ export state
-    mov     r0, rGLUE                   @ arg to function
+    SAVE_PC_FP_TO_SELF()                @ export state
+    mov     r0, rSELF                   @ arg to function
     bl      dvmMterp_exceptionThrown
     b       common_resumeAfterGlueCall
     .endif
@@ -10827,7 +22340,7 @@
  * values and start executing at the next instruction.
  */
 common_resumeAfterGlueCall:
-    LOAD_PC_FP_FROM_GLUE()              @ pull rPC and rFP out of glue
+    LOAD_PC_FP_FROM_SELF()              @ pull rPC and rFP out of thread
     FETCH_INST()                        @ load rINST from rPC
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     GOTO_OPCODE(ip)                     @ jump to next instruction
@@ -10835,15 +22348,14 @@
 /*
  * Invalid array index. Note that our calling convention is strange; we use r1
  * and r3 because those just happen to be the registers all our callers are
- * using. We shuffle them here before calling the C function.
+ * using. We move r3 before calling the C function, but r1 happens to match.
  * r1: index
  * r3: size
  */
 common_errArrayIndex:
     EXPORT_PC()
-    mov     r0, r1
-    mov     r1, r3
-    bl      dvmThrowAIOOBE
+    mov     r0, r3
+    bl      dvmThrowArrayIndexOutOfBoundsException
     b       common_exceptionThrown
 
 /*
@@ -10851,29 +22363,28 @@
  */
 common_errDivideByZero:
     EXPORT_PC()
-    ldr     r0, strArithmeticException
-    ldr     r1, strDivideByZero
-    bl      dvmThrowException
+    ldr     r0, strDivideByZero
+    bl      dvmThrowArithmeticException
     b       common_exceptionThrown
 
 /*
  * Attempt to allocate an array with a negative size.
+ * On entry: length in r1
  */
 common_errNegativeArraySize:
     EXPORT_PC()
-    ldr     r0, strNegativeArraySizeException
-    mov     r1, #0
-    bl      dvmThrowException
+    mov     r0, r1                                @ arg0 <- len
+    bl      dvmThrowNegativeArraySizeException    @ (len)
     b       common_exceptionThrown
 
 /*
  * Invocation of a non-existent method.
+ * On entry: method name in r1
  */
 common_errNoSuchMethod:
     EXPORT_PC()
-    ldr     r0, strNoSuchMethodError
-    mov     r1, #0
-    bl      dvmThrowException
+    mov     r0, r1
+    bl      dvmThrowNoSuchMethodError
     b       common_exceptionThrown
 
 /*
@@ -10883,9 +22394,8 @@
  */
 common_errNullObject:
     EXPORT_PC()
-    ldr     r0, strNullPointerException
-    mov     r1, #0
-    bl      dvmThrowException
+    mov     r0, #0
+    bl      dvmThrowNullPointerException
     b       common_exceptionThrown
 
 /*
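
The four error stubs above stop loading a class-descriptor literal plus message for a generic thrower and instead call one helper per exception type, passing only the argument that actually varies (a message, the bad length, the method name, or nothing). That is also why most of the descriptor strings disappear from the literal pool further down. A minimal C sketch of the shape of that refactor, using illustrative names rather than the VM's real exception API:

    #include <stdio.h>

    /* Sketch only: the function names and signatures below are illustrative
     * stand-ins, not the VM's actual exception functions. */
    static void throwGeneric(const char* descriptor, const char* msg) {
        /* stand-in for the old path: every call site supplies the descriptor */
        printf("throw %s: %s\n", descriptor, msg ? msg : "(no message)");
    }

    /* New-style typed helpers: the descriptor lives in one place, so the
     * assembly stubs only pass the argument that varies. */
    static void throwArithmetic(const char* msg) {
        throwGeneric("Ljava/lang/ArithmeticException;", msg);
    }

    static void throwNegativeArraySize(int length) {
        char buf[32];
        snprintf(buf, sizeof(buf), "length %d", length);
        throwGeneric("Ljava/lang/NegativeArraySizeException;", buf);
    }

    int main(void) {
        throwArithmetic("divide by zero");
        throwNegativeArraySize(-3);
        return 0;
    }
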
@@ -11021,17 +22531,8 @@
  * String references, must be close to the code that uses them.
  */
     .align  2
-strArithmeticException:
-    .word   .LstrArithmeticException
 strDivideByZero:
     .word   .LstrDivideByZero
-strNegativeArraySizeException:
-    .word   .LstrNegativeArraySizeException
-strNoSuchMethodError:
-    .word   .LstrNoSuchMethodError
-strNullPointerException:
-    .word   .LstrNullPointerException
-
 strLogTag:
     .word   .LstrLogTag
 strExceptionNotCaughtLocally:
@@ -11059,23 +22560,10 @@
 
 .LstrBadEntryPoint:
     .asciz  "Bad entry point %d\n"
-.LstrArithmeticException:
-    .asciz  "Ljava/lang/ArithmeticException;"
-.LstrDivideByZero:
-    .asciz  "divide by zero"
 .LstrFilledNewArrayNotImpl:
     .asciz  "filled-new-array only implemented for objects and 'int'"
-.LstrInternalError:
-    .asciz  "Ljava/lang/InternalError;"
-.LstrInstantiationError:
-    .asciz  "Ljava/lang/InstantiationError;"
-.LstrNegativeArraySizeException:
-    .asciz  "Ljava/lang/NegativeArraySizeException;"
-.LstrNoSuchMethodError:
-    .asciz  "Ljava/lang/NoSuchMethodError;"
-.LstrNullPointerException:
-    .asciz  "Ljava/lang/NullPointerException;"
-
+.LstrDivideByZero:
+    .asciz  "divide by zero"
 .LstrLogTag:
     .asciz  "mterp"
 .LstrExceptionNotCaughtLocally:
diff --git a/vm/mterp/out/InterpAsm-armv7-a-neon.S b/vm/mterp/out/InterpAsm-armv7-a-neon.S
index 1398a94..c5acc08 100644
--- a/vm/mterp/out/InterpAsm-armv7-a-neon.S
+++ b/vm/mterp/out/InterpAsm-armv7-a-neon.S
@@ -63,7 +63,7 @@
   reg nick      purpose
   r4  rPC       interpreted program counter, used for fetching instructions
   r5  rFP       interpreted frame pointer, used for accessing locals and args
-  r6  rGLUE     MterpGlue pointer
+  r6  rSELF     self (Thread) pointer
   r7  rINST     first 16-bit code unit of current instruction
   r8  rIBASE    interpreted instruction base pointer, used for computed goto
 
@@ -75,21 +75,21 @@
 /* single-purpose registers, given names for clarity */
 #define rPC     r4
 #define rFP     r5
-#define rGLUE   r6
+#define rSELF   r6
 #define rINST   r7
 #define rIBASE  r8
 
-/* save/restore the PC and/or FP from the glue struct */
-#define LOAD_PC_FROM_GLUE()     ldr     rPC, [rGLUE, #offGlue_pc]
-#define SAVE_PC_TO_GLUE()       str     rPC, [rGLUE, #offGlue_pc]
-#define LOAD_FP_FROM_GLUE()     ldr     rFP, [rGLUE, #offGlue_fp]
-#define SAVE_FP_TO_GLUE()       str     rFP, [rGLUE, #offGlue_fp]
-#define LOAD_PC_FP_FROM_GLUE()  ldmia   rGLUE, {rPC, rFP}
-#define SAVE_PC_FP_TO_GLUE()    stmia   rGLUE, {rPC, rFP}
+/* save/restore the PC and/or FP from the thread struct */
+#define LOAD_PC_FROM_SELF()     ldr     rPC, [rSELF, #offThread_pc]
+#define SAVE_PC_TO_SELF()       str     rPC, [rSELF, #offThread_pc]
+#define LOAD_FP_FROM_SELF()     ldr     rFP, [rSELF, #offThread_fp]
+#define SAVE_FP_TO_SELF()       str     rFP, [rSELF, #offThread_fp]
+#define LOAD_PC_FP_FROM_SELF()  ldmia   rSELF, {rPC, rFP}
+#define SAVE_PC_FP_TO_SELF()    stmia   rSELF, {rPC, rFP}
 
 /*
  * "export" the PC to the stack frame, f/b/o future exception objects.  Must
- * be done *before* something calls dvmThrowException.
+ * be done *before* something throws.
  *
  * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e.
  * fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc)
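
The comment above already gives the C equivalent of EXPORT_PC(). A small self-contained sketch of that relationship, using a simplified stand-in for the save area (the real StackSaveArea has more fields and a different layout):

    #include <stdio.h>
    #include <stdint.h>

    /* Simplified stand-in: the save area sits immediately below the frame pointer. */
    struct SaveAreaSketch {
        const uint16_t* currentPc;   /* exported PC, read later by the exception machinery */
    };

    #define SAVEAREA_FROM_FP(fp)  ((struct SaveAreaSketch*)(fp) - 1)   /* fp minus the save area */

    int main(void) {
        struct SaveAreaSketch area = { 0 };
        void* fp = &area + 1;                          /* frame begins right after the save area */
        static const uint16_t insns[4] = { 0 };        /* pretend bytecode */
        SAVEAREA_FROM_FP(fp)->currentPc = &insns[2];   /* this is what EXPORT_PC() stores */
        printf("exported pc = code unit %ld\n",
               (long)(SAVEAREA_FROM_FP(fp)->currentPc - insns));
        return 0;
    }
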
@@ -124,14 +124,14 @@
  * exception catch may miss.  (This also implies that it must come after
  * EXPORT_PC().)
  */
-#define FETCH_ADVANCE_INST(_count) ldrh    rINST, [rPC, #(_count*2)]!
+#define FETCH_ADVANCE_INST(_count) ldrh    rINST, [rPC, #((_count)*2)]!
 
 /*
  * The operation performed here is similar to FETCH_ADVANCE_INST, except the
  * src and dest registers are parameterized (not hard-wired to rPC and rINST).
  */
 #define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
-        ldrh    _dreg, [_sreg, #(_count*2)]!
+        ldrh    _dreg, [_sreg, #((_count)*2)]!
 
 /*
  * Fetch the next instruction from an offset specified by _reg.  Updates
@@ -151,15 +151,15 @@
  *
  * The "_S" variant works the same but treats the value as signed.
  */
-#define FETCH(_reg, _count)     ldrh    _reg, [rPC, #(_count*2)]
-#define FETCH_S(_reg, _count)   ldrsh   _reg, [rPC, #(_count*2)]
+#define FETCH(_reg, _count)     ldrh    _reg, [rPC, #((_count)*2)]
+#define FETCH_S(_reg, _count)   ldrsh   _reg, [rPC, #((_count)*2)]
 
 /*
  * Fetch one byte from an offset past the current PC.  Pass in the same
  * "_count" as you would for FETCH, and an additional 0/1 indicating which
  * byte of the halfword you want (lo/hi).
  */
-#define FETCH_B(_reg, _count, _byte) ldrb     _reg, [rPC, #(_count*2+_byte)]
+#define FETCH_B(_reg, _count, _byte) ldrb     _reg, [rPC, #((_count)*2+(_byte))]
 
 /*
  * Put the instruction's opcode field into the specified register.
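
The only change to the fetch macros above is wrapping each macro argument in its own parentheses, so an expression passed as the count or byte selector is scaled as a whole. A short C illustration of the difference (the macros here are illustrative, not the interpreter's):

    #include <stdio.h>

    #define OFFSET_UNSAFE(_count)  (_count * 2)      /* old style: argument unparenthesized */
    #define OFFSET_SAFE(_count)    ((_count) * 2)    /* new style: argument parenthesized   */

    int main(void) {
        int n = 3;
        /* With n + 1 substituted textually, the unsafe form expands to n + 1*2 = 5,
         * while the safe form expands to (n + 1)*2 = 8. */
        printf("unsafe: %d, safe: %d\n", OFFSET_UNSAFE(n + 1), OFFSET_SAFE(n + 1));
        return 0;
    }
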
@@ -186,8 +186,8 @@
 #define SET_VREG(_reg, _vreg)   str     _reg, [rFP, _vreg, lsl #2]
 
 #if defined(WITH_JIT)
-#define GET_JIT_PROF_TABLE(_reg)    ldr     _reg,[rGLUE,#offGlue_pJitProfTable]
-#define GET_JIT_THRESHOLD(_reg)     ldr     _reg,[rGLUE,#offGlue_jitThreshold]
+#define GET_JIT_PROF_TABLE(_reg)    ldr _reg,[rSELF,#offThread_pJitProfTable]
+#define GET_JIT_THRESHOLD(_reg)     ldr _reg,[rSELF,#offThread_jitThreshold]
 #endif
 
 /*
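
With rSELF naming the Thread pointer, every handler reads per-thread state as a fixed byte offset from that one register, exactly as the macros above do for the JIT profile table. A minimal sketch of how such constants correspond to a C struct via offsetof; the struct and field names here are illustrative, and the real Thread layout and generated offThread_* constants live elsewhere in the VM:

    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative stand-in for the interpreter's per-thread state. */
    struct ThreadSketch {
        const void* pc;       /* interpreted PC                  */
        const void* fp;       /* interpreted frame pointer       */
        const void* method;   /* currently executing method      */
        long long   retval;   /* return value staging area       */
    };

    int main(void) {
        /* The assembly's "ldr r0, [rSELF, #offThread_method]" is just a load at
         * a compile-time-known byte offset from the Thread pointer. */
        printf("offsetof(ThreadSketch, pc)     = %zu\n", offsetof(struct ThreadSketch, pc));
        printf("offsetof(ThreadSketch, method) = %zu\n", offsetof(struct ThreadSketch, method));
        printf("offsetof(ThreadSketch, retval) = %zu\n", offsetof(struct ThreadSketch, retval));
        return 0;
    }
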
@@ -280,7 +280,7 @@
 
 /*
  * On entry:
- *  r0  MterpGlue* glue
+ *  r0  Thread* self
  *
  * This function returns a boolean "changeInterp" value.  The return comes
  * via a call to dvmMterpStdBail().
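
The entry code in the next hunk dispatches on the reason the interpreter was entered: executing at rPC is the usual case, resuming after a JIT single-step re-enters the translation, and anything unrecognized aborts. A rough C sketch of that dispatch; the enum names are assumptions based only on the constants visible in this diff:

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative entry reasons; the real enum lives in the interpreter headers. */
    typedef enum { kEntryInstr, kEntryResume, kEntryOther } EntrySketch;

    static void enterInterpreter(EntrySketch why) {
        if (why == kEntryInstr) {            /* "cmp r1, #kInterpEntryInstr" - usual case */
            printf("start executing at rPC\n");
        } else if (why == kEntryResume) {    /* "resuming after Jit single-step?" */
            printf("re-enter the translation\n");
        } else {                             /* the ".Lbad_arg" path */
            printf("Bad entry point %d\n", (int)why);
            exit(1);
        }
    }

    int main(void) {
        enterInterpreter(kEntryInstr);
        enterInterpreter(kEntryResume);
        return 0;
    }
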
@@ -298,29 +298,28 @@
     MTERP_ENTRY2
 
     /* save stack pointer, add magic word for debuggerd */
-    str     sp, [r0, #offGlue_bailPtr]  @ save SP for eventual return
+    str     sp, [r0, #offThread_bailPtr]  @ save SP for eventual return
 
     /* set up "named" registers, figure out entry point */
-    mov     rGLUE, r0                   @ set rGLUE
-    ldr     r1, [r0, #offGlue_entryPoint]   @ enum is 4 bytes in aapcs-EABI
-    LOAD_PC_FP_FROM_GLUE()              @ load rPC and rFP from "glue"
-    adr     rIBASE, dvmAsmInstructionStart  @ set rIBASE
+    mov     rSELF, r0                   @ set rSELF
+    ldr     r1, [r0, #offThread_entryPoint]   @ enum is 4 bytes in aapcs-EABI
+    LOAD_PC_FP_FROM_SELF()              @ load rPC and rFP from "thread"
+    ldr     rIBASE, [rSELF, #offThread_curHandlerTable] @ set rIBASE
     cmp     r1, #kInterpEntryInstr      @ usual case?
     bne     .Lnot_instr                 @ no, handle it
 
 #if defined(WITH_JIT)
 .LentryInstr:
-    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
     /* Entry is always a possible trace start */
     GET_JIT_PROF_TABLE(r0)
     FETCH_INST()
     mov     r1, #0                      @ prepare the value for the new state
-    str     r1, [r10, #offThread_inJitCodeCache] @ back to the interp land
+    str     r1, [rSELF, #offThread_inJitCodeCache] @ back to the interp land
     cmp     r0,#0                       @ is profiling disabled?
 #if !defined(WITH_SELF_VERIFICATION)
     bne     common_updateProfile        @ profiling is enabled
 #else
-    ldr     r2, [r10, #offThread_shadowSpace]   @ to find out the jit exit state
+    ldr     r2, [rSELF, #offThread_shadowSpace] @ to find out the jit exit state
     beq     1f                          @ profiling is disabled
     ldr     r3, [r2, #offShadowSpace_jitExitState]  @ jit exit state
     cmp     r3, #kSVSTraceSelect        @ hot trace following?
@@ -350,20 +349,20 @@
 
 #if defined(WITH_JIT)
 .Lnot_throw:
-    ldr     r10,[rGLUE, #offGlue_jitResumeNPC]
-    ldr     r2,[rGLUE, #offGlue_jitResumeDPC]
+    ldr     r10,[rSELF, #offThread_jitResumeNPC]
+    ldr     r2,[rSELF, #offThread_jitResumeDPC]
     cmp     r1, #kInterpEntryResume     @ resuming after Jit single-step?
     bne     .Lbad_arg
     cmp     rPC,r2
     bne     .LentryInstr                @ must have branched, don't resume
 #if defined(WITH_SELF_VERIFICATION)
-    @ glue->entryPoint will be set in dvmSelfVerificationSaveState
+    @ self->entryPoint will be set in dvmSelfVerificationSaveState
     b       jitSVShadowRunStart         @ re-enter the translation after the
                                         @ single-stepped instruction
     @noreturn
 #endif
     mov     r1, #kInterpEntryInstr
-    str     r1, [rGLUE, #offGlue_entryPoint]
+    str     r1, [rSELF, #offThread_entryPoint]
     bx      r10                         @ re-enter the translation
 #endif
 
@@ -373,6 +372,7 @@
     bl      printf
     bl      dvmAbort
     .fnend
+    .size   dvmMterpStdRun, .-dvmMterpStdRun
 
 
     .global dvmMterpStdBail
@@ -388,11 +388,11 @@
  * LR to PC.
  *
  * On entry:
- *  r0  MterpGlue* glue
+ *  r0  Thread* self
  *  r1  bool changeInterp
  */
 dvmMterpStdBail:
-    ldr     sp, [r0, #offGlue_bailPtr]      @ sp<- saved SP
+    ldr     sp, [r0, #offThread_bailPtr]      @ sp<- saved SP
     mov     r0, r1                          @ return the changeInterp value
     add     sp, sp, #4                      @ un-align 64
     ldmfd   sp!, {r4-r10,fp,pc}             @ restore 9 regs and return
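
dvmMterpStdBail leaves the interpreter by reloading the stack pointer that dvmMterpStdRun stashed in bailPtr and handing the changeInterp flag back through r0, so whatever call stack accumulated while interpreting simply disappears. In C terms it behaves much like a setjmp/longjmp pair that carries a value; a rough analogy only, since the real mechanism is the hand-written assembly above:

    #include <setjmp.h>
    #include <stdio.h>

    static jmp_buf bailPtr;   /* analogue of the SP saved at the bail pointer slot */

    static void bail(int changeInterp) {
        /* analogue of dvmMterpStdBail: unwind straight back to run()'s entry */
        longjmp(bailPtr, changeInterp ? 2 : 1);
    }

    static int run(void) {
        int flag = setjmp(bailPtr);   /* analogue of "str sp, [r0, #offThread_bailPtr]" */
        if (flag != 0)
            return flag - 1;          /* bail() landed here: report changeInterp */
        /* ... interpret bytecode; eventually something decides to leave ... */
        bail(0);
        return 0;                     /* not reached */
    }

    int main(void) {
        printf("changeInterp = %d\n", run());
        return 0;
    }
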
@@ -575,7 +575,7 @@
     /* op vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
-    ldr     r0, [rGLUE, #offGlue_retval]    @ r0<- glue->retval.i
+    ldr     r0, [rSELF, #offThread_retval]    @ r0<- self->retval.i
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     SET_VREG(r0, r2)                    @ fp[AA]<- r0
     GOTO_OPCODE(ip)                     @ jump to next instruction
@@ -586,7 +586,7 @@
 /* File: armv5te/OP_MOVE_RESULT_WIDE.S */
     /* move-result-wide vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
-    add     r3, rGLUE, #offGlue_retval  @ r3<- &glue->retval
+    add     r3, rSELF, #offThread_retval  @ r3<- &self->retval
     add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
     ldmia   r3, {r0-r1}                 @ r0/r1<- retval.j
     FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
@@ -603,7 +603,7 @@
     /* op vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
-    ldr     r0, [rGLUE, #offGlue_retval]    @ r0<- glue->retval.i
+    ldr     r0, [rSELF, #offThread_retval]    @ r0<- self->retval.i
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     SET_VREG(r0, r2)                    @ fp[AA]<- r0
     GOTO_OPCODE(ip)                     @ jump to next instruction
@@ -614,14 +614,13 @@
 .L_OP_MOVE_EXCEPTION: /* 0x0d */
 /* File: armv5te/OP_MOVE_EXCEPTION.S */
     /* move-exception vAA */
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
     mov     r2, rINST, lsr #8           @ r2<- AA
-    ldr     r3, [r0, #offThread_exception]  @ r3<- dvmGetException bypass
+    ldr     r3, [rSELF, #offThread_exception]  @ r3<- dvmGetException bypass
     mov     r1, #0                      @ r1<- 0
     FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
     SET_VREG(r3, r2)                    @ fp[AA]<- exception obj
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
-    str     r1, [r0, #offThread_exception]  @ dvmClearException bypass
+    str     r1, [rSELF, #offThread_exception]  @ dvmClearException bypass
     GOTO_OPCODE(ip)                     @ jump to next instruction
 
 /* ------------------------------ */
@@ -635,7 +634,7 @@
 .L_OP_RETURN: /* 0x0f */
 /* File: armv5te/OP_RETURN.S */
     /*
-     * Return a 32-bit value.  Copies the return value into the "glue"
+     * Return a 32-bit value.  Copies the return value into the "thread"
      * structure, then jumps to the return handler.
      *
      * for: return, return-object
@@ -643,7 +642,7 @@
     /* op vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     GET_VREG(r0, r2)                    @ r0<- vAA
-    str     r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA
+    str     r0, [rSELF, #offThread_retval] @ retval.i <- vAA
     b       common_returnFromMethod
 
 /* ------------------------------ */
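
return and return-object stage the 32-bit result in the thread's retval slot and branch to the common return handler; the matching move-result in the caller then reads the same slot, as the handlers earlier in this file show. A tiny C model of that hand-off, with illustrative field names:

    #include <stdio.h>
    #include <stdint.h>

    struct ThreadSketch { uint64_t retval; };   /* shared staging slot for return values */

    static void opReturn(struct ThreadSketch* self, uint32_t vAA) {
        self->retval = vAA;            /* "str r0, [rSELF, #offThread_retval]" */
    }

    static uint32_t opMoveResult(const struct ThreadSketch* self) {
        return (uint32_t)self->retval; /* "ldr r0, [rSELF, #offThread_retval]" */
    }

    int main(void) {
        struct ThreadSketch t = { 0 };
        opReturn(&t, 42);                                 /* callee returns 42 */
        printf("caller sees %u\n", opMoveResult(&t));     /* caller's move-result */
        return 0;
    }
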
@@ -651,13 +650,13 @@
 .L_OP_RETURN_WIDE: /* 0x10 */
 /* File: armv5te/OP_RETURN_WIDE.S */
     /*
-     * Return a 64-bit value.  Copies the return value into the "glue"
+     * Return a 64-bit value.  Copies the return value into the "thread"
      * structure, then jumps to the return handler.
      */
     /* return-wide vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
-    add     r3, rGLUE, #offGlue_retval  @ r3<- &glue->retval
+    add     r3, rSELF, #offThread_retval  @ r3<- &self->retval
     ldmia   r2, {r0-r1}                 @ r0/r1 <- vAA/vAA+1
     stmia   r3, {r0-r1}                 @ retval<- r0/r1
     b       common_returnFromMethod
@@ -668,7 +667,7 @@
 /* File: armv5te/OP_RETURN_OBJECT.S */
 /* File: armv5te/OP_RETURN.S */
     /*
-     * Return a 32-bit value.  Copies the return value into the "glue"
+     * Return a 32-bit value.  Copies the return value into the "thread"
      * structure, then jumps to the return handler.
      *
      * for: return, return-object
@@ -676,7 +675,7 @@
     /* op vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     GET_VREG(r0, r2)                    @ r0<- vAA
-    str     r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA
+    str     r0, [rSELF, #offThread_retval] @ retval.i <- vAA
     b       common_returnFromMethod
 
 
@@ -801,7 +800,7 @@
 /* File: armv5te/OP_CONST_STRING.S */
     /* const/string vAA, String@BBBB */
     FETCH(r1, 1)                        @ r1<- BBBB
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]  @ r2<- self->methodClassDex
     mov     r9, rINST, lsr #8           @ r9<- AA
     ldr     r2, [r2, #offDvmDex_pResStrings]   @ r2<- dvmDex->pResStrings
     ldr     r0, [r2, r1, lsl #2]        @ r0<- pResStrings[BBBB]
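
const-string here, and the const-class, field, and invoke handlers that follow, all share one pattern: index a per-DEX table of already-resolved pointers and drop into a resolver call only when the slot is still null. A compact C sketch of that cache check, with illustrative names:

    #include <stdio.h>
    #include <stddef.h>

    /* Illustrative resolved-object cache, playing the role of pResStrings,
     * pResClasses, pResFields, or pResMethods. */
    static const char* cache[4];

    static const char* resolveSlow(unsigned idx) {
        static const char* pool[4] = { "zero", "one", "two", "three" };
        return pool[idx];                       /* stand-in for a dvmResolve...() call */
    }

    static const char* lookup(unsigned idx) {
        const char* obj = cache[idx];           /* "ldr r0, [r2, r1, lsl #2]" */
        if (obj == NULL) {                      /* "cmp r0, #0" */
            obj = resolveSlow(idx);             /* cold path: resolve and remember */
            cache[idx] = obj;
        }
        return obj;                             /* hot path: already resolved */
    }

    int main(void) {
        printf("%s %s\n", lookup(2), lookup(2));  /* second call hits the cache */
        return 0;
    }
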
@@ -819,7 +818,7 @@
     /* const/string vAA, String@BBBBBBBB */
     FETCH(r0, 1)                        @ r0<- bbbb (low)
     FETCH(r1, 2)                        @ r1<- BBBB (high)
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]  @ r2<- self->methodClassDex
     mov     r9, rINST, lsr #8           @ r9<- AA
     ldr     r2, [r2, #offDvmDex_pResStrings]   @ r2<- dvmDex->pResStrings
     orr     r1, r0, r1, lsl #16         @ r1<- BBBBbbbb
@@ -837,7 +836,7 @@
 /* File: armv5te/OP_CONST_CLASS.S */
     /* const/class vAA, Class@BBBB */
     FETCH(r1, 1)                        @ r1<- BBBB
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]  @ r2<- self->methodClassDex
     mov     r9, rINST, lsr #8           @ r9<- AA
     ldr     r2, [r2, #offDvmDex_pResClasses]   @ r2<- dvmDex->pResClasses
     ldr     r0, [r2, r1, lsl #2]        @ r0<- pResClasses[BBBB]
@@ -858,18 +857,12 @@
     /* monitor-enter vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     GET_VREG(r1, r2)                    @ r1<- vAA (object)
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    mov     r0, rSELF                   @ r0<- self
     cmp     r1, #0                      @ null object?
-    EXPORT_PC()                         @ need for precise GC, MONITOR_TRACKING
+    EXPORT_PC()                         @ need for precise GC
     beq     common_errNullObject        @ null object, throw an exception
     FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
     bl      dvmLockObject               @ call(self, obj)
-#ifdef WITH_DEADLOCK_PREDICTION /* implies WITH_MONITOR_TRACKING */
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
-    ldr     r1, [r0, #offThread_exception] @ check for exception
-    cmp     r1, #0
-    bne     common_exceptionThrown      @ exception raised, bail out
-#endif
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     GOTO_OPCODE(ip)                     @ jump to next instruction
 
@@ -890,7 +883,7 @@
     GET_VREG(r1, r2)                    @ r1<- vAA (object)
     cmp     r1, #0                      @ null object?
     beq     1f                          @ yes
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    mov     r0, rSELF                   @ r0<- self
     bl      dvmUnlockObject             @ r0<- success for unlock(self, obj)
     cmp     r0, #0                      @ failed?
     FETCH_ADVANCE_INST(1)               @ before throw: advance rPC, load rINST
@@ -912,7 +905,7 @@
     mov     r3, rINST, lsr #8           @ r3<- AA
     FETCH(r2, 1)                        @ r2<- BBBB
     GET_VREG(r9, r3)                    @ r9<- object
-    ldr     r0, [rGLUE, #offGlue_methodClassDex]    @ r0<- pDvmDex
+    ldr     r0, [rSELF, #offThread_methodClassDex]    @ r0<- pDvmDex
     cmp     r9, #0                      @ is object null?
     ldr     r0, [r0, #offDvmDex_pResClasses]    @ r0<- pDvmDex->pResClasses
     beq     .LOP_CHECK_CAST_okay            @ null obj, cast always succeeds
@@ -944,7 +937,7 @@
     GET_VREG(r0, r3)                    @ r0<- vB (object)
     and     r9, r9, #15                 @ r9<- A
     cmp     r0, #0                      @ is object null?
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- pDvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- pDvmDex
     beq     .LOP_INSTANCE_OF_store           @ null obj, not an instance, store r0
     FETCH(r3, 1)                        @ r3<- CCCC
     ldr     r2, [r2, #offDvmDex_pResClasses]    @ r2<- pDvmDex->pResClasses
@@ -983,7 +976,7 @@
      * Create a new instance of a class.
      */
     /* new-instance vAA, class@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
     ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
@@ -1013,12 +1006,12 @@
     /* new-array vA, vB, class@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
     FETCH(r2, 1)                        @ r2<- CCCC
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     GET_VREG(r1, r0)                    @ r1<- vB (array length)
     ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
     cmp     r1, #0                      @ check length
     ldr     r0, [r3, r2, lsl #2]        @ r0<- resolved class
-    bmi     common_errNegativeArraySize @ negative length, bail
+    bmi     common_errNegativeArraySize @ negative length, bail - len in r1
     cmp     r0, #0                      @ already resolved?
     EXPORT_PC()                         @ req'd for resolve, alloc
     bne     .LOP_NEW_ARRAY_finish          @ resolved, continue
@@ -1035,7 +1028,7 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
     EXPORT_PC()                         @ need for resolve and alloc
@@ -1043,7 +1036,7 @@
     mov     r10, rINST, lsr #8          @ r10<- AA or BA
     cmp     r0, #0                      @ already resolved?
     bne     .LOP_FILLED_NEW_ARRAY_continue        @ yes, continue on
-8:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+8:  ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     mov     r2, #0                      @ r2<- false
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveClass             @ r0<- call(clazz, ref)
@@ -1063,7 +1056,7 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
     EXPORT_PC()                         @ need for resolve and alloc
@@ -1071,7 +1064,7 @@
     mov     r10, rINST, lsr #8          @ r10<- AA or BA
     cmp     r0, #0                      @ already resolved?
     bne     .LOP_FILLED_NEW_ARRAY_RANGE_continue        @ yes, continue on
-8:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+8:  ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     mov     r2, #0                      @ r2<- false
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveClass             @ r0<- call(clazz, ref)
@@ -1109,12 +1102,11 @@
     /* throw vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     GET_VREG(r1, r2)                    @ r1<- vAA (exception object)
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
     EXPORT_PC()                         @ exception handler can throw
     cmp     r1, #0                      @ null object?
     beq     common_errNullObject        @ yes, throw an NPE instead
     @ bypass dvmSetException, just store it
-    str     r1, [r0, #offThread_exception]  @ thread->exception<- obj
+    str     r1, [rSELF, #offThread_exception]  @ thread->exception<- obj
     b       common_exceptionThrown
 
 /* ------------------------------ */
@@ -2392,14 +2384,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2416,14 +2408,14 @@
      */
     /* iget-wide vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_WIDE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2443,14 +2435,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_OBJECT_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2472,14 +2464,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_BOOLEAN_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2501,14 +2493,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_BYTE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2530,14 +2522,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_CHAR_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2559,14 +2551,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_SHORT_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2586,14 +2578,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2607,14 +2599,14 @@
 /* File: armv6t2/OP_IPUT_WIDE.S */
     /* iput-wide vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_WIDE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2633,14 +2625,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_OBJECT_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2661,14 +2653,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_BOOLEAN_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2690,14 +2682,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_BYTE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2719,14 +2711,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_CHAR_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2748,14 +2740,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_SHORT_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2774,7 +2766,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2797,7 +2789,7 @@
      * 64-bit SGET handler.
      */
     /* sget-wide vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2828,7 +2820,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2855,7 +2847,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2882,7 +2874,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2909,7 +2901,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2936,7 +2928,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2962,7 +2954,7 @@
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2985,7 +2977,7 @@
      * 64-bit SPUT handler.
      */
     /* sput-wide vAA, field@BBBB */
-    ldr     r0, [rGLUE, #offGlue_methodClassDex]  @ r0<- DvmDex
+    ldr     r0, [rSELF, #offThread_methodClassDex]  @ r0<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields
     mov     r9, rINST, lsr #8           @ r9<- AA
@@ -3015,13 +3007,13 @@
      * for: sput-object, sput-object-volatile
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_SPUT_OBJECT_finish          @ no, continue
-    ldr     r9, [rGLUE, #offGlue_method]    @ r9<- current method
+    ldr     r9, [rSELF, #offThread_method]    @ r9<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r9, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -3041,7 +3033,7 @@
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -3068,7 +3060,7 @@
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -3095,7 +3087,7 @@
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -3122,7 +3114,7 @@
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -3149,7 +3141,7 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
@@ -3160,7 +3152,7 @@
     cmp     r0, #0                      @ already resolved?
     EXPORT_PC()                         @ must export for invoke
     bne     .LOP_INVOKE_VIRTUAL_continue        @ yes, continue on
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     mov     r2, #METHOD_VIRTUAL         @ resolver method type
     bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
@@ -3180,7 +3172,7 @@
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     .if     (!0)
     and     r10, r10, #15               @ r10<- D (or stays CCCC)
     .endif
@@ -3189,7 +3181,7 @@
     GET_VREG(r2, r10)                   @ r2<- "this" ptr
     ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
     cmp     r2, #0                      @ null "this"?
-    ldr     r9, [rGLUE, #offGlue_method] @ r9<- current method
+    ldr     r9, [rSELF, #offThread_method] @ r9<- current method
     beq     common_errNullObject        @ null "this", throw exception
     cmp     r0, #0                      @ already resolved?
     ldr     r9, [r9, #offMethod_clazz]  @ r9<- method->clazz
@@ -3213,7 +3205,7 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
@@ -3241,14 +3233,14 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
     ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
     cmp     r0, #0                      @ already resolved?
     EXPORT_PC()                         @ must export for invoke
     bne     common_invokeMethodNoRange @ yes, continue on
-0:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+0:  ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     mov     r2, #METHOD_STATIC          @ resolver method type
     bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
@@ -3274,9 +3266,9 @@
     .endif
     EXPORT_PC()                         @ must export for invoke
     GET_VREG(r0, r2)                    @ r0<- first arg ("this")
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- methodClassDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- methodClassDex
     cmp     r0, #0                      @ null obj?
-    ldr     r2, [rGLUE, #offGlue_method]  @ r2<- method
+    ldr     r2, [rSELF, #offThread_method]  @ r2<- method
     beq     common_errNullObject        @ yes, fail
     ldr     r0, [r0, #offObject_clazz]  @ r0<- thisPtr->clazz
     bl      dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
@@ -3304,7 +3296,7 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
@@ -3315,7 +3307,7 @@
     cmp     r0, #0                      @ already resolved?
     EXPORT_PC()                         @ must export for invoke
     bne     .LOP_INVOKE_VIRTUAL_RANGE_continue        @ yes, continue on
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     mov     r2, #METHOD_VIRTUAL         @ resolver method type
     bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
@@ -3337,7 +3329,7 @@
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     .if     (!1)
     and     r10, r10, #15               @ r10<- D (or stays CCCC)
     .endif
@@ -3346,7 +3338,7 @@
     GET_VREG(r2, r10)                   @ r2<- "this" ptr
     ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
     cmp     r2, #0                      @ null "this"?
-    ldr     r9, [rGLUE, #offGlue_method] @ r9<- current method
+    ldr     r9, [rSELF, #offThread_method] @ r9<- current method
     beq     common_errNullObject        @ null "this", throw exception
     cmp     r0, #0                      @ already resolved?
     ldr     r9, [r9, #offMethod_clazz]  @ r9<- method->clazz
@@ -3372,7 +3364,7 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
@@ -3402,14 +3394,14 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
     ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
     cmp     r0, #0                      @ already resolved?
     EXPORT_PC()                         @ must export for invoke
     bne     common_invokeMethodRange @ yes, continue on
-0:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+0:  ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     mov     r2, #METHOD_STATIC          @ resolver method type
     bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
@@ -3437,9 +3429,9 @@
     .endif
     EXPORT_PC()                         @ must export for invoke
     GET_VREG(r0, r2)                    @ r0<- first arg ("this")
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- methodClassDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- methodClassDex
     cmp     r0, #0                      @ null obj?
-    ldr     r2, [rGLUE, #offGlue_method]  @ r2<- method
+    ldr     r2, [rSELF, #offThread_method]  @ r2<- method
     beq     common_errNullObject        @ yes, fail
     ldr     r0, [r0, #offObject_clazz]  @ r0<- thisPtr->clazz
     bl      dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
@@ -7051,14 +7043,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_VOLATILE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -7079,14 +7071,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_VOLATILE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -7106,7 +7098,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -7133,7 +7125,7 @@
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -7161,14 +7153,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_OBJECT_VOLATILE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -7187,14 +7179,14 @@
      */
     /* iget-wide vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_WIDE_VOLATILE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -7210,14 +7202,14 @@
 /* File: armv5te/OP_IPUT_WIDE.S */
     /* iput-wide vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_WIDE_VOLATILE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -7235,7 +7227,7 @@
      * 64-bit SGET handler.
      */
     /* sget-wide vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -7265,7 +7257,7 @@
      * 64-bit SPUT handler.
      */
     /* sput-wide vAA, field@BBBB */
-    ldr     r0, [rGLUE, #offGlue_methodClassDex]  @ r0<- DvmDex
+    ldr     r0, [rSELF, #offThread_methodClassDex]  @ r0<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields
     mov     r9, rINST, lsr #8           @ r9<- AA
@@ -7304,7 +7296,7 @@
      * exception is indicated by AA, with some detail provided by BBBB.
      */
     /* op AA, ref@BBBB */
-    ldr     r0, [rGLUE, #offGlue_method]    @ r0<- glue->method
+    ldr     r0, [rSELF, #offThread_method]    @ r0<- self->method
     FETCH(r2, 1)                        @ r2<- BBBB
     EXPORT_PC()                         @ export the PC
     mov     r1, rINST, lsr #8           @ r1<- AA
@@ -7327,11 +7319,11 @@
      */
     /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */
     FETCH(r10, 1)                       @ r10<- BBBB
-    add     r1, rGLUE, #offGlue_retval  @ r1<- &glue->retval
+    add     r1, rSELF, #offThread_retval  @ r1<- &self->retval
     EXPORT_PC()                         @ can throw
     sub     sp, sp, #8                  @ make room for arg, +64 bit align
     mov     r0, rINST, lsr #12          @ r0<- B
-    str     r1, [sp]                    @ push &glue->retval
+    str     r1, [sp]                    @ push &self->retval
     bl      .LOP_EXECUTE_INLINE_continue        @ make call; will return after
     add     sp, sp, #8                  @ pop stack
     cmp     r0, #0                      @ test boolean result of inline
@@ -7357,11 +7349,11 @@
      */
     /* [opt] execute-inline/range {vCCCC..v(CCCC+AA-1)}, inline@BBBB */
     FETCH(r10, 1)                       @ r10<- BBBB
-    add     r1, rGLUE, #offGlue_retval  @ r1<- &glue->retval
+    add     r1, rSELF, #offThread_retval  @ r1<- &self->retval
     EXPORT_PC()                         @ can throw
     sub     sp, sp, #8                  @ make room for arg, +64 bit align
     mov     r0, rINST, lsr #8           @ r0<- AA
-    str     r1, [sp]                    @ push &glue->retval
+    str     r1, [sp]                    @ push &self->retval
     bl      .LOP_EXECUTE_INLINE_RANGE_continue        @ make call; will return after
     add     sp, sp, #8                  @ pop stack
     cmp     r0, #0                      @ test boolean result of inline
@@ -7372,12 +7364,23 @@
 
 /* ------------------------------ */
     .balign 64
-.L_OP_INVOKE_DIRECT_EMPTY: /* 0xf0 */
-/* File: armv5te/OP_INVOKE_DIRECT_EMPTY.S */
+.L_OP_INVOKE_OBJECT_INIT_RANGE: /* 0xf0 */
+/* File: armv5te/OP_INVOKE_OBJECT_INIT_RANGE.S */
     /*
-     * invoke-direct-empty is a no-op in a "standard" interpreter.
+     * Invoke Object.<init> on an object.  In practice we know that
+     * Object's nullary constructor doesn't do anything, so we just
+     * skip it (we know a debugger isn't active).
      */
-    FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
+    FETCH(r1, 2)                  @ r1<- CCCC
+    GET_VREG(r0, r1)                    @ r0<- "this" ptr
+    cmp     r0, #0                      @ check for NULL
+    beq     common_errNullObject        @ export PC and throw NPE
+    ldr     r1, [r0, #offObject_clazz]  @ r1<- obj->clazz
+    ldr     r2, [r1, #offClassObject_accessFlags] @ r2<- clazz->accessFlags
+    tst     r2, #CLASS_ISFINALIZABLE    @ is this class finalizable?
+    beq     1f                          @ nope, done
+    bl      dvmSetFinalizable           @ call dvmSetFinalizable(obj)
+1:  FETCH_ADVANCE_INST(2+1)       @ advance to next instr, load rINST
     GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
     GOTO_OPCODE(ip)                     @ execute it
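Editor's note: the comment above already describes the whole trick -- Object's nullary constructor does nothing, so the handler only needs the null check and the finalizable test. A minimal C sketch of that fast path, with assumed struct and flag names standing in for the real Dalvik definitions (illustrative only, not the VM's actual declarations):

    #include <stdbool.h>
    #include <stddef.h>

    /* Assumed stand-ins for Dalvik's Object/ClassObject and the
     * CLASS_ISFINALIZABLE access flag; names and value are illustrative. */
    #define CLASS_ISFINALIZABLE 0x80000000u

    struct ClassObject { unsigned int accessFlags; };
    struct Object      { struct ClassObject *clazz; };

    /* Hypothetical helper mirroring the dvmSetFinalizable(obj) call site. */
    static void markFinalizable(struct Object *obj) { (void)obj; }

    /* Returns false when a NullPointerException should be thrown. */
    static bool fastObjectInit(struct Object *thisPtr) {
        if (thisPtr == NULL)
            return false;                              /* beq common_errNullObject */
        if (thisPtr->clazz->accessFlags & CLASS_ISFINALIZABLE)
            markFinalizable(thisPtr);                  /* bl dvmSetFinalizable */
        return true;                                   /* otherwise a no-op */
    }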
 
@@ -7495,7 +7498,7 @@
     beq     common_errNullObject        @ object was null
     and     r2, r2, #15
     GET_VREG(r0, r2)                    @ r0<- fp[A]
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
     str     r0, [r3, r1]                @ obj.field (always 32 bits)<- r0
     cmp     r0, #0
@@ -7567,7 +7570,7 @@
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     .if     (!0)
     and     r10, r10, #15               @ r10<- D (or stays CCCC)
     .endif
@@ -7595,7 +7598,7 @@
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     .if     (!1)
     and     r10, r10, #15               @ r10<- D (or stays CCCC)
     .endif
@@ -7623,14 +7626,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_OBJECT_VOLATILE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -7650,7 +7653,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -7677,13 +7680,13 @@
      * for: sput-object, sput-object-volatile
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_SPUT_OBJECT_VOLATILE_finish          @ no, continue
-    ldr     r9, [rGLUE, #offGlue_method]    @ r9<- current method
+    ldr     r9, [rSELF, #offThread_method]    @ r9<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r9, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -7697,10 +7700,3110 @@
     .balign 64
 .L_OP_DISPATCH_FF: /* 0xff */
 /* File: armv5te/OP_DISPATCH_FF.S */
+    mov     ip, rINST, lsr #8           @ ip<- extended opcode
+    add     ip, ip, #256                @ add offset for extended opcodes
+    GOTO_OPCODE(ip)                     @ go to proper extended handler
+
+
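Editor's note: the 0xff handler is only a trampoline -- the high byte of the instruction unit selects one of the extended (jumbo) handlers stored after the first 256 table entries. A hedged C sketch of the same dispatch arithmetic (the table layout and handler type are assumptions, not Dalvik's actual mterp table):

    #include <stdint.h>

    typedef void (*Handler)(void);

    /* table: 256 regular entries followed by 256 extended entries. */
    static void dispatchFF(uint16_t instrUnit, Handler table[512]) {
        unsigned idx = (instrUnit >> 8) + 256;   /* mov ip, rINST, lsr #8 ; add ip, #256 */
        table[idx]();                            /* GOTO_OPCODE(ip) */
    }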
+/* ------------------------------ */
+    .balign 64
+.L_OP_CONST_CLASS_JUMBO: /* 0x100 */
+/* File: armv5te/OP_CONST_CLASS_JUMBO.S */
+    /* const-class/jumbo vBBBB, Class@AAAAAAAA */
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- self->methodClassDex
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResClasses]   @ r2<- dvmDex->pResClasses
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    FETCH(r9, 3)                        @ r9<- BBBB
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResClasses[AAAAaaaa]
+    cmp     r0, #0                      @ not yet resolved?
+    beq     .LOP_CONST_CLASS_JUMBO_resolve
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vBBBB<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
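Editor's note: jumbo forms carry a 32-bit pool index split across two 16-bit code units, low half first; the "orr r1, r0, r1, lsl #16" above rebuilds it. A tiny, self-contained C illustration of that reassembly:

    #include <stdint.h>
    #include <stdio.h>

    /* AAAAaaaa: high unit shifted into the top 16 bits, low unit in the bottom. */
    static uint32_t jumboIndex(uint16_t lo, uint16_t hi) {
        return (uint32_t)lo | ((uint32_t)hi << 16);
    }

    int main(void) {
        printf("0x%08x\n", jumboIndex(0xbeefu, 0xdeadu));   /* prints 0xdeadbeef */
        return 0;
    }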
+/* ------------------------------ */
+    .balign 64
+.L_OP_CHECK_CAST_JUMBO: /* 0x101 */
+/* File: armv5te/OP_CHECK_CAST_JUMBO.S */
+    /*
+     * Check to see if a cast from one class to another is allowed.
+     */
+    /* check-cast/jumbo vBBBB, class@AAAAAAAA */
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r3, 3)                        @ r3<- BBBB
+    orr     r2, r0, r2, lsl #16         @ r2<- AAAAaaaa
+    GET_VREG(r9, r3)                    @ r9<- object
+    ldr     r0, [rSELF, #offThread_methodClassDex]    @ r0<- pDvmDex
+    cmp     r9, #0                      @ is object null?
+    ldr     r0, [r0, #offDvmDex_pResClasses]    @ r0<- pDvmDex->pResClasses
+    beq     .LOP_CHECK_CAST_JUMBO_okay            @ null obj, cast always succeeds
+    ldr     r1, [r0, r2, lsl #2]        @ r1<- resolved class
+    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
+    cmp     r1, #0                      @ have we resolved this before?
+    beq     .LOP_CHECK_CAST_JUMBO_resolve         @ not resolved, do it now
+.LOP_CHECK_CAST_JUMBO_resolved:
+    cmp     r0, r1                      @ same class (trivial success)?
+    bne     .LOP_CHECK_CAST_JUMBO_fullcheck       @ no, do full check
+    b       .LOP_CHECK_CAST_JUMBO_okay            @ yes, finish up
+
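Editor's note: the handler only decides the trivial outcomes inline -- a null reference always passes, an exact class match passes, and anything else drops into the slower full check at the _fullcheck label. A rough C outline of that decision, with an assumed isAssignableFrom stub standing in for the out-of-line check:

    #include <stdbool.h>
    #include <stddef.h>

    struct ClassObject;
    struct Object { struct ClassObject *clazz; };

    /* Placeholder for the slow path; a real implementation would walk the
     * superclass/interface chains. */
    static bool isAssignableFrom(const struct ClassObject *dst,
                                 const struct ClassObject *src) {
        (void)dst; (void)src;
        return false;
    }

    static bool checkCast(const struct Object *obj, const struct ClassObject *dstClass) {
        if (obj == NULL)
            return true;                        /* null obj, cast always succeeds */
        if (obj->clazz == dstClass)
            return true;                        /* same class (trivial success) */
        return isAssignableFrom(dstClass, obj->clazz);   /* full check */
    }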
+/* ------------------------------ */
+    .balign 64
+.L_OP_INSTANCE_OF_JUMBO: /* 0x102 */
+/* File: armv5te/OP_INSTANCE_OF_JUMBO.S */
+    /*
+     * Check to see if an object reference is an instance of a class.
+     *
+     * Most common situation is a non-null object, being compared against
+     * an already-resolved class.
+     *
+     * TODO: convert most of this into a common subroutine, shared with
+     *       OP_INSTANCE_OF.S.
+     */
+    /* instance-of/jumbo vBBBB, vCCCC, class@AAAAAAAA */
+    FETCH(r3, 4)                        @ r3<- vCCCC
+    FETCH(r9, 3)                        @ r9<- vBBBB
+    GET_VREG(r0, r3)                    @ r0<- vCCCC (object)
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- pDvmDex
+    cmp     r0, #0                      @ is object null?
+    beq     .LOP_INSTANCE_OF_JUMBO_store           @ null obj, not an instance, store r0
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r3, 2)                        @ r3<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResClasses]    @ r2<- pDvmDex->pResClasses
+    orr     r3, r1, r3, lsl #16         @ r3<- AAAAaaaa
+    ldr     r1, [r2, r3, lsl #2]        @ r1<- resolved class
+    ldr     r0, [r0, #offObject_clazz]  @ r0<- obj->clazz
+    cmp     r1, #0                      @ have we resolved this before?
+    beq     .LOP_INSTANCE_OF_JUMBO_resolve         @ not resolved, do it now
+    b       .LOP_INSTANCE_OF_JUMBO_resolved        @ resolved, continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_NEW_INSTANCE_JUMBO: /* 0x103 */
+/* File: armv5te/OP_NEW_INSTANCE_JUMBO.S */
+    /*
+     * Create a new instance of a class.
+     */
+    /* new-instance/jumbo vBBBB, class@AAAAAAAA */
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
+    EXPORT_PC()                         @ req'd for init, resolve, alloc
+    cmp     r0, #0                      @ already resolved?
+    beq     .LOP_NEW_INSTANCE_JUMBO_resolve         @ no, resolve it now
+.LOP_NEW_INSTANCE_JUMBO_resolved:   @ r0=class
+    ldrb    r1, [r0, #offClassObject_status]    @ r1<- ClassStatus enum
+    cmp     r1, #CLASS_INITIALIZED      @ has class been initialized?
+    bne     .LOP_NEW_INSTANCE_JUMBO_needinit        @ no, init class now
+.LOP_NEW_INSTANCE_JUMBO_initialized: @ r0=class
+    mov     r1, #ALLOC_DONT_TRACK       @ flags for alloc call
+    bl      dvmAllocObject              @ r0<- new object
+    b       .LOP_NEW_INSTANCE_JUMBO_finish          @ continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_NEW_ARRAY_JUMBO: /* 0x104 */
+/* File: armv5te/OP_NEW_ARRAY_JUMBO.S */
+    /*
+     * Allocate an array of objects, specified with the array class
+     * and a count.
+     *
+     * The verifier guarantees that this is an array class, so we don't
+     * check for it here.
+     */
+    /* new-array/jumbo vBBBB, vCCCC, class@AAAAAAAA */
+    FETCH(r2, 1)                        @ r2<- aaaa (lo)
+    FETCH(r3, 2)                        @ r3<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- vCCCC
+    orr     r2, r2, r3, lsl #16         @ r2<- AAAAaaaa
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    GET_VREG(r1, r0)                    @ r1<- vCCCC (array length)
+    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
+    cmp     r1, #0                      @ check length
+    ldr     r0, [r3, r2, lsl #2]        @ r0<- resolved class
+    bmi     common_errNegativeArraySize @ negative length, bail - len in r1
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ req'd for resolve, alloc
+    bne     .LOP_NEW_ARRAY_JUMBO_finish          @ resolved, continue
+    b       .LOP_NEW_ARRAY_JUMBO_resolve         @ do resolve now
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_FILLED_NEW_ARRAY_JUMBO: /* 0x105 */
+/* File: armv5te/OP_FILLED_NEW_ARRAY_JUMBO.S */
+    /*
+     * Create a new array with elements filled from registers.
+     *
+     * TODO: convert most of this into a common subroutine, shared with
+     *       OP_FILLED_NEW_ARRAY.S.
+     */
+    /* filled-new-array/jumbo {vCCCC..v(CCCC+BBBB-1)}, type@AAAAAAAA */
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
+    EXPORT_PC()                         @ need for resolve and alloc
+    cmp     r0, #0                      @ already resolved?
+    bne     .LOP_FILLED_NEW_ARRAY_JUMBO_continue        @ yes, continue on
+8:  ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- call(clazz, ref)
+    cmp     r0, #0                      @ got null?
+    beq     common_exceptionThrown      @ yes, handle exception
+    b       .LOP_FILLED_NEW_ARRAY_JUMBO_continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_JUMBO: /* 0x106 */
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_JUMBO_resolved        @ resolved, continue
+
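Editor's note: every field handler below follows the same shape -- consult the per-DEX resolved-fields cache, and only fall out to the resolver (which may throw, hence the EXPORT_PC) on a miss. A schematic C version of that lookup, using an assumed resolver callback rather than dvmResolveInstField's real signature:

    #include <stddef.h>

    struct InstField;

    /* Assumed resolver hook; stands in for the dvmResolveInstField slow path. */
    typedef struct InstField *(*ResolveFn)(unsigned int fieldIdx);

    static struct InstField *lookupField(struct InstField **pResFields,
                                         unsigned int fieldIdx,
                                         ResolveFn resolve) {
        struct InstField *f = pResFields[fieldIdx];   /* ldr r0, [r2, r1, lsl #2] */
        if (f != NULL)
            return f;                                 /* already resolved */
        return resolve(fieldIdx);                     /* slow path; may throw */
    }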
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_WIDE_JUMBO: /* 0x107 */
+/* File: armv5te/OP_IGET_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit instance field get.
+     */
+    /* iget-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_WIDE_JUMBO_finish          @ no, already resolved
+    ldr     r2, [rSELF, #offThread_method] @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_WIDE_JUMBO_resolved        @ resolved, continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_OBJECT_JUMBO: /* 0x108 */
+/* File: armv5te/OP_IGET_OBJECT_JUMBO.S */
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_OBJECT_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_OBJECT_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_BOOLEAN_JUMBO: /* 0x109 */
+/* File: armv5te/OP_IGET_BOOLEAN_JUMBO.S */
+@include "armv5te/OP_IGET_JUMBO.S" { "load":"ldrb", "sqnum":"1" }
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_BOOLEAN_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_BOOLEAN_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_BYTE_JUMBO: /* 0x10a */
+/* File: armv5te/OP_IGET_BYTE_JUMBO.S */
+@include "armv5te/OP_IGET_JUMBO.S" { "load":"ldrsb", "sqnum":"2" }
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_BYTE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_BYTE_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_CHAR_JUMBO: /* 0x10b */
+/* File: armv5te/OP_IGET_CHAR_JUMBO.S */
+@include "armv5te/OP_IGET_JUMBO.S" { "load":"ldrh", "sqnum":"3" }
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_CHAR_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_CHAR_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_SHORT_JUMBO: /* 0x10c */
+/* File: armv5te/OP_IGET_SHORT_JUMBO.S */
+@include "armv5te/OP_IGET_JUMBO.S" { "load":"ldrsh", "sqnum":"4" }
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_SHORT_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_SHORT_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_JUMBO: /* 0x10d */
+/* File: armv5te/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_JUMBO_resolved        @ resolved, continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_WIDE_JUMBO: /* 0x10e */
+/* File: armv5te/OP_IPUT_WIDE_JUMBO.S */
+    /* iput-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_WIDE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_WIDE_JUMBO_resolved        @ resolved, continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_OBJECT_JUMBO: /* 0x10f */
+/* File: armv5te/OP_IPUT_OBJECT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     */
+    /* iput-object/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_OBJECT_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_OBJECT_JUMBO_resolved        @ resolved, continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_BOOLEAN_JUMBO: /* 0x110 */
+/* File: armv5te/OP_IPUT_BOOLEAN_JUMBO.S */
+@include "armv5te/OP_IPUT_JUMBO.S" { "store":"strb", "sqnum":"1" }
+/* File: armv5te/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_BOOLEAN_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_BOOLEAN_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_BYTE_JUMBO: /* 0x111 */
+/* File: armv5te/OP_IPUT_BYTE_JUMBO.S */
+@include "armv5te/OP_IPUT_JUMBO.S" { "store":"strb", "sqnum":"2" }
+/* File: armv5te/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_BYTE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_BYTE_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_CHAR_JUMBO: /* 0x112 */
+/* File: armv5te/OP_IPUT_CHAR_JUMBO.S */
+@include "armv5te/OP_IPUT_JUMBO.S" { "store":"strh", "sqnum":"3" }
+/* File: armv5te/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_CHAR_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_CHAR_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_SHORT_JUMBO: /* 0x113 */
+/* File: armv5te/OP_IPUT_SHORT_JUMBO.S */
+@include "armv5te/OP_IPUT_JUMBO.S" { "store":"strh", "sqnum":"4" }
+/* File: armv5te/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_SHORT_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_SHORT_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_JUMBO: /* 0x114 */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_WIDE_JUMBO: /* 0x115 */
+/* File: armv5te/OP_SGET_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit SGET handler.
+     */
+    /* sget-wide/jumbo vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_WIDE_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_WIDE_JUMBO_finish:
+    FETCH(r9, 3)                        @ r9<- BBBB
+    .if 0
+    add     r0, r0, #offStaticField_value @ r0<- pointer to data
+    bl      dvmQuasiAtomicRead64        @ r0/r1<- contents of field
+    .else
+    ldrd    r0, [r0, #offStaticField_value] @ r0/r1<- field value (aligned)
+    .endif
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BBBB]
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    stmia   r9, {r0-r1}                 @ vBBBB/vBBBB+1<- r0/r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
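Editor's note: the ".if 0 / .else" pair is the template's volatile switch -- volatile 64-bit statics go through dvmQuasiAtomicRead64, non-volatile ones use a plain ldrd. A hedged C sketch of the same distinction, using a compiler builtin instead of Dalvik's helper:

    #include <stdint.h>

    static int64_t readWideStatic(const int64_t *valueAddr, int isVolatile) {
        if (isVolatile)
            return __atomic_load_n(valueAddr, __ATOMIC_SEQ_CST);   /* atomic 64-bit read */
        return *valueAddr;                                         /* plain (ldrd-style) load */
    }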
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_OBJECT_JUMBO: /* 0x116 */
+/* File: armv5te/OP_SGET_OBJECT_JUMBO.S */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_OBJECT_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_OBJECT_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_BOOLEAN_JUMBO: /* 0x117 */
+/* File: armv5te/OP_SGET_BOOLEAN_JUMBO.S */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_BOOLEAN_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_BOOLEAN_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_BYTE_JUMBO: /* 0x118 */
+/* File: armv5te/OP_SGET_BYTE_JUMBO.S */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_BYTE_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_BYTE_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_CHAR_JUMBO: /* 0x119 */
+/* File: armv5te/OP_SGET_CHAR_JUMBO.S */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_CHAR_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_CHAR_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_SHORT_JUMBO: /* 0x11a */
+/* File: armv5te/OP_SGET_SHORT_JUMBO.S */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_SHORT_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_SHORT_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_JUMBO: /* 0x11b */
+/* File: armv5te/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str     r1, [r0, #offStaticField_value] @ field<- vBBBB
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_WIDE_JUMBO: /* 0x11c */
+/* File: armv5te/OP_SPUT_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit SPUT handler.
+     */
+    /* sput-wide/jumbo vBBBB, field@AAAAAAAA */
+    ldr     r0, [rSELF, #offThread_methodClassDex]  @ r0<- DvmDex
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    ldr     r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    FETCH(r9, 3)                        @ r9<- BBBB
+    ldr     r2, [r0, r1, lsl #2]        @ r2<- resolved StaticField ptr
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BBBB]
+    cmp     r2, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_WIDE_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_WIDE_JUMBO_finish: @ field ptr in r2, BBBB in r9
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vBBBB/vBBBB+1
+    GET_INST_OPCODE(r10)                @ extract opcode from rINST
+    .if 0
+    add     r2, r2, #offStaticField_value @ r2<- pointer to data
+    bl      dvmQuasiAtomicSwap64        @ stores r0/r1 into addr r2
+    .else
+    strd    r0, [r2, #offStaticField_value] @ field<- vBBBB/vBBBB+1
+    .endif
+    GOTO_OPCODE(r10)                    @ jump to next instruction
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_OBJECT_JUMBO: /* 0x11d */
+/* File: armv5te/OP_SPUT_OBJECT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler for objects
+     */
+    /* sput-object/jumbo vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_SPUT_OBJECT_JUMBO_finish          @ no, continue
+    ldr     r9, [rSELF, #offThread_method]    @ r9<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r9, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_OBJECT_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_BOOLEAN_JUMBO: /* 0x11e */
+/* File: armv5te/OP_SPUT_BOOLEAN_JUMBO.S */
+/* File: armv5te/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_BOOLEAN_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_BOOLEAN_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str     r1, [r0, #offStaticField_value] @ field<- vBBBB
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_BYTE_JUMBO: /* 0x11f */
+/* File: armv5te/OP_SPUT_BYTE_JUMBO.S */
+/* File: armv5te/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_BYTE_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_BYTE_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str     r1, [r0, #offStaticField_value] @ field<- vBBBB
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_CHAR_JUMBO: /* 0x120 */
+/* File: armv5te/OP_SPUT_CHAR_JUMBO.S */
+/* File: armv5te/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_CHAR_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_CHAR_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str     r1, [r0, #offStaticField_value] @ field<- vBBBB
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_SHORT_JUMBO: /* 0x121 */
+/* File: armv5te/OP_SPUT_SHORT_JUMBO.S */
+/* File: armv5te/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_SHORT_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_SHORT_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str     r1, [r0, #offStaticField_value] @ field<- vBBBB
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_VIRTUAL_JUMBO: /* 0x122 */
+/* File: armv5te/OP_INVOKE_VIRTUAL_JUMBO.S */
+    /*
+     * Handle a virtual method call.
+     */
+    /* invoke-virtual/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ must export for invoke
+    bne     .LOP_INVOKE_VIRTUAL_JUMBO_continue        @ yes, continue on
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    mov     r2, #METHOD_VIRTUAL         @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    bne     .LOP_INVOKE_VIRTUAL_JUMBO_continue        @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_SUPER_JUMBO: /* 0x123 */
+/* File: armv5te/OP_INVOKE_SUPER_JUMBO.S */
+    /*
+     * Handle a "super" method call.
+     */
+    /* invoke-super/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    FETCH(r10, 4)                       @ r10<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    GET_VREG(r2, r10)                   @ r2<- "this" ptr
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
+    cmp     r2, #0                      @ null "this"?
+    ldr     r9, [rSELF, #offThread_method] @ r9<- current method
+    beq     common_errNullObject        @ null "this", throw exception
+    cmp     r0, #0                      @ already resolved?
+    ldr     r9, [r9, #offMethod_clazz]  @ r9<- method->clazz
+    EXPORT_PC()                         @ must export for invoke
+    bne     .LOP_INVOKE_SUPER_JUMBO_continue        @ resolved, continue on
+    b       .LOP_INVOKE_SUPER_JUMBO_resolve         @ do resolve now
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_DIRECT_JUMBO: /* 0x124 */
+/* File: armv5te/OP_INVOKE_DIRECT_JUMBO.S */
+    /*
+     * Handle a direct method call.
+     *
+     * (We could defer the "is 'this' pointer null" test to the common
+     * method invocation code, and use a flag to indicate that static
+     * calls don't count.  If we do this as part of copying the arguments
+     * out, we could avoid loading the first arg twice.)
+     *
+     */
+    /* invoke-direct/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    FETCH(r10, 4)                       @ r10<- CCCC
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ must export for invoke
+    GET_VREG(r2, r10)                   @ r2<- "this" ptr
+    beq     .LOP_INVOKE_DIRECT_JUMBO_resolve         @ not resolved, do it now
+.LOP_INVOKE_DIRECT_JUMBO_finish:
+    cmp     r2, #0                      @ null "this" ref?
+    bne     common_invokeMethodJumbo    @ no, continue on
+    b       common_errNullObject        @ yes, throw exception
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_STATIC_JUMBO: /* 0x125 */
+/* File: armv5te/OP_INVOKE_STATIC_JUMBO.S */
+    /*
+     * Handle a static method call.
+     */
+    /* invoke-static/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ must export for invoke
+    bne     common_invokeMethodJumbo    @ yes, continue on
+0:  ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    mov     r2, #METHOD_STATIC          @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    bne     common_invokeMethodJumbo    @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_INTERFACE_JUMBO: /* 0x126 */
+/* File: armv5te/OP_INVOKE_INTERFACE_JUMBO.S */
+    /*
+     * Handle an interface method call.
+     */
+    /* invoke-interface/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    FETCH(r2, 4)                        @ r2<- CCCC
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    EXPORT_PC()                         @ must export for invoke
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    GET_VREG(r0, r2)                    @ r0<- first arg ("this")
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- methodClassDex
+    cmp     r0, #0                      @ null obj?
+    ldr     r2, [rSELF, #offThread_method]  @ r2<- method
+    beq     common_errNullObject        @ yes, fail
+    ldr     r0, [r0, #offObject_clazz]  @ r0<- thisPtr->clazz
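+    @ dvmFindInterfaceMethodInCache resolves the interface method against the
+    @ receiver's concrete class before dispatching.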
+    bl      dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
+    cmp     r0, #0                      @ failed?
+    beq     common_exceptionThrown      @ yes, handle exception
+    b       common_invokeMethodJumbo    @ jump to common handler
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_27FF: /* 0x127 */
+/* File: armv5te/OP_UNUSED_27FF.S */
 /* File: armv5te/unused.S */
     bl      common_abort
 
 
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_28FF: /* 0x128 */
+/* File: armv5te/OP_UNUSED_28FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_29FF: /* 0x129 */
+/* File: armv5te/OP_UNUSED_29FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2AFF: /* 0x12a */
+/* File: armv5te/OP_UNUSED_2AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2BFF: /* 0x12b */
+/* File: armv5te/OP_UNUSED_2BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2CFF: /* 0x12c */
+/* File: armv5te/OP_UNUSED_2CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2DFF: /* 0x12d */
+/* File: armv5te/OP_UNUSED_2DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2EFF: /* 0x12e */
+/* File: armv5te/OP_UNUSED_2EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2FFF: /* 0x12f */
+/* File: armv5te/OP_UNUSED_2FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_30FF: /* 0x130 */
+/* File: armv5te/OP_UNUSED_30FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_31FF: /* 0x131 */
+/* File: armv5te/OP_UNUSED_31FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_32FF: /* 0x132 */
+/* File: armv5te/OP_UNUSED_32FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_33FF: /* 0x133 */
+/* File: armv5te/OP_UNUSED_33FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_34FF: /* 0x134 */
+/* File: armv5te/OP_UNUSED_34FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_35FF: /* 0x135 */
+/* File: armv5te/OP_UNUSED_35FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_36FF: /* 0x136 */
+/* File: armv5te/OP_UNUSED_36FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_37FF: /* 0x137 */
+/* File: armv5te/OP_UNUSED_37FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_38FF: /* 0x138 */
+/* File: armv5te/OP_UNUSED_38FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_39FF: /* 0x139 */
+/* File: armv5te/OP_UNUSED_39FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3AFF: /* 0x13a */
+/* File: armv5te/OP_UNUSED_3AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3BFF: /* 0x13b */
+/* File: armv5te/OP_UNUSED_3BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3CFF: /* 0x13c */
+/* File: armv5te/OP_UNUSED_3CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3DFF: /* 0x13d */
+/* File: armv5te/OP_UNUSED_3DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3EFF: /* 0x13e */
+/* File: armv5te/OP_UNUSED_3EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3FFF: /* 0x13f */
+/* File: armv5te/OP_UNUSED_3FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_40FF: /* 0x140 */
+/* File: armv5te/OP_UNUSED_40FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_41FF: /* 0x141 */
+/* File: armv5te/OP_UNUSED_41FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_42FF: /* 0x142 */
+/* File: armv5te/OP_UNUSED_42FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_43FF: /* 0x143 */
+/* File: armv5te/OP_UNUSED_43FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_44FF: /* 0x144 */
+/* File: armv5te/OP_UNUSED_44FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_45FF: /* 0x145 */
+/* File: armv5te/OP_UNUSED_45FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_46FF: /* 0x146 */
+/* File: armv5te/OP_UNUSED_46FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_47FF: /* 0x147 */
+/* File: armv5te/OP_UNUSED_47FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_48FF: /* 0x148 */
+/* File: armv5te/OP_UNUSED_48FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_49FF: /* 0x149 */
+/* File: armv5te/OP_UNUSED_49FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4AFF: /* 0x14a */
+/* File: armv5te/OP_UNUSED_4AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4BFF: /* 0x14b */
+/* File: armv5te/OP_UNUSED_4BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4CFF: /* 0x14c */
+/* File: armv5te/OP_UNUSED_4CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4DFF: /* 0x14d */
+/* File: armv5te/OP_UNUSED_4DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4EFF: /* 0x14e */
+/* File: armv5te/OP_UNUSED_4EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4FFF: /* 0x14f */
+/* File: armv5te/OP_UNUSED_4FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_50FF: /* 0x150 */
+/* File: armv5te/OP_UNUSED_50FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_51FF: /* 0x151 */
+/* File: armv5te/OP_UNUSED_51FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_52FF: /* 0x152 */
+/* File: armv5te/OP_UNUSED_52FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_53FF: /* 0x153 */
+/* File: armv5te/OP_UNUSED_53FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_54FF: /* 0x154 */
+/* File: armv5te/OP_UNUSED_54FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_55FF: /* 0x155 */
+/* File: armv5te/OP_UNUSED_55FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_56FF: /* 0x156 */
+/* File: armv5te/OP_UNUSED_56FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_57FF: /* 0x157 */
+/* File: armv5te/OP_UNUSED_57FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_58FF: /* 0x158 */
+/* File: armv5te/OP_UNUSED_58FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_59FF: /* 0x159 */
+/* File: armv5te/OP_UNUSED_59FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5AFF: /* 0x15a */
+/* File: armv5te/OP_UNUSED_5AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5BFF: /* 0x15b */
+/* File: armv5te/OP_UNUSED_5BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5CFF: /* 0x15c */
+/* File: armv5te/OP_UNUSED_5CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5DFF: /* 0x15d */
+/* File: armv5te/OP_UNUSED_5DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5EFF: /* 0x15e */
+/* File: armv5te/OP_UNUSED_5EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5FFF: /* 0x15f */
+/* File: armv5te/OP_UNUSED_5FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_60FF: /* 0x160 */
+/* File: armv5te/OP_UNUSED_60FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_61FF: /* 0x161 */
+/* File: armv5te/OP_UNUSED_61FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_62FF: /* 0x162 */
+/* File: armv5te/OP_UNUSED_62FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_63FF: /* 0x163 */
+/* File: armv5te/OP_UNUSED_63FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_64FF: /* 0x164 */
+/* File: armv5te/OP_UNUSED_64FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_65FF: /* 0x165 */
+/* File: armv5te/OP_UNUSED_65FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_66FF: /* 0x166 */
+/* File: armv5te/OP_UNUSED_66FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_67FF: /* 0x167 */
+/* File: armv5te/OP_UNUSED_67FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_68FF: /* 0x168 */
+/* File: armv5te/OP_UNUSED_68FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_69FF: /* 0x169 */
+/* File: armv5te/OP_UNUSED_69FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6AFF: /* 0x16a */
+/* File: armv5te/OP_UNUSED_6AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6BFF: /* 0x16b */
+/* File: armv5te/OP_UNUSED_6BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6CFF: /* 0x16c */
+/* File: armv5te/OP_UNUSED_6CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6DFF: /* 0x16d */
+/* File: armv5te/OP_UNUSED_6DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6EFF: /* 0x16e */
+/* File: armv5te/OP_UNUSED_6EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6FFF: /* 0x16f */
+/* File: armv5te/OP_UNUSED_6FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_70FF: /* 0x170 */
+/* File: armv5te/OP_UNUSED_70FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_71FF: /* 0x171 */
+/* File: armv5te/OP_UNUSED_71FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_72FF: /* 0x172 */
+/* File: armv5te/OP_UNUSED_72FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_73FF: /* 0x173 */
+/* File: armv5te/OP_UNUSED_73FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_74FF: /* 0x174 */
+/* File: armv5te/OP_UNUSED_74FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_75FF: /* 0x175 */
+/* File: armv5te/OP_UNUSED_75FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_76FF: /* 0x176 */
+/* File: armv5te/OP_UNUSED_76FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_77FF: /* 0x177 */
+/* File: armv5te/OP_UNUSED_77FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_78FF: /* 0x178 */
+/* File: armv5te/OP_UNUSED_78FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_79FF: /* 0x179 */
+/* File: armv5te/OP_UNUSED_79FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7AFF: /* 0x17a */
+/* File: armv5te/OP_UNUSED_7AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7BFF: /* 0x17b */
+/* File: armv5te/OP_UNUSED_7BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7CFF: /* 0x17c */
+/* File: armv5te/OP_UNUSED_7CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7DFF: /* 0x17d */
+/* File: armv5te/OP_UNUSED_7DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7EFF: /* 0x17e */
+/* File: armv5te/OP_UNUSED_7EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7FFF: /* 0x17f */
+/* File: armv5te/OP_UNUSED_7FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_80FF: /* 0x180 */
+/* File: armv5te/OP_UNUSED_80FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_81FF: /* 0x181 */
+/* File: armv5te/OP_UNUSED_81FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_82FF: /* 0x182 */
+/* File: armv5te/OP_UNUSED_82FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_83FF: /* 0x183 */
+/* File: armv5te/OP_UNUSED_83FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_84FF: /* 0x184 */
+/* File: armv5te/OP_UNUSED_84FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_85FF: /* 0x185 */
+/* File: armv5te/OP_UNUSED_85FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_86FF: /* 0x186 */
+/* File: armv5te/OP_UNUSED_86FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_87FF: /* 0x187 */
+/* File: armv5te/OP_UNUSED_87FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_88FF: /* 0x188 */
+/* File: armv5te/OP_UNUSED_88FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_89FF: /* 0x189 */
+/* File: armv5te/OP_UNUSED_89FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8AFF: /* 0x18a */
+/* File: armv5te/OP_UNUSED_8AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8BFF: /* 0x18b */
+/* File: armv5te/OP_UNUSED_8BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8CFF: /* 0x18c */
+/* File: armv5te/OP_UNUSED_8CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8DFF: /* 0x18d */
+/* File: armv5te/OP_UNUSED_8DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8EFF: /* 0x18e */
+/* File: armv5te/OP_UNUSED_8EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8FFF: /* 0x18f */
+/* File: armv5te/OP_UNUSED_8FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_90FF: /* 0x190 */
+/* File: armv5te/OP_UNUSED_90FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_91FF: /* 0x191 */
+/* File: armv5te/OP_UNUSED_91FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_92FF: /* 0x192 */
+/* File: armv5te/OP_UNUSED_92FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_93FF: /* 0x193 */
+/* File: armv5te/OP_UNUSED_93FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_94FF: /* 0x194 */
+/* File: armv5te/OP_UNUSED_94FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_95FF: /* 0x195 */
+/* File: armv5te/OP_UNUSED_95FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_96FF: /* 0x196 */
+/* File: armv5te/OP_UNUSED_96FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_97FF: /* 0x197 */
+/* File: armv5te/OP_UNUSED_97FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_98FF: /* 0x198 */
+/* File: armv5te/OP_UNUSED_98FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_99FF: /* 0x199 */
+/* File: armv5te/OP_UNUSED_99FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9AFF: /* 0x19a */
+/* File: armv5te/OP_UNUSED_9AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9BFF: /* 0x19b */
+/* File: armv5te/OP_UNUSED_9BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9CFF: /* 0x19c */
+/* File: armv5te/OP_UNUSED_9CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9DFF: /* 0x19d */
+/* File: armv5te/OP_UNUSED_9DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9EFF: /* 0x19e */
+/* File: armv5te/OP_UNUSED_9EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9FFF: /* 0x19f */
+/* File: armv5te/OP_UNUSED_9FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A0FF: /* 0x1a0 */
+/* File: armv5te/OP_UNUSED_A0FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A1FF: /* 0x1a1 */
+/* File: armv5te/OP_UNUSED_A1FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A2FF: /* 0x1a2 */
+/* File: armv5te/OP_UNUSED_A2FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A3FF: /* 0x1a3 */
+/* File: armv5te/OP_UNUSED_A3FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A4FF: /* 0x1a4 */
+/* File: armv5te/OP_UNUSED_A4FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A5FF: /* 0x1a5 */
+/* File: armv5te/OP_UNUSED_A5FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A6FF: /* 0x1a6 */
+/* File: armv5te/OP_UNUSED_A6FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A7FF: /* 0x1a7 */
+/* File: armv5te/OP_UNUSED_A7FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A8FF: /* 0x1a8 */
+/* File: armv5te/OP_UNUSED_A8FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A9FF: /* 0x1a9 */
+/* File: armv5te/OP_UNUSED_A9FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_AAFF: /* 0x1aa */
+/* File: armv5te/OP_UNUSED_AAFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_ABFF: /* 0x1ab */
+/* File: armv5te/OP_UNUSED_ABFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_ACFF: /* 0x1ac */
+/* File: armv5te/OP_UNUSED_ACFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_ADFF: /* 0x1ad */
+/* File: armv5te/OP_UNUSED_ADFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_AEFF: /* 0x1ae */
+/* File: armv5te/OP_UNUSED_AEFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_AFFF: /* 0x1af */
+/* File: armv5te/OP_UNUSED_AFFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B0FF: /* 0x1b0 */
+/* File: armv5te/OP_UNUSED_B0FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B1FF: /* 0x1b1 */
+/* File: armv5te/OP_UNUSED_B1FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B2FF: /* 0x1b2 */
+/* File: armv5te/OP_UNUSED_B2FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B3FF: /* 0x1b3 */
+/* File: armv5te/OP_UNUSED_B3FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B4FF: /* 0x1b4 */
+/* File: armv5te/OP_UNUSED_B4FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B5FF: /* 0x1b5 */
+/* File: armv5te/OP_UNUSED_B5FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B6FF: /* 0x1b6 */
+/* File: armv5te/OP_UNUSED_B6FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B7FF: /* 0x1b7 */
+/* File: armv5te/OP_UNUSED_B7FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B8FF: /* 0x1b8 */
+/* File: armv5te/OP_UNUSED_B8FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B9FF: /* 0x1b9 */
+/* File: armv5te/OP_UNUSED_B9FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BAFF: /* 0x1ba */
+/* File: armv5te/OP_UNUSED_BAFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BBFF: /* 0x1bb */
+/* File: armv5te/OP_UNUSED_BBFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BCFF: /* 0x1bc */
+/* File: armv5te/OP_UNUSED_BCFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BDFF: /* 0x1bd */
+/* File: armv5te/OP_UNUSED_BDFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BEFF: /* 0x1be */
+/* File: armv5te/OP_UNUSED_BEFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BFFF: /* 0x1bf */
+/* File: armv5te/OP_UNUSED_BFFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C0FF: /* 0x1c0 */
+/* File: armv5te/OP_UNUSED_C0FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C1FF: /* 0x1c1 */
+/* File: armv5te/OP_UNUSED_C1FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C2FF: /* 0x1c2 */
+/* File: armv5te/OP_UNUSED_C2FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C3FF: /* 0x1c3 */
+/* File: armv5te/OP_UNUSED_C3FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C4FF: /* 0x1c4 */
+/* File: armv5te/OP_UNUSED_C4FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C5FF: /* 0x1c5 */
+/* File: armv5te/OP_UNUSED_C5FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C6FF: /* 0x1c6 */
+/* File: armv5te/OP_UNUSED_C6FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C7FF: /* 0x1c7 */
+/* File: armv5te/OP_UNUSED_C7FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C8FF: /* 0x1c8 */
+/* File: armv5te/OP_UNUSED_C8FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C9FF: /* 0x1c9 */
+/* File: armv5te/OP_UNUSED_C9FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CAFF: /* 0x1ca */
+/* File: armv5te/OP_UNUSED_CAFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CBFF: /* 0x1cb */
+/* File: armv5te/OP_UNUSED_CBFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CCFF: /* 0x1cc */
+/* File: armv5te/OP_UNUSED_CCFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CDFF: /* 0x1cd */
+/* File: armv5te/OP_UNUSED_CDFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CEFF: /* 0x1ce */
+/* File: armv5te/OP_UNUSED_CEFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CFFF: /* 0x1cf */
+/* File: armv5te/OP_UNUSED_CFFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D0FF: /* 0x1d0 */
+/* File: armv5te/OP_UNUSED_D0FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D1FF: /* 0x1d1 */
+/* File: armv5te/OP_UNUSED_D1FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D2FF: /* 0x1d2 */
+/* File: armv5te/OP_UNUSED_D2FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D3FF: /* 0x1d3 */
+/* File: armv5te/OP_UNUSED_D3FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D4FF: /* 0x1d4 */
+/* File: armv5te/OP_UNUSED_D4FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D5FF: /* 0x1d5 */
+/* File: armv5te/OP_UNUSED_D5FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D6FF: /* 0x1d6 */
+/* File: armv5te/OP_UNUSED_D6FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D7FF: /* 0x1d7 */
+/* File: armv5te/OP_UNUSED_D7FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D8FF: /* 0x1d8 */
+/* File: armv5te/OP_UNUSED_D8FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D9FF: /* 0x1d9 */
+/* File: armv5te/OP_UNUSED_D9FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DAFF: /* 0x1da */
+/* File: armv5te/OP_UNUSED_DAFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DBFF: /* 0x1db */
+/* File: armv5te/OP_UNUSED_DBFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DCFF: /* 0x1dc */
+/* File: armv5te/OP_UNUSED_DCFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DDFF: /* 0x1dd */
+/* File: armv5te/OP_UNUSED_DDFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DEFF: /* 0x1de */
+/* File: armv5te/OP_UNUSED_DEFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DFFF: /* 0x1df */
+/* File: armv5te/OP_UNUSED_DFFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E0FF: /* 0x1e0 */
+/* File: armv5te/OP_UNUSED_E0FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E1FF: /* 0x1e1 */
+/* File: armv5te/OP_UNUSED_E1FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E2FF: /* 0x1e2 */
+/* File: armv5te/OP_UNUSED_E2FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E3FF: /* 0x1e3 */
+/* File: armv5te/OP_UNUSED_E3FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E4FF: /* 0x1e4 */
+/* File: armv5te/OP_UNUSED_E4FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E5FF: /* 0x1e5 */
+/* File: armv5te/OP_UNUSED_E5FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E6FF: /* 0x1e6 */
+/* File: armv5te/OP_UNUSED_E6FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E7FF: /* 0x1e7 */
+/* File: armv5te/OP_UNUSED_E7FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E8FF: /* 0x1e8 */
+/* File: armv5te/OP_UNUSED_E8FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E9FF: /* 0x1e9 */
+/* File: armv5te/OP_UNUSED_E9FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EAFF: /* 0x1ea */
+/* File: armv5te/OP_UNUSED_EAFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EBFF: /* 0x1eb */
+/* File: armv5te/OP_UNUSED_EBFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_ECFF: /* 0x1ec */
+/* File: armv5te/OP_UNUSED_ECFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EDFF: /* 0x1ed */
+/* File: armv5te/OP_UNUSED_EDFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EEFF: /* 0x1ee */
+/* File: armv5te/OP_UNUSED_EEFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EFFF: /* 0x1ef */
+/* File: armv5te/OP_UNUSED_EFFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_F0FF: /* 0x1f0 */
+/* File: armv5te/OP_UNUSED_F0FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_F1FF: /* 0x1f1 */
+/* File: armv5te/OP_UNUSED_F1FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_OBJECT_INIT_JUMBO: /* 0x1f2 */
+/* File: armv5te/OP_INVOKE_OBJECT_INIT_JUMBO.S */
+/* File: armv5te/OP_INVOKE_OBJECT_INIT_RANGE.S */
+    /*
+     * Invoke Object.<init> on an object.  In practice we know that
+     * Object's nullary constructor doesn't do anything, so we just
+     * skip it (we know a debugger isn't active).
+     */
+    FETCH(r1, 4)                        @ r1<- CCCC
+    GET_VREG(r0, r1)                    @ r0<- "this" ptr
+    cmp     r0, #0                      @ check for NULL
+    beq     common_errNullObject        @ export PC and throw NPE
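+    @ Even though the constructor call is skipped, a finalizable class still
+    @ needs dvmSetFinalizable so the object is registered for finalization.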
+    ldr     r1, [r0, #offObject_clazz]  @ r1<- obj->clazz
+    ldr     r2, [r1, #offClassObject_accessFlags] @ r2<- clazz->accessFlags
+    tst     r2, #CLASS_ISFINALIZABLE    @ is this class finalizable?
+    beq     1f                          @ nope, done
+    bl      dvmSetFinalizable           @ call dvmSetFinalizable(obj)
+1:  FETCH_ADVANCE_INST(4+1)             @ advance to next instr, load rINST
+    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
+    GOTO_OPCODE(ip)                     @ execute it
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_VOLATILE_JUMBO: /* 0x1f3 */
+/* File: armv5te/OP_IGET_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_VOLATILE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_VOLATILE_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_WIDE_VOLATILE_JUMBO: /* 0x1f4 */
+/* File: armv5te/OP_IGET_WIDE_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_IGET_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit instance field get.
+     */
+    /* iget-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
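+    @ Only field resolution is inline; the volatile 64-bit load is performed
+    @ at .LOP_IGET_WIDE_VOLATILE_JUMBO_finish.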
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_WIDE_VOLATILE_JUMBO_finish          @ no, already resolved
+    ldr     r2, [rSELF, #offThread_method] @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_WIDE_VOLATILE_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_OBJECT_VOLATILE_JUMBO: /* 0x1f5 */
+/* File: armv5te/OP_IGET_OBJECT_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_IGET_OBJECT_JUMBO.S */
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_OBJECT_VOLATILE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_OBJECT_VOLATILE_JUMBO_resolved        @ resolved, continue
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_VOLATILE_JUMBO: /* 0x1f6 */
+/* File: armv5te/OP_IPUT_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_VOLATILE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_VOLATILE_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_WIDE_VOLATILE_JUMBO: /* 0x1f7 */
+/* File: armv5te/OP_IPUT_WIDE_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_IPUT_WIDE_JUMBO.S */
+    /* iput-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_WIDE_VOLATILE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_WIDE_VOLATILE_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_OBJECT_VOLATILE_JUMBO: /* 0x1f8 */
+/* File: armv5te/OP_IPUT_OBJECT_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_IPUT_OBJECT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     */
+    /* iput-object/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_OBJECT_VOLATILE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_OBJECT_VOLATILE_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_VOLATILE_JUMBO: /* 0x1f9 */
+/* File: armv5te/OP_SGET_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_VOLATILE_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_VOLATILE_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    SMP_DMB                            @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
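+    /*
+     * The volatile sget above differs from the plain jumbo sget only in the
+     * SMP_DMB after the load, giving the read acquire semantics on SMP
+     * builds.  Approximately, with ANDROID_MEMBAR_FULL standing in for the
+     * barrier:
+     *
+     *     s4 val = sfield->value.i;
+     *     ANDROID_MEMBAR_FULL();        // nothing later may be hoisted above the load
+     *     fp[BBBB] = val;
+     */
+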
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_WIDE_VOLATILE_JUMBO: /* 0x1fa */
+/* File: armv5te/OP_SGET_WIDE_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_SGET_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit SGET handler.
+     */
+    /* sget-wide/jumbo vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_WIDE_VOLATILE_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_WIDE_VOLATILE_JUMBO_finish:
+    FETCH(r9, 3)                        @ r9<- BBBB
+    .if 1
+    add     r0, r0, #offStaticField_value @ r0<- pointer to data
+    bl      dvmQuasiAtomicRead64        @ r0/r1<- contents of field
+    .else
+    ldrd    r0, [r0, #offStaticField_value] @ r0/r1<- field value (aligned)
+    .endif
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BBBB]
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    stmia   r9, {r0-r1}                 @ vBBBB/vBBBB+1<- r0/r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
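+    /*
+     * A 64-bit volatile read cannot rely on a plain ldrd, so the handler
+     * routes through dvmQuasiAtomicRead64() on the field's value slot.
+     * Roughly:
+     *
+     *     int64_t v = dvmQuasiAtomicRead64(&sfield->value.j);
+     *     fp[BBBB]   = (u4) v;
+     *     fp[BBBB+1] = (u4) (v >> 32);
+     */
+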
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_OBJECT_VOLATILE_JUMBO: /* 0x1fb */
+/* File: armv5te/OP_SGET_OBJECT_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_SGET_OBJECT_JUMBO.S */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_OBJECT_VOLATILE_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_OBJECT_VOLATILE_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    SMP_DMB                            @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_VOLATILE_JUMBO: /* 0x1fc */
+/* File: armv5te/OP_SPUT_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_VOLATILE_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_VOLATILE_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SMP_DMB                            @ releasing store
+    str     r1, [r0, #offStaticField_value] @ field<- vBBBB
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
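+    /*
+     * For the volatile sput the barrier comes before the store, i.e. a
+     * releasing store.  Approximately, with ANDROID_MEMBAR_FULL standing in
+     * for the SMP_DMB:
+     *
+     *     s4 val = fp[BBBB];
+     *     ANDROID_MEMBAR_FULL();        // nothing earlier may sink below the store
+     *     sfield->value.i = val;
+     */
+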
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_WIDE_VOLATILE_JUMBO: /* 0x1fd */
+/* File: armv5te/OP_SPUT_WIDE_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_SPUT_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit SPUT handler.
+     */
+    /* sput-wide/jumbo vBBBB, field@AAAAAAAA */
+    ldr     r0, [rSELF, #offThread_methodClassDex]  @ r0<- DvmDex
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    ldr     r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    FETCH(r9, 3)                        @ r9<- BBBB
+    ldr     r2, [r0, r1, lsl #2]        @ r2<- resolved StaticField ptr
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BBBB]
+    cmp     r2, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_WIDE_VOLATILE_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_WIDE_VOLATILE_JUMBO_finish: @ field ptr in r2, BBBB in r9
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vBBBB/vBBBB+1
+    GET_INST_OPCODE(r10)                @ extract opcode from rINST
+    .if 1
+    add     r2, r2, #offStaticField_value @ r2<- pointer to data
+    bl      dvmQuasiAtomicSwap64        @ stores r0/r1 into addr r2
+    .else
+    strd    r0, [r2, #offStaticField_value] @ field<- vBBBB/vBBBB+1
+    .endif
+    GOTO_OPCODE(r10)                    @ jump to next instruction
+
+
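+    /*
+     * The 64-bit volatile put goes through dvmQuasiAtomicSwap64(), which
+     * stores both words atomically with respect to other quasi-atomic
+     * accesses.  Roughly:
+     *
+     *     int64_t v = (int64_t)fp[BBBB] | ((int64_t)fp[BBBB+1] << 32);
+     *     dvmQuasiAtomicSwap64(v, &sfield->value.j);
+     */
+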
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_OBJECT_VOLATILE_JUMBO: /* 0x1fe */
+/* File: armv5te/OP_SPUT_OBJECT_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_SPUT_OBJECT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler for objects
+     */
+    /* sput-object/jumbo vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_SPUT_OBJECT_VOLATILE_JUMBO_finish          @ no, continue
+    ldr     r9, [rSELF, #offThread_method]    @ r9<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r9, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_OBJECT_VOLATILE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_THROW_VERIFICATION_ERROR_JUMBO: /* 0x1ff */
+/* File: armv5te/OP_THROW_VERIFICATION_ERROR_JUMBO.S */
+    /*
+     * Handle a jumbo throw-verification-error instruction.  This throws an
+     * exception for an error discovered during verification.  The
+     * exception is indicated by BBBB, with some detail provided by AAAAAAAA.
+     */
+    /* exop BBBB, Class@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    ldr     r0, [rSELF, #offThread_method]    @ r0<- self->method
+    orr     r2, r1, r2, lsl #16         @ r2<- AAAAaaaa
+    EXPORT_PC()                         @ export the PC
+    FETCH(r1, 3)                        @ r1<- BBBB
+    bl      dvmThrowVerificationError   @ always throws
+    b       common_exceptionThrown      @ handle exception
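+
+    /*
+     * In effect the handler above is just a forwarding call; the runtime
+     * helper always raises an exception, so control never falls through to
+     * the next instruction:
+     *
+     *     dvmThrowVerificationError(self->method, BBBB, AAAAaaaa);
+     *     goto exceptionThrown;
+     */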
 
     .balign 64
     .size   dvmAsmInstructionStart, .-dvmAsmInstructionStart
@@ -7727,7 +10830,7 @@
      */
 .LOP_CONST_STRING_resolve:
     EXPORT_PC()
-    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+    ldr     r0, [rSELF, #offThread_method] @ r0<- self->method
     ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveString            @ r0<- String reference
     cmp     r0, #0                      @ failed?
@@ -7746,7 +10849,7 @@
      */
 .LOP_CONST_STRING_JUMBO_resolve:
     EXPORT_PC()
-    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+    ldr     r0, [rSELF, #offThread_method] @ r0<- self->method
     ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveString            @ r0<- String reference
     cmp     r0, #0                      @ failed?
@@ -7765,7 +10868,7 @@
      */
 .LOP_CONST_CLASS_resolve:
     EXPORT_PC()
-    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+    ldr     r0, [rSELF, #offThread_method] @ r0<- self->method
     mov     r2, #1                      @ r2<- true
     ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveClass             @ r0<- Class reference
@@ -7805,7 +10908,7 @@
      */
 .LOP_CHECK_CAST_resolve:
     EXPORT_PC()                         @ resolve() could throw
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     mov     r1, r2                      @ r1<- BBBB
     mov     r2, #0                      @ r2<- false
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
@@ -7858,7 +10961,7 @@
      */
 .LOP_INSTANCE_OF_resolve:
     EXPORT_PC()                         @ resolve() could throw
-    ldr     r0, [rGLUE, #offGlue_method]    @ r0<- glue->method
+    ldr     r0, [rSELF, #offThread_method]    @ r0<- self->method
     mov     r1, r3                      @ r1<- BBBB
     mov     r2, #1                      @ r2<- true
     ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
@@ -7902,7 +11005,7 @@
      *  r1 holds BBBB
      */
 .LOP_NEW_INSTANCE_resolve:
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     mov     r2, #0                      @ r2<- false
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
@@ -7910,9 +11013,6 @@
     bne     .LOP_NEW_INSTANCE_resolved        @ no, continue
     b       common_exceptionThrown      @ yes, handle exception
 
-.LstrInstantiationErrorPtr:
-    .word   .LstrInstantiationError
-
 /* continuation for OP_NEW_ARRAY */
 
 
@@ -7923,7 +11023,7 @@
      *  r2 holds class ref CCCC
      */
 .LOP_NEW_ARRAY_resolve:
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     mov     r9, r1                      @ r9<- length (save)
     mov     r1, r2                      @ r1<- CCCC
     mov     r2, #0                      @ r2<- false
@@ -7978,8 +11078,8 @@
     beq     common_exceptionThrown      @ alloc failed, handle exception
 
     FETCH(r1, 2)                        @ r1<- FEDC or CCCC
-    str     r0, [rGLUE, #offGlue_retval]      @ retval.l <- new array
-    str     rINST, [rGLUE, #offGlue_retval+4] @ retval.h <- type
+    str     r0, [rSELF, #offThread_retval]      @ retval.l <- new array
+    str     rINST, [rSELF, #offThread_retval+4] @ retval.h <- type
     add     r0, r0, #offArrayObject_contents @ r0<- newArray->contents
     subs    r9, r9, #1                  @ length--, check for neg
     FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
@@ -8011,9 +11111,9 @@
     .endif
 
 2:
-    ldr     r0, [rGLUE, #offGlue_retval]     @ r0<- object
-    ldr     r1, [rGLUE, #offGlue_retval+4]   @ r1<- type
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r0, [rSELF, #offThread_retval]     @ r0<- object
+    ldr     r1, [rSELF, #offThread_retval+4]   @ r1<- type
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     GET_INST_OPCODE(ip)                      @ ip<- opcode from rINST
     cmp     r1, #'I'                         @ Is int array?
     strneb  r2, [r2, r0, lsr #GC_CARD_SHIFT] @ Mark card based on object head
@@ -8024,16 +11124,13 @@
      * mode of filled-new-array.
      */
 .LOP_FILLED_NEW_ARRAY_notimpl:
-    ldr     r0, .L_strInternalError
-    ldr     r1, .L_strFilledNewArrayNotImpl
-    bl      dvmThrowException
+    ldr     r0, .L_strFilledNewArrayNotImpl
+    bl      dvmThrowInternalError
     b       common_exceptionThrown
 
     .if     (!0)                 @ define in one or the other, not both
 .L_strFilledNewArrayNotImpl:
     .word   .LstrFilledNewArrayNotImpl
-.L_strInternalError:
-    .word   .LstrInternalError
     .endif
 
 /* continuation for OP_FILLED_NEW_ARRAY_RANGE */
@@ -8062,8 +11159,8 @@
     beq     common_exceptionThrown      @ alloc failed, handle exception
 
     FETCH(r1, 2)                        @ r1<- FEDC or CCCC
-    str     r0, [rGLUE, #offGlue_retval]      @ retval.l <- new array
-    str     rINST, [rGLUE, #offGlue_retval+4] @ retval.h <- type
+    str     r0, [rSELF, #offThread_retval]      @ retval.l <- new array
+    str     rINST, [rSELF, #offThread_retval+4] @ retval.h <- type
     add     r0, r0, #offArrayObject_contents @ r0<- newArray->contents
     subs    r9, r9, #1                  @ length--, check for neg
     FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
@@ -8095,9 +11192,9 @@
     .endif
 
 2:
-    ldr     r0, [rGLUE, #offGlue_retval]     @ r0<- object
-    ldr     r1, [rGLUE, #offGlue_retval+4]   @ r1<- type
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r0, [rSELF, #offThread_retval]     @ r0<- object
+    ldr     r1, [rSELF, #offThread_retval+4]   @ r1<- type
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     GET_INST_OPCODE(ip)                      @ ip<- opcode from rINST
     cmp     r1, #'I'                         @ Is int array?
     strneb  r2, [r2, r0, lsr #GC_CARD_SHIFT] @ Mark card based on object head
@@ -8108,16 +11205,13 @@
      * mode of filled-new-array.
      */
 .LOP_FILLED_NEW_ARRAY_RANGE_notimpl:
-    ldr     r0, .L_strInternalError
-    ldr     r1, .L_strFilledNewArrayNotImpl
-    bl      dvmThrowException
+    ldr     r0, .L_strFilledNewArrayNotImpl
+    bl      dvmThrowInternalError
     b       common_exceptionThrown
 
     .if     (!1)                 @ define in one or the other, not both
 .L_strFilledNewArrayNotImpl:
     .word   .LstrFilledNewArrayNotImpl
-.L_strInternalError:
-    .word   .LstrInternalError
     .endif
 
 /* continuation for OP_CMPL_FLOAT */
@@ -8197,7 +11291,7 @@
     beq     .LOP_APUT_OBJECT_throw           @ no
     mov     r1, rINST                   @ r1<- arrayObj
     FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
-    ldr     r2, [rGLUE, #offGlue_cardTable]     @ get biased CT base
+    ldr     r2, [rSELF, #offThread_cardTable]     @ get biased CT base
     add     r10, #offArrayObject_contents   @ r0<- pointer to slot
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     str     r9, [r10]                   @ vBB[vCC]<- vAA
@@ -8411,7 +11505,7 @@
     and     r1, r1, #15                 @ r1<- A
     cmp     r9, #0                      @ check object for null
     GET_VREG(r0, r1)                    @ r0<- fp[A]
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     beq     common_errNullObject        @ object was null
     FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
@@ -8512,7 +11606,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8529,7 +11623,7 @@
      * Returns StaticField pointer in r0.
      */
 .LOP_SGET_WIDE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8544,7 +11638,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_OBJECT_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8559,7 +11653,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_BOOLEAN_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8574,7 +11668,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_BYTE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8589,7 +11683,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_CHAR_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8604,7 +11698,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_SHORT_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8619,7 +11713,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SPUT_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8637,7 +11731,7 @@
      * Returns StaticField pointer in r2.
      */
 .LOP_SPUT_WIDE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8651,7 +11745,7 @@
     mov     r2, rINST, lsr #8           @ r2<- AA
     FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
     GET_VREG(r1, r2)                    @ r1<- fp[AA]
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     ldr     r9, [r0, #offField_clazz]   @ r9<- field->clazz
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     @ no-op                             @ releasing store
@@ -8667,7 +11761,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SPUT_BOOLEAN_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8682,7 +11776,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SPUT_BYTE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8697,7 +11791,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SPUT_CHAR_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8712,7 +11806,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SPUT_SHORT_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8779,7 +11873,7 @@
      *  r10 = "this" register
      */
 .LOP_INVOKE_DIRECT_resolve:
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     mov     r2, #METHOD_DIRECT          @ resolver method type
     bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
@@ -8847,7 +11941,7 @@
      *  r10 = "this" register
      */
 .LOP_INVOKE_DIRECT_RANGE_resolve:
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     mov     r2, #METHOD_DIRECT          @ resolver method type
     bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
@@ -9044,7 +12138,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_VOLATILE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9059,7 +12153,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SPUT_VOLATILE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9147,7 +12241,7 @@
      * Returns StaticField pointer in r0.
      */
 .LOP_SGET_WIDE_VOLATILE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9165,7 +12259,7 @@
      * Returns StaticField pointer in r2.
      */
 .LOP_SPUT_WIDE_VOLATILE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9252,7 +12346,7 @@
     and     r1, r1, #15                 @ r1<- A
     cmp     r9, #0                      @ check object for null
     GET_VREG(r0, r1)                    @ r0<- fp[A]
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     beq     common_errNullObject        @ object was null
     FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
@@ -9269,7 +12363,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_OBJECT_VOLATILE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9282,7 +12376,7 @@
     mov     r2, rINST, lsr #8           @ r2<- AA
     FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
     GET_VREG(r1, r2)                    @ r1<- fp[AA]
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     ldr     r9, [r0, #offField_clazz]   @ r9<- field->clazz
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     SMP_DMB                            @ releasing store
@@ -9291,10 +12385,8455 @@
     strneb  r2, [r2, r9, lsr #GC_CARD_SHIFT]  @ mark card based on obj head
     GOTO_OPCODE(ip)                     @ jump to next instruction
 
+/* continuation for OP_CONST_CLASS_JUMBO */
+
+    /*
+     * Continuation if the Class has not yet been resolved.
+     *  r1: AAAAAAAA (Class ref)
+     *  r9: target register
+     */
+.LOP_CONST_CLASS_JUMBO_resolve:
+    EXPORT_PC()
+    ldr     r0, [rSELF, #offThread_method] @ r0<- self->method
+    mov     r2, #1                      @ r2<- true
+    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- Class reference
+    cmp     r0, #0                      @ failed?
+    beq     common_exceptionThrown      @ yup, handle the exception
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vBBBB<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_CHECK_CAST_JUMBO */
+
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  r0 holds obj->clazz
+     *  r1 holds desired class resolved from AAAAAAAA
+     *  r9 holds object
+     */
+.LOP_CHECK_CAST_JUMBO_fullcheck:
+    mov     r10, r1                     @ avoid ClassObject getting clobbered
+    bl      dvmInstanceofNonTrivial     @ r0<- boolean result
+    cmp     r0, #0                      @ failed?
+    bne     .LOP_CHECK_CAST_JUMBO_okay            @ no, success
+
+    @ A cast has failed.  We need to throw a ClassCastException.
+    EXPORT_PC()                         @ about to throw
+    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz (actual class)
+    mov     r1, r10                     @ r1<- desired class
+    bl      dvmThrowClassCastException
+    b       common_exceptionThrown
+
+    /*
+     * Advance PC and get the next opcode.
+     */
+.LOP_CHECK_CAST_JUMBO_okay:
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  r2 holds AAAAAAAA
+     *  r9 holds object
+     */
+.LOP_CHECK_CAST_JUMBO_resolve:
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    mov     r1, r2                      @ r1<- AAAAAAAA
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
+    cmp     r0, #0                      @ got null?
+    beq     common_exceptionThrown      @ yes, handle exception
+    mov     r1, r0                      @ r1<- class resolved from AAAAAAAA
+    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
+    b       .LOP_CHECK_CAST_JUMBO_resolved        @ pick up where we left off
+
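+    /*
+     * Taken together, the check-cast continuation mirrors this C logic: a
+     * failed trivial same-class test falls back to the full subtype check,
+     * and a miss throws with the actual and desired classes.  Roughly:
+     *
+     *     if (obj != NULL && obj->clazz != desired &&
+     *             !dvmInstanceofNonTrivial(obj->clazz, desired)) {
+     *         dvmThrowClassCastException(obj->clazz, desired);
+     *         goto exceptionThrown;
+     *     }
+     */
+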
+/* continuation for OP_INSTANCE_OF_JUMBO */
+
+    /*
+     * Class resolved, determine type of check necessary.  This is common.
+     *  r0 holds obj->clazz
+     *  r1 holds class resolved from AAAAAAAA
+     *  r9 holds BBBB
+     */
+.LOP_INSTANCE_OF_JUMBO_resolved:
+    cmp     r0, r1                      @ same class (trivial success)?
+    beq     .LOP_INSTANCE_OF_JUMBO_trivial         @ yes, trivial finish
+    @ fall through to OP_INSTANCE_OF_JUMBO_fullcheck
+
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  r0 holds obj->clazz
+     *  r1 holds class resolved from AAAAAAAA
+     *  r9 holds BBBB
+     */
+.LOP_INSTANCE_OF_JUMBO_fullcheck:
+    bl      dvmInstanceofNonTrivial     @ r0<- boolean result
+    @ fall through to OP_INSTANCE_OF_JUMBO_store
+
+    /*
+     * r0 holds boolean result
+     * r9 holds BBBB
+     */
+.LOP_INSTANCE_OF_JUMBO_store:
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r9)                    @ vBBBB<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * Trivial test succeeded, save and bail.
+     *  r9 holds BBBB
+     */
+.LOP_INSTANCE_OF_JUMBO_trivial:
+    mov     r0, #1                      @ indicate success
+    @ could b OP_INSTANCE_OF_JUMBO_store, but copying is faster and cheaper
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r9)                    @ vBBBB<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  r3 holds AAAAAAAA
+     *  r9 holds BBBB
+     */
+
+.LOP_INSTANCE_OF_JUMBO_resolve:
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [rSELF, #offThread_method]    @ r0<- self->method
+    mov     r1, r3                      @ r1<- AAAAAAAA
+    mov     r2, #1                      @ r2<- true
+    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
+    cmp     r0, #0                      @ got null?
+    beq     common_exceptionThrown      @ yes, handle exception
+    FETCH(r3, 4)                        @ r3<- vCCCC
+    mov     r1, r0                      @ r1<- class resolved from AAAAAAAA
+    GET_VREG(r0, r3)                    @ r0<- vCCCC (object)
+    ldr     r0, [r0, #offObject_clazz]  @ r0<- obj->clazz
+    b       .LOP_INSTANCE_OF_JUMBO_resolved        @ pick up where we left off
+
+/* continuation for OP_NEW_INSTANCE_JUMBO */
+
+    .balign 32                          @ minimize cache lines
+.LOP_NEW_INSTANCE_JUMBO_finish: @ r0=new object
+    FETCH(r3, 3)                        @ r3<- BBBB
+    cmp     r0, #0                      @ failed?
+    beq     common_exceptionThrown      @ yes, handle the exception
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r3)                    @ vBBBB<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * Class initialization required.
+     *
+     *  r0 holds class object
+     */
+.LOP_NEW_INSTANCE_JUMBO_needinit:
+    mov     r9, r0                      @ save r0
+    bl      dvmInitClass                @ initialize class
+    cmp     r0, #0                      @ check boolean result
+    mov     r0, r9                      @ restore r0
+    bne     .LOP_NEW_INSTANCE_JUMBO_initialized     @ success, continue
+    b       common_exceptionThrown      @ failed, deal with init exception
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  r1 holds AAAAAAAA
+     */
+.LOP_NEW_INSTANCE_JUMBO_resolve:
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
+    cmp     r0, #0                      @ got null?
+    bne     .LOP_NEW_INSTANCE_JUMBO_resolved        @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
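+    /*
+     * Overall, new-instance/jumbo resolves the class, runs <clinit> through
+     * dvmInitClass() on first use, and only then allocates.  A rough sketch
+     * of the instruction as a whole (allocation happens in the main handler,
+     * not shown here):
+     *
+     *     if (!dvmIsClassInitialized(clazz) && !dvmInitClass(clazz))
+     *         goto exceptionThrown;                 // <clinit> threw
+     *     Object* newObj = dvmAllocObject(clazz, ALLOC_DONT_TRACK);
+     *     if (newObj == NULL)
+     *         goto exceptionThrown;
+     *     fp[BBBB] = (u4) newObj;
+     */
+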
+/* continuation for OP_NEW_ARRAY_JUMBO */
+
+
+    /*
+     * Resolve class.  (This is an uncommon case.)
+     *
+     *  r1 holds array length
+     *  r2 holds class ref AAAAAAAA
+     */
+.LOP_NEW_ARRAY_JUMBO_resolve:
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    mov     r9, r1                      @ r9<- length (save)
+    mov     r1, r2                      @ r1<- AAAAAAAA
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- call(clazz, ref)
+    cmp     r0, #0                      @ got null?
+    mov     r1, r9                      @ r1<- length (restore)
+    beq     common_exceptionThrown      @ yes, handle exception
+    @ fall through to OP_NEW_ARRAY_JUMBO_finish
+
+    /*
+     * Finish allocation.
+     *
+     *  r0 holds class
+     *  r1 holds array length
+     */
+.LOP_NEW_ARRAY_JUMBO_finish:
+    mov     r2, #ALLOC_DONT_TRACK       @ don't track in local refs table
+    bl      dvmAllocArrayByClass        @ r0<- call(clazz, length, flags)
+    cmp     r0, #0                      @ failed?
+    FETCH(r2, 3)                        @ r2<- vBBBB
+    beq     common_exceptionThrown      @ yes, handle the exception
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r2)                    @ vBBBB<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
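+    /*
+     * Once the array class is resolved, allocation is a single call;
+     * ALLOC_DONT_TRACK keeps the new array out of the local reference table
+     * because the result goes straight into a Dalvik register.  Roughly:
+     *
+     *     ClassObject* arrayClass = dvmResolveClass(method->clazz, AAAAAAAA, false);
+     *     ArrayObject* newArray = dvmAllocArrayByClass(arrayClass, length, ALLOC_DONT_TRACK);
+     *     if (newArray == NULL)
+     *         goto exceptionThrown;
+     *     fp[BBBB] = (u4) newArray;
+     */
+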
+/* continuation for OP_FILLED_NEW_ARRAY_JUMBO */
+
+    /*
+     * On entry:
+     *  r0 holds array class
+     */
+.LOP_FILLED_NEW_ARRAY_JUMBO_continue:
+    ldr     r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor
+    mov     r2, #ALLOC_DONT_TRACK       @ r2<- alloc flags
+    ldrb    rINST, [r3, #1]             @ rINST<- descriptor[1]
+    FETCH(r1, 3)                        @ r1<- BBBB (length)
+    cmp     rINST, #'I'                 @ array of ints?
+    cmpne   rINST, #'L'                 @ array of objects?
+    cmpne   rINST, #'['                 @ array of arrays?
+    mov     r9, r1                      @ save length in r9
+    bne     .LOP_FILLED_NEW_ARRAY_JUMBO_notimpl         @ no, not handled yet
+    bl      dvmAllocArrayByClass        @ r0<- call(arClass, length, flags)
+    cmp     r0, #0                      @ null return?
+    beq     common_exceptionThrown      @ alloc failed, handle exception
+
+    FETCH(r1, 4)                        @ r1<- CCCC
+    str     r0, [rSELF, #offThread_retval]      @ retval.l <- new array
+    str     rINST, [rSELF, #offThread_retval+4] @ retval.h <- type
+    add     r0, r0, #offArrayObject_contents @ r0<- newArray->contents
+    subs    r9, r9, #1                  @ length--, check for neg
+    FETCH_ADVANCE_INST(5)               @ advance to next instr, load rINST
+    bmi     2f                          @ was zero, bail
+
+    @ copy values from registers into the array
+    @ r0=array, r1=CCCC, r9=BBBB (length)
+    add     r2, rFP, r1, lsl #2         @ r2<- &fp[CCCC]
+1:  ldr     r3, [r2], #4                @ r3<- *r2++
+    subs    r9, r9, #1                  @ count--
+    str     r3, [r0], #4                @ *contents++ = vX
+    bpl     1b
+
+2:  ldr     r0, [rSELF, #offThread_retval]     @ r0<- object
+    ldr     r1, [rSELF, #offThread_retval+4]   @ r1<- type
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
+    GET_INST_OPCODE(ip)                      @ ip<- opcode from rINST
+    cmp     r1, #'I'                         @ Is int array?
+    strneb  r2, [r2, r0, lsr #GC_CARD_SHIFT] @ Mark card based on object head
+    GOTO_OPCODE(ip)                          @ execute it
+
+    /*
+     * Throw an exception indicating that we have not implemented this
+     * mode of filled-new-array.
+     */
+.LOP_FILLED_NEW_ARRAY_JUMBO_notimpl:
+    ldr     r0, .L_strFilledNewArrayNotImpl
+    bl      dvmThrowInternalError
+    b       common_exceptionThrown
+
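+    /*
+     * filled-new-array/jumbo only handles int, object, and array element
+     * types; anything else lands in the notimpl path above.  The copy loop
+     * and the conditional card mark correspond roughly to (markCard() is a
+     * hypothetical stand-in for the strneb sequence):
+     *
+     *     for (i = 0; i < length; i++)
+     *         contents[i] = fp[CCCC + i];
+     *     if (typeCh != 'I')                        // object or array contents
+     *         markCard(cardTable, newArray);
+     */
+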
+/* continuation for OP_IGET_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_JUMBO_finish:
+    @bl      common_squeak0
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IGET_WIDE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_WIDE_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_WIDE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_WIDE_JUMBO_finish:
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    .if     0
+    add     r0, r9, r3                  @ r0<- address of field
+    bl      dvmQuasiAtomicRead64        @ r0/r1<- contents of field
+    .else
+    ldrd    r0, [r9, r3]                @ r0/r1<- obj.field (64-bit align ok)
+    .endif
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    add     r3, rFP, r2, lsl #2         @ r3<- &fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r3, {r0-r1}                 @ fp[BBBB]<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IGET_OBJECT_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_OBJECT_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_OBJECT_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_OBJECT_JUMBO_finish:
+    @bl      common_squeak0
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IGET_BOOLEAN_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_BOOLEAN_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_BOOLEAN_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_BOOLEAN_JUMBO_finish:
+    @bl      common_squeak1
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IGET_BYTE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_BYTE_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_BYTE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_BYTE_JUMBO_finish:
+    @bl      common_squeak2
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IGET_CHAR_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_CHAR_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_CHAR_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_CHAR_JUMBO_finish:
+    @bl      common_squeak3
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IGET_SHORT_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_SHORT_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_SHORT_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_SHORT_JUMBO_finish:
+    @bl      common_squeak4
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IPUT_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_JUMBO_finish:
+    @bl      common_squeak0
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IPUT_WIDE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_WIDE_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_WIDE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_WIDE_JUMBO_finish:
+    cmp     r9, #0                      @ check object for null
+    FETCH(r2, 3)                        @ r2<- BBBB
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    ldmia   r2, {r0-r1}                 @ r0/r1<- fp[BBBB]
+    GET_INST_OPCODE(r10)                @ extract opcode from rINST
+    .if     0
+    add     r2, r9, r3                  @ r2<- target address
+    bl      dvmQuasiAtomicSwap64        @ stores r0/r1 into addr r2
+    .else
+    strd    r0, [r9, r3]                @ obj.field (64 bits, aligned)<- r0/r1
+    .endif
+    GOTO_OPCODE(r10)                    @ jump to next instruction
+
+/* continuation for OP_IPUT_OBJECT_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_OBJECT_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_OBJECT_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_OBJECT_JUMBO_finish:
+    @bl      common_squeak0
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str     r0, [r9, r3]                @ obj.field (32 bits)<- r0
+    cmp     r0, #0                      @ stored a null reference?
+    strneb  r2, [r2, r9, lsr #GC_CARD_SHIFT]  @ mark card if not
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
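+    /*
+     * iput-object/jumbo stores the reference and then dirties the card for
+     * the containing object unless the stored value was null, so the
+     * concurrent collector can find the updated slot.  Roughly (markCard()
+     * is a hypothetical stand-in for the strneb):
+     *
+     *     *(Object**)((u1*)obj + field->byteOffset) = val;
+     *     if (val != NULL)
+     *         markCard(cardTable, obj);
+     */
+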
+/* continuation for OP_IPUT_BOOLEAN_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_BOOLEAN_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_BOOLEAN_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_BOOLEAN_JUMBO_finish:
+    @bl      common_squeak1
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IPUT_BYTE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_BYTE_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_BYTE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_BYTE_JUMBO_finish:
+    @bl      common_squeak2
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IPUT_CHAR_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_CHAR_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_CHAR_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_CHAR_JUMBO_finish:
+    @bl      common_squeak3
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IPUT_SHORT_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_SHORT_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_SHORT_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_SHORT_JUMBO_finish:
+    @bl      common_squeak4
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_SGET_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
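+
+    /*
+     * Slow-path pattern shared by the sget/sput-jumbo resolve stubs: r0 is
+     * loaded with the referring method's class while r1 still holds the
+     * AAAAAAAA field index, matching dvmResolveStaticField(clazz, fieldIdx).
+     * EXPORT_PC() comes first because resolution may throw, and the
+     * exception path needs an accurate Dalvik PC in the frame.
+     */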
+
+/* continuation for OP_SGET_WIDE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     *
+     * Returns StaticField pointer in r0.
+     */
+.LOP_SGET_WIDE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_WIDE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_OBJECT_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_OBJECT_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_OBJECT_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_BOOLEAN_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_BOOLEAN_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_BOOLEAN_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_BYTE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_BYTE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_BYTE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_CHAR_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_CHAR_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_CHAR_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_SHORT_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_SHORT_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_SHORT_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SPUT_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_WIDE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     *  r9: &fp[BBBB]
+     *
+     * Returns StaticField pointer in r2.
+     */
+.LOP_SPUT_WIDE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    mov     r2, r0                      @ copy to r2
+    bne     .LOP_SPUT_WIDE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
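+
+    /*
+     * The "mov r2, r0" above is deliberately placed between the cmp and
+     * the bne: mov does not touch the condition flags, so the branch still
+     * tests the cmp result while the finish path receives the StaticField
+     * pointer in r2, presumably leaving r0/r1 free for the 64-bit value.
+     */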
+
+/* continuation for OP_SPUT_OBJECT_JUMBO */
+
+.LOP_SPUT_OBJECT_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
+    ldr     r9, [r0, #offField_clazz]   @ r9<- field->clazz
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str     r1, [r0, #offStaticField_value]  @ field<- vBBBB
+    cmp     r1, #0                      @ stored a null object?
+    strneb  r2, [r2, r9, lsr #GC_CARD_SHIFT]  @ mark card based on obj head
+    GOTO_OPCODE(ip)                     @ jump to next instruction
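+
+    /*
+     * The strneb above is the card-table write barrier: when a non-null
+     * reference was stored (per the preceding cmp), a byte is written at
+     * cardTable + (clazz >> GC_CARD_SHIFT), dirtying the card that covers
+     * the field's declaring class so the garbage collector will re-scan it.
+     */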
+
+/* continuation for OP_SPUT_BOOLEAN_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SPUT_BOOLEAN_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_BOOLEAN_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_BYTE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SPUT_BYTE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_BYTE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_CHAR_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SPUT_CHAR_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_CHAR_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_SHORT_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SPUT_SHORT_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_SHORT_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_INVOKE_VIRTUAL_JUMBO */
+
+    /*
+     * At this point:
+     *  r0 = resolved base method
+     */
+.LOP_INVOKE_VIRTUAL_JUMBO_continue:
+    FETCH(r10, 4)                       @ r10<- CCCC
+    GET_VREG(r1, r10)                   @ r1<- "this" ptr
+    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
+    cmp     r1, #0                      @ is "this" null?
+    beq     common_errNullObject        @ null "this", throw exception
+    ldr     r3, [r1, #offObject_clazz]  @ r3<- thisPtr->clazz
+    ldr     r3, [r3, #offClassObject_vtable]    @ r3<- thisPtr->clazz->vtable
+    ldr     r0, [r3, r2, lsl #2]        @ r0<- vtable[methodIndex]
+    bl      common_invokeMethodJumbo    @ continue on
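+
+    /*
+     * Virtual dispatch: the resolved base method only supplies
+     * methodIndex; the actual target comes from the receiver's class
+     * vtable (vtable[methodIndex], hence the word-scaled "lsl #2") after
+     * the null check on "this".
+     */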
+
+/* continuation for OP_INVOKE_SUPER_JUMBO */
+
+    /*
+     * At this point:
+     *  r0 = resolved base method
+     *  r9 = method->clazz
+     */
+.LOP_INVOKE_SUPER_JUMBO_continue:
+    ldr     r1, [r9, #offClassObject_super]     @ r1<- method->clazz->super
+    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
+    ldr     r3, [r1, #offClassObject_vtableCount]   @ r3<- super->vtableCount
+    EXPORT_PC()                         @ must export for invoke
+    cmp     r2, r3                      @ compare (methodIndex, vtableCount)
+    bcs     .LOP_INVOKE_SUPER_JUMBO_nsm             @ method not present in superclass
+    ldr     r1, [r1, #offClassObject_vtable]    @ r1<- ...clazz->super->vtable
+    ldr     r0, [r1, r2, lsl #2]        @ r0<- vtable[methodIndex]
+    bl      common_invokeMethodJumbo    @ continue on
+
+.LOP_INVOKE_SUPER_JUMBO_resolve:
+    mov     r0, r9                      @ r0<- method->clazz
+    mov     r2, #METHOD_VIRTUAL         @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    bne     .LOP_INVOKE_SUPER_JUMBO_continue        @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
+    /*
+     * Throw a NoSuchMethodError with the method name as the message.
+     *  r0 = resolved base method
+     */
+.LOP_INVOKE_SUPER_JUMBO_nsm:
+    ldr     r1, [r0, #offMethod_name]   @ r1<- method name
+    b       common_errNoSuchMethod
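+
+    /*
+     * invoke-super dispatches through the declaring class's superclass
+     * vtable (method->clazz->super), not the receiver's class.  The bounds
+     * check against vtableCount guards against a methodIndex that is not
+     * present in the superclass, which is reported as NoSuchMethodError
+     * with the method name as the message.
+     */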
+
+/* continuation for OP_INVOKE_DIRECT_JUMBO */
+
+    /*
+     * On entry:
+     *  r1 = reference (CCCC)
+     *  r10 = "this" register
+     */
+.LOP_INVOKE_DIRECT_JUMBO_resolve:
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    mov     r2, #METHOD_DIRECT          @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    GET_VREG(r2, r10)                   @ r2<- "this" ptr (reload)
+    bne     .LOP_INVOKE_DIRECT_JUMBO_finish          @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
+/* continuation for OP_IGET_VOLATILE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_VOLATILE_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_VOLATILE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_VOLATILE_JUMBO_finish:
+    @bl      common_squeak0
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr     r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    SMP_DMB                            @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IGET_WIDE_VOLATILE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_WIDE_VOLATILE_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_WIDE_VOLATILE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_WIDE_VOLATILE_JUMBO_finish:
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    .if     1
+    add     r0, r9, r3                  @ r0<- address of field
+    bl      dvmQuasiAtomicRead64        @ r0/r1<- contents of field
+    .else
+    ldrd    r0, [r9, r3]                @ r0/r1<- obj.field (64-bit align ok)
+    .endif
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    add     r3, rFP, r2, lsl #2         @ r3<- &fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r3, {r0-r1}                 @ fp[BBBB]<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
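+
+    /*
+     * The ".if 1" assembly-time switch selects the volatile form of the
+     * 64-bit load: dvmQuasiAtomicRead64 returns the field in r0/r1 with
+     * atomicity that a plain ldrd cannot guarantee here; the .else arm is
+     * the non-volatile template's ldrd and is never assembled in this
+     * handler.
+     */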
+
+/* continuation for OP_IGET_OBJECT_VOLATILE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_OBJECT_VOLATILE_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_OBJECT_VOLATILE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_OBJECT_VOLATILE_JUMBO_finish:
+    @bl      common_squeak0
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr     r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    SMP_DMB                            @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IPUT_VOLATILE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_VOLATILE_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_VOLATILE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_VOLATILE_JUMBO_finish:
+    @bl      common_squeak0
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SMP_DMB                            @ releasing store
+    str     r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
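+
+    /*
+     * SMP_DMB is a build-time macro: on SMP builds it emits a hardware
+     * memory barrier, on uniprocessor builds it expands to nothing.  For
+     * the volatile stores it sits *before* the str (release ordering); the
+     * volatile loads above place it *after* the ldr (acquire ordering).
+     */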
+
+/* continuation for OP_IPUT_WIDE_VOLATILE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_WIDE_VOLATILE_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_WIDE_VOLATILE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_WIDE_VOLATILE_JUMBO_finish:
+    cmp     r9, #0                      @ check object for null
+    FETCH(r2, 3)                        @ r2<- BBBB
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    ldmia   r2, {r0-r1}                 @ r0/r1<- fp[BBBB]
+    GET_INST_OPCODE(r10)                @ extract opcode from rINST
+    .if     1
+    add     r2, r9, r3                  @ r2<- target address
+    bl      dvmQuasiAtomicSwap64        @ stores r0/r1 into addr r2
+    .else
+    strd    r0, [r9, r3]                @ obj.field (64 bits, aligned)<- r0/r1
+    .endif
+    GOTO_OPCODE(r10)                    @ jump to next instruction
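+
+    /*
+     * dvmQuasiAtomicSwap64 provides the atomicity for the volatile 64-bit
+     * store.  Note that the opcode is carried in r10 (GET_INST_OPCODE /
+     * GOTO_OPCODE use r10 rather than ip here) because ip is not preserved
+     * across the bl.
+     */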
+
+/* continuation for OP_IPUT_OBJECT_VOLATILE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_OBJECT_VOLATILE_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_OBJECT_VOLATILE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_OBJECT_VOLATILE_JUMBO_finish:
+    @bl      common_squeak0
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SMP_DMB                            @ releasing store
+    str     r0, [r9, r3]                @ obj.field (32 bits)<- r0
+    cmp     r0, #0                      @ stored a null reference?
+    strneb  r2, [r2, r9, lsr #GC_CARD_SHIFT]  @ mark card if not
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_SGET_VOLATILE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_VOLATILE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_VOLATILE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_WIDE_VOLATILE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     *
+     * Returns StaticField pointer in r0.
+     */
+.LOP_SGET_WIDE_VOLATILE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_WIDE_VOLATILE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_OBJECT_VOLATILE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_OBJECT_VOLATILE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_OBJECT_VOLATILE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_VOLATILE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SPUT_VOLATILE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_VOLATILE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_WIDE_VOLATILE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     *  r9: &fp[BBBB]
+     *
+     * Returns StaticField pointer in r2.
+     */
+.LOP_SPUT_WIDE_VOLATILE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    mov     r2, r0                      @ copy to r2
+    bne     .LOP_SPUT_WIDE_VOLATILE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_OBJECT_VOLATILE_JUMBO */
+
+.LOP_SPUT_OBJECT_VOLATILE_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
+    ldr     r9, [r0, #offField_clazz]   @ r9<- field->clazz
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SMP_DMB                            @ releasing store
+    str     r1, [r0, #offStaticField_value]  @ field<- vBBBB
+    cmp     r1, #0                      @ stored a null object?
+    strneb  r2, [r2, r9, lsr #GC_CARD_SHIFT]  @ mark card based on obj head
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
     .size   dvmAsmSisterStart, .-dvmAsmSisterStart
     .global dvmAsmSisterEnd
 dvmAsmSisterEnd:
 
+
+    .global dvmAsmAltInstructionStart
+    .type   dvmAsmAltInstructionStart, %function
+dvmAsmAltInstructionStart:
+    .text
+
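+/*
+ * Every handler, in this table and in the main one, is padded to a fixed
+ * 64-byte slot (.balign 64), so a handler's address is simply
+ * table base + (opcode * 64).  When instruction checking is enabled the
+ * interpreter is expected to switch its handler base to this alternate
+ * table; each stub then sets lr to the corresponding real handler and
+ * branches to dvmCheckInst, whose return lands in that real handler.
+ */
+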
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NOP: /* 0x00 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (0 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE: /* 0x01 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (1 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_FROM16: /* 0x02 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (2 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_16: /* 0x03 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (3 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_WIDE: /* 0x04 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (4 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_WIDE_FROM16: /* 0x05 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (5 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_WIDE_16: /* 0x06 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (6 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_OBJECT: /* 0x07 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (7 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_OBJECT_FROM16: /* 0x08 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (8 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_OBJECT_16: /* 0x09 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (9 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_RESULT: /* 0x0a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (10 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_RESULT_WIDE: /* 0x0b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (11 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_RESULT_OBJECT: /* 0x0c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (12 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_EXCEPTION: /* 0x0d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (13 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_RETURN_VOID: /* 0x0e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (14 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_RETURN: /* 0x0f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (15 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_RETURN_WIDE: /* 0x10 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (16 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_RETURN_OBJECT: /* 0x11 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (17 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_4: /* 0x12 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (18 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_16: /* 0x13 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (19 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST: /* 0x14 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (20 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_HIGH16: /* 0x15 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (21 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_WIDE_16: /* 0x16 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (22 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_WIDE_32: /* 0x17 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (23 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_WIDE: /* 0x18 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (24 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_WIDE_HIGH16: /* 0x19 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (25 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_STRING: /* 0x1a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (26 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_STRING_JUMBO: /* 0x1b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (27 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_CLASS: /* 0x1c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (28 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MONITOR_ENTER: /* 0x1d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (29 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MONITOR_EXIT: /* 0x1e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (30 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CHECK_CAST: /* 0x1f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (31 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INSTANCE_OF: /* 0x20 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (32 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ARRAY_LENGTH: /* 0x21 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (33 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEW_INSTANCE: /* 0x22 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (34 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEW_ARRAY: /* 0x23 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (35 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_FILLED_NEW_ARRAY: /* 0x24 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (36 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (37 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_FILL_ARRAY_DATA: /* 0x26 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (38 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_THROW: /* 0x27 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (39 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_GOTO: /* 0x28 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (40 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_GOTO_16: /* 0x29 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (41 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_GOTO_32: /* 0x2a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (42 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_PACKED_SWITCH: /* 0x2b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (43 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPARSE_SWITCH: /* 0x2c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (44 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CMPL_FLOAT: /* 0x2d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (45 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CMPG_FLOAT: /* 0x2e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (46 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CMPL_DOUBLE: /* 0x2f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (47 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CMPG_DOUBLE: /* 0x30 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (48 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CMP_LONG: /* 0x31 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (49 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_EQ: /* 0x32 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (50 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_NE: /* 0x33 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (51 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_LT: /* 0x34 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (52 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_GE: /* 0x35 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (53 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_GT: /* 0x36 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (54 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_LE: /* 0x37 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (55 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_EQZ: /* 0x38 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (56 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_NEZ: /* 0x39 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (57 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_LTZ: /* 0x3a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (58 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_GEZ: /* 0x3b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (59 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_GTZ: /* 0x3c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (60 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_LEZ: /* 0x3d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (61 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3E: /* 0x3e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (62 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3F: /* 0x3f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (63 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_40: /* 0x40 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (64 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_41: /* 0x41 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (65 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_42: /* 0x42 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (66 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_43: /* 0x43 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (67 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AGET: /* 0x44 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (68 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AGET_WIDE: /* 0x45 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (69 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AGET_OBJECT: /* 0x46 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (70 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AGET_BOOLEAN: /* 0x47 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (71 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AGET_BYTE: /* 0x48 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (72 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AGET_CHAR: /* 0x49 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (73 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AGET_SHORT: /* 0x4a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (74 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_APUT: /* 0x4b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (75 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_APUT_WIDE: /* 0x4c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (76 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_APUT_OBJECT: /* 0x4d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (77 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_APUT_BOOLEAN: /* 0x4e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (78 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_APUT_BYTE: /* 0x4f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (79 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_APUT_CHAR: /* 0x50 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (80 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_APUT_SHORT: /* 0x51 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (81 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET: /* 0x52 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (82 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_WIDE: /* 0x53 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (83 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_OBJECT: /* 0x54 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (84 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_BOOLEAN: /* 0x55 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (85 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_BYTE: /* 0x56 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (86 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_CHAR: /* 0x57 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (87 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_SHORT: /* 0x58 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (88 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT: /* 0x59 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (89 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_WIDE: /* 0x5a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (90 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_OBJECT: /* 0x5b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (91 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_BOOLEAN: /* 0x5c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (92 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_BYTE: /* 0x5d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (93 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_CHAR: /* 0x5e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (94 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_SHORT: /* 0x5f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (95 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET: /* 0x60 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (96 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_WIDE: /* 0x61 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (97 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_OBJECT: /* 0x62 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (98 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_BOOLEAN: /* 0x63 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (99 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_BYTE: /* 0x64 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (100 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_CHAR: /* 0x65 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (101 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_SHORT: /* 0x66 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (102 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT: /* 0x67 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (103 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_WIDE: /* 0x68 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (104 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_OBJECT: /* 0x69 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (105 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_BOOLEAN: /* 0x6a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (106 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_BYTE: /* 0x6b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (107 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_CHAR: /* 0x6c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (108 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_SHORT: /* 0x6d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (109 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_VIRTUAL: /* 0x6e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (110 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_SUPER: /* 0x6f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (111 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_DIRECT: /* 0x70 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (112 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_STATIC: /* 0x71 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (113 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_INTERFACE: /* 0x72 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (114 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_73: /* 0x73 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (115 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (116 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_SUPER_RANGE: /* 0x75 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (117 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_DIRECT_RANGE: /* 0x76 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (118 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_STATIC_RANGE: /* 0x77 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (119 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (120 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_79: /* 0x79 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (121 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_7A: /* 0x7a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (122 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEG_INT: /* 0x7b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (123 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NOT_INT: /* 0x7c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (124 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEG_LONG: /* 0x7d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (125 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NOT_LONG: /* 0x7e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (126 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEG_FLOAT: /* 0x7f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (127 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEG_DOUBLE: /* 0x80 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (128 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INT_TO_LONG: /* 0x81 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (129 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INT_TO_FLOAT: /* 0x82 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (130 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INT_TO_DOUBLE: /* 0x83 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (131 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_LONG_TO_INT: /* 0x84 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (132 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_LONG_TO_FLOAT: /* 0x85 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (133 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_LONG_TO_DOUBLE: /* 0x86 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (134 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_FLOAT_TO_INT: /* 0x87 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (135 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_FLOAT_TO_LONG: /* 0x88 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (136 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_FLOAT_TO_DOUBLE: /* 0x89 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (137 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DOUBLE_TO_INT: /* 0x8a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (138 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DOUBLE_TO_LONG: /* 0x8b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (139 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DOUBLE_TO_FLOAT: /* 0x8c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (140 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INT_TO_BYTE: /* 0x8d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (141 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INT_TO_CHAR: /* 0x8e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (142 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INT_TO_SHORT: /* 0x8f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (143 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_INT: /* 0x90 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (144 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_INT: /* 0x91 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (145 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_INT: /* 0x92 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (146 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_INT: /* 0x93 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (147 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_INT: /* 0x94 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (148 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AND_INT: /* 0x95 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (149 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_OR_INT: /* 0x96 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (150 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_XOR_INT: /* 0x97 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (151 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHL_INT: /* 0x98 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (152 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHR_INT: /* 0x99 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (153 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_USHR_INT: /* 0x9a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (154 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_LONG: /* 0x9b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (155 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_LONG: /* 0x9c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (156 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_LONG: /* 0x9d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (157 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_LONG: /* 0x9e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (158 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_LONG: /* 0x9f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (159 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AND_LONG: /* 0xa0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (160 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_OR_LONG: /* 0xa1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (161 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_XOR_LONG: /* 0xa2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (162 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHL_LONG: /* 0xa3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (163 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHR_LONG: /* 0xa4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (164 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_USHR_LONG: /* 0xa5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (165 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_FLOAT: /* 0xa6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (166 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_FLOAT: /* 0xa7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (167 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_FLOAT: /* 0xa8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (168 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_FLOAT: /* 0xa9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (169 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_FLOAT: /* 0xaa */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (170 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_DOUBLE: /* 0xab */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (171 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_DOUBLE: /* 0xac */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (172 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_DOUBLE: /* 0xad */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (173 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_DOUBLE: /* 0xae */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (174 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_DOUBLE: /* 0xaf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (175 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_INT_2ADDR: /* 0xb0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (176 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_INT_2ADDR: /* 0xb1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (177 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_INT_2ADDR: /* 0xb2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (178 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_INT_2ADDR: /* 0xb3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (179 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_INT_2ADDR: /* 0xb4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (180 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AND_INT_2ADDR: /* 0xb5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (181 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_OR_INT_2ADDR: /* 0xb6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (182 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_XOR_INT_2ADDR: /* 0xb7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (183 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHL_INT_2ADDR: /* 0xb8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (184 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHR_INT_2ADDR: /* 0xb9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (185 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_USHR_INT_2ADDR: /* 0xba */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (186 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_LONG_2ADDR: /* 0xbb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (187 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_LONG_2ADDR: /* 0xbc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (188 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_LONG_2ADDR: /* 0xbd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (189 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_LONG_2ADDR: /* 0xbe */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (190 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_LONG_2ADDR: /* 0xbf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (191 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AND_LONG_2ADDR: /* 0xc0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (192 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_OR_LONG_2ADDR: /* 0xc1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (193 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_XOR_LONG_2ADDR: /* 0xc2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (194 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHL_LONG_2ADDR: /* 0xc3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (195 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHR_LONG_2ADDR: /* 0xc4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (196 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_USHR_LONG_2ADDR: /* 0xc5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (197 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_FLOAT_2ADDR: /* 0xc6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (198 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_FLOAT_2ADDR: /* 0xc7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (199 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_FLOAT_2ADDR: /* 0xc8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (200 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_FLOAT_2ADDR: /* 0xc9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (201 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_FLOAT_2ADDR: /* 0xca */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (202 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_DOUBLE_2ADDR: /* 0xcb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (203 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_DOUBLE_2ADDR: /* 0xcc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (204 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_DOUBLE_2ADDR: /* 0xcd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (205 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_DOUBLE_2ADDR: /* 0xce */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (206 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_DOUBLE_2ADDR: /* 0xcf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (207 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_INT_LIT16: /* 0xd0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (208 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_RSUB_INT: /* 0xd1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (209 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_INT_LIT16: /* 0xd2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (210 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_INT_LIT16: /* 0xd3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (211 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_INT_LIT16: /* 0xd4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (212 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AND_INT_LIT16: /* 0xd5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (213 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_OR_INT_LIT16: /* 0xd6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (214 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_XOR_INT_LIT16: /* 0xd7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (215 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_INT_LIT8: /* 0xd8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (216 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_RSUB_INT_LIT8: /* 0xd9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (217 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_INT_LIT8: /* 0xda */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (218 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_INT_LIT8: /* 0xdb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (219 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_INT_LIT8: /* 0xdc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (220 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AND_INT_LIT8: /* 0xdd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (221 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_OR_INT_LIT8: /* 0xde */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (222 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_XOR_INT_LIT8: /* 0xdf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (223 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHL_INT_LIT8: /* 0xe0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (224 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHR_INT_LIT8: /* 0xe1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (225 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_USHR_INT_LIT8: /* 0xe2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (226 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_VOLATILE: /* 0xe3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (227 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_VOLATILE: /* 0xe4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (228 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_VOLATILE: /* 0xe5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (229 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_VOLATILE: /* 0xe6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (230 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_OBJECT_VOLATILE: /* 0xe7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (231 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_WIDE_VOLATILE: /* 0xe8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (232 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_WIDE_VOLATILE: /* 0xe9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (233 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_WIDE_VOLATILE: /* 0xea */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (234 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_WIDE_VOLATILE: /* 0xeb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (235 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_BREAKPOINT: /* 0xec */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (236 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_THROW_VERIFICATION_ERROR: /* 0xed */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (237 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_EXECUTE_INLINE: /* 0xee */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (238 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_EXECUTE_INLINE_RANGE: /* 0xef */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (239 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_OBJECT_INIT_RANGE: /* 0xf0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (240 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_RETURN_VOID_BARRIER: /* 0xf1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (241 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_QUICK: /* 0xf2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (242 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_WIDE_QUICK: /* 0xf3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (243 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_OBJECT_QUICK: /* 0xf4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (244 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_QUICK: /* 0xf5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (245 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_WIDE_QUICK: /* 0xf6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (246 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_OBJECT_QUICK: /* 0xf7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (247 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (248 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (249 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_SUPER_QUICK: /* 0xfa */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (250 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (251 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_OBJECT_VOLATILE: /* 0xfc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (252 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_OBJECT_VOLATILE: /* 0xfd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (253 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_OBJECT_VOLATILE: /* 0xfe */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (254 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DISPATCH_FF: /* 0xff */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (255 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_CLASS_JUMBO: /* 0x100 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (256 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CHECK_CAST_JUMBO: /* 0x101 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (257 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INSTANCE_OF_JUMBO: /* 0x102 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (258 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEW_INSTANCE_JUMBO: /* 0x103 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (259 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEW_ARRAY_JUMBO: /* 0x104 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (260 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_FILLED_NEW_ARRAY_JUMBO: /* 0x105 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (261 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_JUMBO: /* 0x106 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (262 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_WIDE_JUMBO: /* 0x107 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (263 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_OBJECT_JUMBO: /* 0x108 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (264 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_BOOLEAN_JUMBO: /* 0x109 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (265 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_BYTE_JUMBO: /* 0x10a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (266 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_CHAR_JUMBO: /* 0x10b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (267 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_SHORT_JUMBO: /* 0x10c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (268 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_JUMBO: /* 0x10d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (269 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_WIDE_JUMBO: /* 0x10e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (270 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_OBJECT_JUMBO: /* 0x10f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (271 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_BOOLEAN_JUMBO: /* 0x110 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (272 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_BYTE_JUMBO: /* 0x111 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (273 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_CHAR_JUMBO: /* 0x112 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (274 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_SHORT_JUMBO: /* 0x113 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (275 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_JUMBO: /* 0x114 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (276 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_WIDE_JUMBO: /* 0x115 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (277 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_OBJECT_JUMBO: /* 0x116 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (278 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_BOOLEAN_JUMBO: /* 0x117 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (279 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_BYTE_JUMBO: /* 0x118 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (280 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_CHAR_JUMBO: /* 0x119 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (281 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_SHORT_JUMBO: /* 0x11a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (282 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_JUMBO: /* 0x11b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (283 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_WIDE_JUMBO: /* 0x11c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (284 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_OBJECT_JUMBO: /* 0x11d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (285 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_BOOLEAN_JUMBO: /* 0x11e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (286 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_BYTE_JUMBO: /* 0x11f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (287 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_CHAR_JUMBO: /* 0x120 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (288 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_SHORT_JUMBO: /* 0x121 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (289 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_VIRTUAL_JUMBO: /* 0x122 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (290 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_SUPER_JUMBO: /* 0x123 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (291 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_DIRECT_JUMBO: /* 0x124 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (292 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_STATIC_JUMBO: /* 0x125 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (293 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_INTERFACE_JUMBO: /* 0x126 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (294 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_27FF: /* 0x127 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (295 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_28FF: /* 0x128 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (296 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_29FF: /* 0x129 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (297 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_2AFF: /* 0x12a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (298 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_2BFF: /* 0x12b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (299 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_2CFF: /* 0x12c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (300 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_2DFF: /* 0x12d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (301 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_2EFF: /* 0x12e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (302 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_2FFF: /* 0x12f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (303 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_30FF: /* 0x130 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (304 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_31FF: /* 0x131 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (305 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_32FF: /* 0x132 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (306 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_33FF: /* 0x133 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (307 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_34FF: /* 0x134 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (308 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_35FF: /* 0x135 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (309 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_36FF: /* 0x136 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (310 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_37FF: /* 0x137 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (311 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_38FF: /* 0x138 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (312 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_39FF: /* 0x139 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (313 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3AFF: /* 0x13a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (314 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3BFF: /* 0x13b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (315 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3CFF: /* 0x13c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (316 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3DFF: /* 0x13d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (317 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3EFF: /* 0x13e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (318 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3FFF: /* 0x13f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (319 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_40FF: /* 0x140 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (320 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_41FF: /* 0x141 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (321 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_42FF: /* 0x142 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (322 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_43FF: /* 0x143 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (323 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_44FF: /* 0x144 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (324 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_45FF: /* 0x145 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (325 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_46FF: /* 0x146 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (326 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_47FF: /* 0x147 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (327 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_48FF: /* 0x148 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (328 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_49FF: /* 0x149 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (329 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_4AFF: /* 0x14a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (330 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_4BFF: /* 0x14b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (331 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_4CFF: /* 0x14c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (332 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_4DFF: /* 0x14d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (333 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_4EFF: /* 0x14e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (334 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_4FFF: /* 0x14f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (335 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_50FF: /* 0x150 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (336 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_51FF: /* 0x151 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (337 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_52FF: /* 0x152 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (338 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_53FF: /* 0x153 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (339 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_54FF: /* 0x154 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (340 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_55FF: /* 0x155 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (341 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_56FF: /* 0x156 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (342 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_57FF: /* 0x157 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (343 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_58FF: /* 0x158 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (344 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_59FF: /* 0x159 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (345 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_5AFF: /* 0x15a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (346 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_5BFF: /* 0x15b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (347 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_5CFF: /* 0x15c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (348 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_5DFF: /* 0x15d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (349 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_5EFF: /* 0x15e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (350 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_5FFF: /* 0x15f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (351 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_60FF: /* 0x160 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (352 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_61FF: /* 0x161 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (353 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_62FF: /* 0x162 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (354 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_63FF: /* 0x163 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (355 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_64FF: /* 0x164 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (356 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_65FF: /* 0x165 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (357 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_66FF: /* 0x166 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (358 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_67FF: /* 0x167 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (359 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_68FF: /* 0x168 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (360 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_69FF: /* 0x169 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (361 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_6AFF: /* 0x16a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (362 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_6BFF: /* 0x16b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (363 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_6CFF: /* 0x16c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (364 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_6DFF: /* 0x16d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (365 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_6EFF: /* 0x16e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (366 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_6FFF: /* 0x16f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (367 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_70FF: /* 0x170 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (368 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_71FF: /* 0x171 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (369 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_72FF: /* 0x172 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (370 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_73FF: /* 0x173 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (371 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_74FF: /* 0x174 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (372 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_75FF: /* 0x175 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (373 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_76FF: /* 0x176 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (374 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_77FF: /* 0x177 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (375 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_78FF: /* 0x178 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (376 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_79FF: /* 0x179 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (377 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_7AFF: /* 0x17a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (378 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_7BFF: /* 0x17b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (379 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_7CFF: /* 0x17c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (380 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_7DFF: /* 0x17d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (381 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_7EFF: /* 0x17e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (382 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_7FFF: /* 0x17f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (383 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_80FF: /* 0x180 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (384 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_81FF: /* 0x181 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (385 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_82FF: /* 0x182 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (386 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_83FF: /* 0x183 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (387 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_84FF: /* 0x184 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (388 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_85FF: /* 0x185 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (389 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_86FF: /* 0x186 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (390 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_87FF: /* 0x187 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (391 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_88FF: /* 0x188 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (392 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_89FF: /* 0x189 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (393 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_8AFF: /* 0x18a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (394 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_8BFF: /* 0x18b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (395 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_8CFF: /* 0x18c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (396 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_8DFF: /* 0x18d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (397 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_8EFF: /* 0x18e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (398 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_8FFF: /* 0x18f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (399 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_90FF: /* 0x190 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (400 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_91FF: /* 0x191 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (401 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_92FF: /* 0x192 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (402 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_93FF: /* 0x193 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (403 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_94FF: /* 0x194 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (404 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_95FF: /* 0x195 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (405 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_96FF: /* 0x196 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (406 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_97FF: /* 0x197 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (407 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_98FF: /* 0x198 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (408 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_99FF: /* 0x199 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (409 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_9AFF: /* 0x19a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (410 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_9BFF: /* 0x19b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (411 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_9CFF: /* 0x19c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (412 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_9DFF: /* 0x19d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (413 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_9EFF: /* 0x19e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (414 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_9FFF: /* 0x19f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (415 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A0FF: /* 0x1a0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (416 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A1FF: /* 0x1a1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (417 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A2FF: /* 0x1a2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (418 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A3FF: /* 0x1a3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (419 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A4FF: /* 0x1a4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (420 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A5FF: /* 0x1a5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (421 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A6FF: /* 0x1a6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (422 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A7FF: /* 0x1a7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (423 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A8FF: /* 0x1a8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (424 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A9FF: /* 0x1a9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (425 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_AAFF: /* 0x1aa */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (426 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_ABFF: /* 0x1ab */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (427 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_ACFF: /* 0x1ac */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (428 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_ADFF: /* 0x1ad */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (429 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_AEFF: /* 0x1ae */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (430 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_AFFF: /* 0x1af */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (431 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B0FF: /* 0x1b0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (432 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B1FF: /* 0x1b1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (433 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B2FF: /* 0x1b2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (434 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B3FF: /* 0x1b3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (435 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B4FF: /* 0x1b4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (436 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B5FF: /* 0x1b5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (437 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B6FF: /* 0x1b6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (438 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B7FF: /* 0x1b7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (439 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B8FF: /* 0x1b8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (440 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B9FF: /* 0x1b9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (441 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_BAFF: /* 0x1ba */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (442 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_BBFF: /* 0x1bb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (443 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_BCFF: /* 0x1bc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (444 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_BDFF: /* 0x1bd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (445 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_BEFF: /* 0x1be */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (446 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_BFFF: /* 0x1bf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (447 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C0FF: /* 0x1c0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (448 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C1FF: /* 0x1c1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (449 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C2FF: /* 0x1c2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (450 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C3FF: /* 0x1c3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (451 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C4FF: /* 0x1c4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (452 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C5FF: /* 0x1c5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (453 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C6FF: /* 0x1c6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (454 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C7FF: /* 0x1c7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (455 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C8FF: /* 0x1c8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (456 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C9FF: /* 0x1c9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (457 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_CAFF: /* 0x1ca */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (458 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_CBFF: /* 0x1cb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (459 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_CCFF: /* 0x1cc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (460 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_CDFF: /* 0x1cd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (461 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_CEFF: /* 0x1ce */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (462 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_CFFF: /* 0x1cf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (463 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D0FF: /* 0x1d0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (464 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D1FF: /* 0x1d1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (465 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D2FF: /* 0x1d2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (466 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D3FF: /* 0x1d3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (467 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D4FF: /* 0x1d4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (468 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D5FF: /* 0x1d5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (469 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D6FF: /* 0x1d6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (470 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D7FF: /* 0x1d7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (471 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D8FF: /* 0x1d8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (472 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D9FF: /* 0x1d9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (473 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_DAFF: /* 0x1da */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (474 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_DBFF: /* 0x1db */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (475 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_DCFF: /* 0x1dc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (476 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_DDFF: /* 0x1dd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (477 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_DEFF: /* 0x1de */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (478 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_DFFF: /* 0x1df */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (479 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E0FF: /* 0x1e0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (480 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E1FF: /* 0x1e1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (481 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E2FF: /* 0x1e2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (482 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E3FF: /* 0x1e3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (483 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E4FF: /* 0x1e4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (484 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E5FF: /* 0x1e5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (485 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E6FF: /* 0x1e6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (486 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E7FF: /* 0x1e7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (487 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E8FF: /* 0x1e8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (488 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E9FF: /* 0x1e9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (489 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_EAFF: /* 0x1ea */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (490 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_EBFF: /* 0x1eb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (491 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_ECFF: /* 0x1ec */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (492 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_EDFF: /* 0x1ed */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (493 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_EEFF: /* 0x1ee */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (494 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_EFFF: /* 0x1ef */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (495 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_F0FF: /* 0x1f0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (496 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_F1FF: /* 0x1f1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (497 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_OBJECT_INIT_JUMBO: /* 0x1f2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (498 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_VOLATILE_JUMBO: /* 0x1f3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (499 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_WIDE_VOLATILE_JUMBO: /* 0x1f4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (500 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_OBJECT_VOLATILE_JUMBO: /* 0x1f5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (501 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_VOLATILE_JUMBO: /* 0x1f6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (502 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_WIDE_VOLATILE_JUMBO: /* 0x1f7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (503 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_OBJECT_VOLATILE_JUMBO: /* 0x1f8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (504 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_VOLATILE_JUMBO: /* 0x1f9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (505 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_WIDE_VOLATILE_JUMBO: /* 0x1fa */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (506 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_OBJECT_VOLATILE_JUMBO: /* 0x1fb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (507 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_VOLATILE_JUMBO: /* 0x1fc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (508 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_WIDE_VOLATILE_JUMBO: /* 0x1fd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (509 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_OBJECT_VOLATILE_JUMBO: /* 0x1fe */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (510 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_THROW_VERIFICATION_ERROR_JUMBO: /* 0x1ff */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (511 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+    .balign 64
+    .size   dvmAsmAltInstructionStart, .-dvmAsmAltInstructionStart
+    .global dvmAsmAltInstructionEnd
+dvmAsmAltInstructionEnd:
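
Every handler in the main table and in this alternate table occupies a fixed
64-byte slot (hence the ".balign 64" directives and the "(N * 64)" offsets),
so a stub can find its real handler from the opcode number alone. A minimal C
sketch of that computation, assuming only the 64-byte stride visible above
(the helper name is illustrative):

    #include <stdint.h>

    extern char dvmAsmInstructionStart[];  /* base of the main handler table */

    /* Hypothetical helper: address of the main-table handler for an opcode,
     * using the fixed 64-byte per-handler stride. */
    static inline void* mainHandlerFor(unsigned opcode)
    {
        return dvmAsmInstructionStart + (uintptr_t)opcode * 64u;
    }

Each alt stub loads exactly this address into lr before tail-calling
dvmCheckInst, presumably so the checker can service any pending request and
then return straight into the real handler.
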
 /* File: armv5te/footer.S */
 
 /*
@@ -9312,71 +20851,64 @@
 #if defined(WITH_SELF_VERIFICATION)
     .global dvmJitToInterpPunt
 dvmJitToInterpPunt:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r2,#kSVSPunt                 @ r2<- interpreter entry point
     mov    r3, #0
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpSingleStep
 dvmJitToInterpSingleStep:
-    str    lr,[rGLUE,#offGlue_jitResumeNPC]
-    str    r1,[rGLUE,#offGlue_jitResumeDPC]
+    str    lr,[rSELF,#offThread_jitResumeNPC]
+    str    r1,[rSELF,#offThread_jitResumeDPC]
     mov    r2,#kSVSSingleStep           @ r2<- interpreter entry point
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpNoChainNoProfile
 dvmJitToInterpNoChainNoProfile:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC                       @ pass our target PC
     mov    r2,#kSVSNoProfile            @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpTraceSelectNoChain
 dvmJitToInterpTraceSelectNoChain:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC                       @ pass our target PC
     mov    r2,#kSVSTraceSelect          @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpTraceSelect
 dvmJitToInterpTraceSelect:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     ldr    r0,[lr, #-1]                 @ pass our target PC
     mov    r2,#kSVSTraceSelect          @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpBackwardBranch
 dvmJitToInterpBackwardBranch:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     ldr    r0,[lr, #-1]                 @ pass our target PC
     mov    r2,#kSVSBackwardBranch       @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpNormal
 dvmJitToInterpNormal:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     ldr    r0,[lr, #-1]                 @ pass our target PC
     mov    r2,#kSVSNormal               @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpNoChain
 dvmJitToInterpNoChain:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC                       @ pass our target PC
     mov    r2,#kSVSNoChain              @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 #else
 /*
@@ -9388,7 +20920,6 @@
  */
     .global dvmJitToInterpPunt
 dvmJitToInterpPunt:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    rPC, r0
 #if defined(WITH_JIT_TUNING)
     mov    r0,lr
@@ -9396,8 +20927,8 @@
 #endif
     EXPORT_PC()
     mov    r0, #0
-    str    r0, [r10, #offThread_inJitCodeCache] @ Back to the interp land
-    adrl   rIBASE, dvmAsmInstructionStart
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     FETCH_INST()
     GET_INST_OPCODE(ip)
     GOTO_OPCODE(ip)
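
Here, and in the hunks that follow, the fixed "adrl rIBASE,
dvmAsmInstructionStart" is replaced by a load from the thread's
offThread_curHandlerTable slot, so the choice between the main and the
alternate (checking) handler table becomes a per-thread runtime decision. A
rough sketch of the switch this enables; the Thread slice and the helper are
assumptions for illustration, only the two table symbols and the field name
come from the assembly:

    extern char dvmAsmInstructionStart[];     /* main handler table */
    extern char dvmAsmAltInstructionStart[];  /* alternate (checking) table */

    typedef struct Thread { const void* curHandlerTable; /* ... */ } Thread;

    /* Hypothetical helper: rIBASE is refreshed from curHandlerTable, so
     * flipping this field moves a thread between fast and checked dispatch
     * at its next instruction fetch. */
    static void selectHandlerTable(Thread* self, int useAltStubs)
    {
        self->curHandlerTable = useAltStubs ? dvmAsmAltInstructionStart
                                            : dvmAsmInstructionStart;
    }
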
@@ -9411,17 +20942,17 @@
  */
     .global dvmJitToInterpSingleStep
 dvmJitToInterpSingleStep:
-    str    lr,[rGLUE,#offGlue_jitResumeNPC]
-    str    r1,[rGLUE,#offGlue_jitResumeDPC]
+    str    lr,[rSELF,#offThread_jitResumeNPC]
+    str    r1,[rSELF,#offThread_jitResumeDPC]
     mov    r1,#kInterpEntryInstr
     @ enum is 4 byte in aapcs-EABI
-    str    r1, [rGLUE, #offGlue_entryPoint]
+    str    r1, [rSELF, #offThread_entryPoint]
     mov    rPC,r0
     EXPORT_PC()
 
-    adrl   rIBASE, dvmAsmInstructionStart
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     mov    r2,#kJitSingleStep     @ Ask for single step and then revert
-    str    r2,[rGLUE,#offGlue_jitState]
+    str    r2,[rSELF,#offThread_jitState]
     mov    r1,#1                  @ set changeInterp to bail to debug interp
     b      common_gotoBail
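
The single-step entry records both resume points, asks for exactly one
interpreted instruction, and bails to the debug interpreter. In rough C,
using the field and constant names visible in the assembly (the struct slice
and the constant values are placeholders):

    typedef struct Thread {
        void*       jitResumeNPC;   /* native resume point (from lr) */
        const void* jitResumeDPC;   /* Dalvik resume point (from r1) */
        int         entryPoint;
        int         jitState;
        /* ... */
    } Thread;

    enum { kInterpEntryInstr = 0, kJitSingleStep = 1 };  /* placeholder values */

    /* Sketch of dvmJitToInterpSingleStep's bookkeeping before it bails. */
    static void requestSingleStep(Thread* self, void* nativeLR, const void* dalvikPC)
    {
        self->jitResumeNPC = nativeLR;
        self->jitResumeDPC = dalvikPC;
        self->entryPoint   = kInterpEntryInstr;
        self->jitState     = kJitSingleStep;   /* single-step, then revert */
        /* ...then "b common_gotoBail" with changeInterp = true */
    }
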
 
@@ -9434,10 +20965,9 @@
 #if defined(WITH_JIT_TUNING)
     bl     dvmBumpNoChain
 #endif
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC
-    bl     dvmJitGetCodeAddr        @ Is there a translation?
-    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl     dvmJitGetTraceAddr       @ Is there a translation?
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     mov    r1, rPC                  @ arg1 of translation may need this
     mov    lr, #0                   @  in case target is HANDLER_INTERPRET
     cmp    r0,#0                    @ !0 means translation exists
@@ -9452,12 +20982,11 @@
     .global dvmJitToInterpTraceSelect
 dvmJitToInterpTraceSelect:
     ldr    rPC,[lr, #-1]           @ get our target PC
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     add    rINST,lr,#-5            @ save start of chain branch
     add    rINST, #-4              @  .. which is 9 bytes back
     mov    r0,rPC
-    bl     dvmJitGetCodeAddr       @ Is there a translation?
-    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl     dvmJitGetTraceAddr      @ Is there a translation?
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     cmp    r0,#0
     beq    2f
     mov    r1,rINST
@@ -9470,7 +20999,7 @@
 
 /* No translation, so request one if profiling isn't disabled*/
 2:
-    adrl   rIBASE, dvmAsmInstructionStart
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     GET_JIT_PROF_TABLE(r0)
     FETCH_INST()
     cmp    r0, #0
@@ -9496,15 +21025,14 @@
     .global dvmJitToInterpNormal
 dvmJitToInterpNormal:
     ldr    rPC,[lr, #-1]           @ get our target PC
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     add    rINST,lr,#-5            @ save start of chain branch
     add    rINST,#-4               @ .. which is 9 bytes back
 #if defined(WITH_JIT_TUNING)
     bl     dvmBumpNormal
 #endif
     mov    r0,rPC
-    bl     dvmJitGetCodeAddr        @ Is there a translation?
-    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl     dvmJitGetTraceAddr      @ Is there a translation?
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     cmp    r0,#0
     beq    toInterpreter            @ go if not, otherwise do chain
     mov    r1,rINST
@@ -9524,16 +21052,15 @@
 #if defined(WITH_JIT_TUNING)
     bl     dvmBumpNoChain
 #endif
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC
-    bl     dvmJitGetCodeAddr        @ Is there a translation?
-    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl     dvmJitGetTraceAddr       @ Is there a translation?
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     mov    r1, rPC                  @ arg1 of translation may need this
     mov    lr, #0                   @  in case target is HANDLER_INTERPRET
     cmp    r0,#0
     bxne   r0                       @ continue native execution if so
     EXPORT_PC()
-    adrl   rIBASE, dvmAsmInstructionStart
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     FETCH_INST()
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     GOTO_OPCODE(ip)                     @ jump to next instruction
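
All of these trampolines now share one pattern: ask the JIT whether a
translation exists for the target Dalvik PC, remember the answer in
inJitCodeCache, and either continue in the code cache or fall back to the
interpreter with rIBASE reloaded from the current handler table. A hedged C
sketch of the lookup step (the Thread slice is assumed; dvmJitGetTraceAddr is
declared as the register usage above suggests):

    typedef struct Thread { void* inJitCodeCache; /* ... */ } Thread;

    extern void* dvmJitGetTraceAddr(const unsigned short* dPC);

    /* Returns the translation entry point, or NULL meaning "resume the
     * interpreter at dPC".  Illustrative shape only. */
    static void* lookupTranslation(Thread* self, const unsigned short* dPC)
    {
        void* entry = dvmJitGetTraceAddr(dPC);
        self->inJitCodeCache = entry;    /* non-NULL: still in the code cache */
        return entry;
    }
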
@@ -9547,10 +21074,9 @@
 #if defined(WITH_JIT_TUNING)
     bl     dvmBumpNoChain
 #endif
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC
-    bl     dvmJitGetCodeAddr        @ Is there a translation?
-    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl     dvmJitGetTraceAddr       @ Is there a translation?
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     mov    r1, rPC                  @ arg1 of translation may need this
     mov    lr, #0                   @  in case target is HANDLER_INTERPRET
     cmp    r0,#0
@@ -9559,13 +21085,13 @@
 
 /*
  * No translation, restore interpreter regs and start interpreting.
- * rGLUE & rFP were preserved in the translated code, and rPC has
+ * rSELF & rFP were preserved in the translated code, and rPC has
  * already been restored by the time we get here.  We'll need to set
  * up rIBASE & rINST, and load the address of the JitTable into r0.
  */
 toInterpreter:
     EXPORT_PC()
-    adrl   rIBASE, dvmAsmInstructionStart
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     FETCH_INST()
     GET_JIT_PROF_TABLE(r0)
     @ NOTE: intended fallthrough
@@ -9597,13 +21123,13 @@
  * is already a native translation in place (and, if so,
  * jump to it now).
  */
+
     GET_JIT_THRESHOLD(r1)
-    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
     strb    r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ reset counter
     EXPORT_PC()
     mov     r0,rPC
-    bl      dvmJitGetCodeAddr           @ r0<- dvmJitGetCodeAddr(rPC)
-    str     r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl      dvmJitGetTraceAddr          @ r0<- dvmJitGetTraceAddr(rPC)
+    str     r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     mov     r1, rPC                     @ arg1 of translation may need this
     mov     lr, #0                      @  in case target is HANDLER_INTERPRET
     cmp     r0,#0
@@ -9624,9 +21150,8 @@
     cmp     r0, r10                     @ special case?
     bne     jitSVShadowRunStart         @ set up self verification shadow space
     @ Need to clear the inJitCodeCache flag
-    ldr    r10, [rGLUE, #offGlue_self]  @ r10 <- glue->self
     mov    r3, #0                       @ 0 means not in the JIT code cache
-    str    r3, [r10, #offThread_inJitCodeCache] @ back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ back to the interp land
     GET_INST_OPCODE(ip)
     GOTO_OPCODE(ip)
     /* no return */
@@ -9637,9 +21162,10 @@
  *  r2 is jit state, e.g. kJitTSelectRequest or kJitTSelectRequestHot
  */
 common_selectTrace:
-    str     r2,[rGLUE,#offGlue_jitState]
+
+    str     r2,[rSELF,#offThread_jitState]
     mov     r2,#kInterpEntryInstr       @ normal entry reason
-    str     r2,[rGLUE,#offGlue_entryPoint]
+    str     r2,[rSELF,#offThread_entryPoint]
     mov     r1,#1                       @ set changeInterp
     b       common_gotoBail
 
@@ -9648,42 +21174,41 @@
  * Save PC and registers to shadow memory for self verification mode
  * before jumping to native translation.
  * On entry:
- *    rPC, rFP, rGLUE: the values that they should contain
+ *    rPC, rFP, rSELF: the values that they should contain
  *    r10: the address of the target translation.
  */
 jitSVShadowRunStart:
     mov     r0,rPC                      @ r0<- program counter
     mov     r1,rFP                      @ r1<- frame pointer
-    mov     r2,rGLUE                    @ r2<- InterpState pointer
+    mov     r2,rSELF                    @ r2<- self (Thread) pointer
     mov     r3,r10                      @ r3<- target translation
     bl      dvmSelfVerificationSaveState @ save registers to shadow space
     ldr     rFP,[r0,#offShadowSpace_shadowFP] @ rFP<- fp in shadow space
-    add     rGLUE,r0,#offShadowSpace_interpState @ rGLUE<- rGLUE in shadow space
     bx      r10                         @ jump to the translation
 
 /*
- * Restore PC, registers, and interpState to original values
+ * Restore PC, registers, and interpreter state to original values
  * before jumping back to the interpreter.
  */
 jitSVShadowRunEnd:
     mov    r1,rFP                        @ pass ending fp
+    mov    r3,rSELF                      @ pass self ptr for convenience
     bl     dvmSelfVerificationRestoreState @ restore pc and fp values
-    ldr    rPC,[r0,#offShadowSpace_startPC] @ restore PC
-    ldr    rFP,[r0,#offShadowSpace_fp]   @ restore FP
-    ldr    rGLUE,[r0,#offShadowSpace_glue] @ restore InterpState
+    ldr    rPC,[rSELF,#offThread_pc]     @ restore PC
+    ldr    rFP,[rSELF,#offThread_fp]     @ restore FP
     ldr    r1,[r0,#offShadowSpace_svState] @ get self verification state
     cmp    r1,#0                         @ check for punt condition
     beq    1f
     mov    r2,#kJitSelfVerification      @ ask for self verification
-    str    r2,[rGLUE,#offGlue_jitState]
+    str    r2,[rSELF,#offThread_jitState]
     mov    r2,#kInterpEntryInstr         @ normal entry reason
-    str    r2,[rGLUE,#offGlue_entryPoint]
+    str    r2,[rSELF,#offThread_entryPoint]
     mov    r1,#1                         @ set changeInterp
     b      common_gotoBail
 
 1:                                       @ exit to interpreter without check
     EXPORT_PC()
-    adrl   rIBASE, dvmAsmInstructionStart
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     FETCH_INST()
     GET_INST_OPCODE(ip)
     GOTO_OPCODE(ip)
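
When a shadow run finishes, the interpreter state is now restored from the
Thread rather than from the shadow space, and the shadow space's svState
field decides what happens next. A hedged sketch of that decision (struct
slices and constant values are placeholders; the names come from the
assembly):

    typedef struct ShadowSpace { int svState; /* ... */ } ShadowSpace;
    typedef struct Thread { int jitState; int entryPoint; /* ... */ } Thread;
    enum { kJitSelfVerification = 1, kInterpEntryInstr = 0 };  /* placeholders */

    /* Returns nonzero if we must bail out and re-run under self
     * verification; zero means resume normal interpretation. */
    static int needsSelfVerificationRerun(const ShadowSpace* shadow, Thread* self)
    {
        if (shadow->svState != 0) {
            self->jitState   = kJitSelfVerification;
            self->entryPoint = kInterpEntryInstr;
            return 1;                     /* b common_gotoBail, changeInterp */
        }
        return 0;                         /* reload rIBASE and keep going */
    }
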
@@ -9738,48 +21263,20 @@
  *  r9 is trampoline PC adjustment *in bytes*
  */
 common_periodicChecks:
-    ldr     r3, [rGLUE, #offGlue_pSelfSuspendCount] @ r3<- &suspendCount
-
-    ldr     r1, [rGLUE, #offGlue_pDebuggerActive]   @ r1<- &debuggerActive
-    ldr     r2, [rGLUE, #offGlue_pActiveProfilers]  @ r2<- &activeProfilers
-
-    ldr     ip, [r3]                    @ ip<- suspendCount (int)
-
-    cmp     r1, #0                      @ debugger enabled?
-#if defined(WORKAROUND_CORTEX_A9_745320)
-    /* Don't use conditional loads if the HW defect exists */
-    beq     101f
-    ldrb    r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
-101:
-#else
-    ldrneb  r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
-#endif
-    ldr     r2, [r2]                    @ r2<- activeProfilers (int)
-    orrnes  ip, ip, r1                  @ ip<- suspendCount | debuggerActive
-    /*
-     * Don't switch the interpreter in the libdvm_traceview build even if the
-     * profiler is active.
-     * The code here is opted for less intrusion instead of performance.
-     * That is, *pActiveProfilers is still loaded into r2 even though it is not
-     * used when WITH_INLINE_PROFILING is defined.
-     */
-#if !defined(WITH_INLINE_PROFILING)
-    orrs    ip, ip, r2                  @ ip<- suspend|debugger|profiler; set Z
-#endif
-
-
-    bxeq    lr                          @ all zero, return
-
+/* TUNING - make this a direct load when interpBreak moved to Thread */
+    ldr     r1, [rSELF, #offThread_pInterpBreak] @ r1<- &interpBreak
+    /* speculatively load the thread-specific suspend count */
+    ldr     ip, [rSELF, #offThread_suspendCount]
+    ldr     r1, [r1]                                @ r1<- interpBreak
+    cmp     r1, #0                                  @ anything unusual?
+    bxeq    lr                                      @ return if not
     /*
      * One or more interesting events have happened.  Figure out what.
      *
-     * If debugging or profiling are compiled in, we need to disambiguate.
-     *
      * r0 still holds the reentry type.
      */
-    ldr     ip, [r3]                    @ ip<- suspendCount (int)
     cmp     ip, #0                      @ want suspend?
-    beq     1f                          @ no, must be debugger/profiler
+    beq     3f                          @ no, must be something else
 
     stmfd   sp!, {r0, lr}               @ preserve r0 and lr
 #if defined(WITH_JIT)
@@ -9787,77 +21284,86 @@
      * Refresh the Jit's cached copy of profile table pointer.  This pointer
      * doubles as the Jit's on/off switch.
      */
-    ldr     r3, [rGLUE, #offGlue_ppJitProfTable] @ r3<-&gDvmJit.pJitProfTable
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    ldr     r3, [rSELF, #offThread_ppJitProfTable] @ r3<-&gDvmJit.pJitProfTable
+    mov     r0, rSELF                  @ r0<- self
     ldr     r3, [r3] @ r3 <- pJitProfTable
     EXPORT_PC()                         @ need for precise GC
-    str     r3, [rGLUE, #offGlue_pJitProfTable] @ refresh Jit's on/off switch
+    str     r3, [rSELF, #offThread_pJitProfTable] @ refresh Jit's on/off switch
 #else
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    mov     r0, rSELF                   @ r0<- self
     EXPORT_PC()                         @ need for precise GC
 #endif
     bl      dvmCheckSuspendPending      @ do full check, suspend if necessary
     ldmfd   sp!, {r0, lr}               @ restore r0 and lr
 
     /*
-     * Reload the debugger/profiler enable flags.  We're checking to see
-     * if either of these got set while we were suspended.
-     *
-     * If WITH_INLINE_PROFILING is configured, don't check whether the profiler
-     * is enabled or not as the profiling will be done inline.
+     * Reload the interpBreak flags - they may have changed while we
+     * were suspended.
      */
-    ldr     r1, [rGLUE, #offGlue_pDebuggerActive]   @ r1<- &debuggerActive
-    cmp     r1, #0                      @ debugger enabled?
-#if defined(WORKAROUND_CORTEX_A9_745320)
-    /* Don't use conditional loads if the HW defect exists */
-    beq     101f
-    ldrb    r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
-101:
-#else
-    ldrneb  r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
-#endif
+/* TUNING - direct load when InterpBreak moved to Thread */
+    ldr     r1, [rSELF, #offThread_pInterpBreak]   @ r1<- &interpBreak
+    ldr     r1, [r1]                    @ r1<- interpBreak
+3:
+    /*
+     * TODO: this code is too fragile.  Need a general mechanism
+     * to identify what actions to take by submode.  Some profiling modes
+     * (instruction count) need to single-step, while method tracing
+     * may not.  Debugging with breakpoints can run unfettered, but
+     * source-level single-stepping requires Dalvik singlestepping.
+     * GC may require a one-shot action and then full-speed resumption.
+     */
+    ands    r1, #(kSubModeDebuggerActive | kSubModeEmulatorTrace | kSubModeInstCounting)
+    bxeq    lr                          @ nothing to do, return
 
-#if !defined(WITH_INLINE_PROFILING)
-    ldr     r2, [rGLUE, #offGlue_pActiveProfilers]  @ r2<- &activeProfilers
-    ldr     r2, [r2]                    @ r2<- activeProfilers (int)
-    orrs    r1, r1, r2
-#else
-    cmp     r1, #0                      @ only consult the debuggerActive flag
-#endif
-
-    beq     2f
-
-1:  @ debugger/profiler enabled, bail out; glue->entryPoint was set above
-    str     r0, [rGLUE, #offGlue_entryPoint]    @ store r0, need for debug/prof
+    @ debugger/profiler enabled, bail out; self->entryPoint was set above
+    str     r0, [rSELF, #offThread_entryPoint]  @ store r0, need for debug/prof
     add     rPC, rPC, r9                @ update rPC
     mov     r1, #1                      @ "want switch" = true
     b       common_gotoBail             @ side exit
 
-2:
-    bx      lr                          @ nothing to do, return
-
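
The rewritten check collapses the separate suspend/debugger/profiler loads
into one test of the shared interpBreak word: if it is zero the common case
returns immediately, otherwise the suspend count is honored first and then
only a handful of submodes force a switch to the debug interpreter. A rough C
view (the Thread slice and the submode bit values are placeholders; the
function name is declared as the assembly uses it):

    #include <stdbool.h>

    typedef struct Thread {
        const int* pInterpBreak;  /* -> interpBreak (TUNING: not yet per-thread) */
        int        suspendCount;
        /* ... */
    } Thread;

    extern bool dvmCheckSuspendPending(Thread* self);

    enum {                               /* placeholder bit values */
        kSubModeDebuggerActive = 1 << 0,
        kSubModeEmulatorTrace  = 1 << 1,
        kSubModeInstCounting   = 1 << 2,
    };

    /* Returns the submodes that require bailing to the debug interpreter. */
    static int periodicChecks(Thread* self)
    {
        int breakFlags = *self->pInterpBreak;
        if (breakFlags == 0)
            return 0;                          /* fast path: nothing pending */
        if (self->suspendCount != 0) {
            dvmCheckSuspendPending(self);      /* full check, may block */
            breakFlags = *self->pInterpBreak;  /* may have changed meanwhile */
        }
        return breakFlags & (kSubModeDebuggerActive | kSubModeEmulatorTrace
                             | kSubModeInstCounting);
    }
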
 
 /*
  * The equivalent of "goto bail", this calls through the "bail handler".
  *
- * State registers will be saved to the "glue" area before bailing.
+ * State registers will be saved to the "thread" area before bailing.
  *
  * On entry:
  *  r1 is "bool changeInterp", indicating if we want to switch to the
  *     other interpreter or just bail all the way out
  */
 common_gotoBail:
-    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
-    mov     r0, rGLUE                   @ r0<- glue ptr
-    b       dvmMterpStdBail             @ call(glue, changeInterp)
+    SAVE_PC_FP_TO_SELF()                @ export state to "thread"
+    mov     r0, rSELF                   @ r0<- self ptr
+    b       dvmMterpStdBail             @ call(self, changeInterp)
 
     @add     r1, r1, #1                  @ using (boolean+1)
-    @add     r0, rGLUE, #offGlue_jmpBuf  @ r0<- &glue->jmpBuf
+    @add     r0, rSELF, #offThread_jmpBuf @ r0<- &self->jmpBuf
     @bl      _longjmp                    @ does not return
     @bl      common_abort
 
 
 /*
+ * Common code for jumbo method invocation.
+ * NOTE: this adjusts rPC to account for the difference in instruction width.
+ * As a result, the savedPc in the stack frame will not be wholly accurate. So
+ * long as that is only used for source file line number calculations, we're
+ * okay.
+ *
+ * On entry:
+ *  r0 is "Method* methodToCall", the method we're trying to call
+ */
+common_invokeMethodJumbo:
+.LinvokeNewJumbo:
+    @ prepare to copy args to "outs" area of current frame
+    add     rPC, rPC, #4                @ adjust pc to make return consistent
+    FETCH(r2, 1)                        @ r2<- BBBB (arg count)
+    SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
+    cmp     r2, #0                      @ no args?
+    beq     .LinvokeArgsDone            @ if no args, skip the rest
+    FETCH(r1, 2)                        @ r1<- CCCC
+    b       .LinvokeRangeArgs           @ handle args like invoke range
+
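
The "#4" here is the width difference between the invoke formats: the jumbo
forms occupy five 16-bit code units while the /range forms handled by the
shared code occupy three, so (5 - 3) units * 2 bytes per unit = 4 bytes.
Pre-advancing rPC lets .LinvokeRangeArgs, and the return code farther down
that advances the saved PC by three code units, end up at the instruction
following the jumbo form; only the savedPc noted above is left slightly
inaccurate.
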
+/*
  * Common code for method invocation with range.
  *
  * On entry:
@@ -9871,16 +21377,15 @@
     beq     .LinvokeArgsDone            @ if no args, skip the rest
     FETCH(r1, 2)                        @ r1<- CCCC
 
+.LinvokeRangeArgs:
     @ r0=methodToCall, r1=CCCC, r2=count, r10=outs
     @ (very few methods have > 10 args; could unroll for common cases)
     add     r3, rFP, r1, lsl #2         @ r3<- &fp[CCCC]
     sub     r10, r10, r2, lsl #2        @ r10<- "outs" area, for call args
-    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
 1:  ldr     r1, [r3], #4                @ val = *fp++
     subs    r2, r2, #1                  @ count--
     str     r1, [r10], #4               @ *outs++ = val
     bne     1b                          @ ...while count != 0
-    ldrh    r3, [r0, #offMethod_outsSize]   @ r3<- methodToCall->outsSize
     b       .LinvokeArgsDone
 
 /*
@@ -9895,11 +21400,9 @@
     movs    r2, rINST, lsr #12          @ r2<- B (arg count) -- test for zero
     SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
     FETCH(r1, 2)                        @ r1<- GFED (load here to hide latency)
-    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
-    ldrh    r3, [r0, #offMethod_outsSize]  @ r3<- methodToCall->outsSize
     beq     .LinvokeArgsDone
 
-    @ r0=methodToCall, r1=GFED, r3=outSize, r2=count, r9=regSize, r10=outs
+    @ r0=methodToCall, r1=GFED, r2=count, r10=outs
 .LinvokeNonRange:
     rsb     r2, r2, #5                  @ r2<- 5-r2
     add     pc, pc, r2, lsl #4          @ computed goto, 4 instrs each
@@ -9926,7 +21429,9 @@
     str     r2, [r10, #-4]!             @ *--outs = vD
 0:  @ fall through to .LinvokeArgsDone
 
-.LinvokeArgsDone: @ r0=methodToCall, r3=outSize, r9=regSize
+.LinvokeArgsDone: @ r0=methodToCall
+    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
+    ldrh    r3, [r0, #offMethod_outsSize]  @ r3<- methodToCall->outsSize
     ldr     r2, [r0, #offMethod_insns]  @ r2<- method->insns
     ldr     rINST, [r0, #offMethod_clazz]  @ rINST<- method->clazz
     @ find space for the new stack frame, check for overflow
@@ -9934,13 +21439,15 @@
     sub     r1, r1, r9, lsl #2          @ r1<- newFp (old savearea - regsSize)
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- newSaveArea
 @    bl      common_dumpRegs
-    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
     sub     r3, r10, r3, lsl #2         @ r3<- bottom (newsave - outsSize)
     cmp     r3, r9                      @ bottom < interpStackEnd?
+    ldr     lr, [rSELF, #offThread_pInterpBreak]
     ldr     r3, [r0, #offMethod_accessFlags] @ r3<- methodToCall->accessFlags
     blo     .LstackOverflow             @ yes, this frame will overflow stack
 
     @ set up newSaveArea
+    ldr     lr, [lr]                    @ lr<- active submodes
 #ifdef EASY_GDB
     SAVEAREA_FROM_FP(ip, rFP)           @ ip<- stack save area
     str     ip, [r10, #offStackSaveArea_prevSave]
@@ -9951,13 +21458,14 @@
     mov     r9, #0
     str     r9, [r10, #offStackSaveArea_returnAddr]
 #endif
-#if defined(WITH_INLINE_PROFILING)
+    ands    lr, #kSubModeMethodTrace    @ method tracing?
+    beq     1f                          @ skip if not
     stmfd   sp!, {r0-r3}                @ preserve r0-r3
-    mov     r1, r6
-    @ r0=methodToCall, r1=rGlue
+    mov     r1, rSELF
+    @ r0=methodToCall, r1=rSELF
     bl      dvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
-#endif
+1:
     str     r0, [r10, #offStackSaveArea_method]
     tst     r3, #ACC_NATIVE
     bne     .LinvokeNative
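
Method-trace entry is no longer compiled in or out with WITH_INLINE_PROFILING;
the active submodes are loaded through pInterpBreak and tested at run time.
Roughly, with the bit value as a placeholder and the trace function declared
as the r0/r1 setup above implies:

    typedef struct Method Method;
    typedef struct Thread Thread;

    extern void dvmFastMethodTraceEnter(const Method* method, Thread* self);

    enum { kSubModeMethodTrace = 1 << 3 };   /* placeholder bit value */

    /* Sketch of the runtime check replacing the old compile-time guard. */
    static void maybeTraceMethodEnter(const Method* methodToCall, Thread* self,
                                      int activeSubModes)
    {
        if (activeSubModes & kSubModeMethodTrace)
            dvmFastMethodTraceEnter(methodToCall, self);
    }
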
@@ -9980,18 +21488,17 @@
     ldrh    r9, [r2]                        @ r9 <- load INST from new PC
     ldr     r3, [rINST, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
     mov     rPC, r2                         @ publish new rPC
-    ldr     r2, [rGLUE, #offGlue_self]      @ r2<- glue->self
 
-    @ Update "glue" values for the new method
-    @ r0=methodToCall, r1=newFp, r2=self, r3=newMethodClass, r9=newINST
-    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
-    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
+    @ Update state values for the new method
+    @ r0=methodToCall, r1=newFp, r3=newMethodClass, r9=newINST
+    str     r0, [rSELF, #offThread_method]    @ self->method = methodToCall
+    str     r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
 #if defined(WITH_JIT)
     GET_JIT_PROF_TABLE(r0)
     mov     rFP, r1                         @ fp = newFp
     GET_PREFETCHED_OPCODE(ip, r9)           @ extract prefetched opcode from r9
     mov     rINST, r9                       @ publish new rINST
-    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
     cmp     r0,#0
     bne     common_updateProfile
     GOTO_OPCODE(ip)                         @ jump to next instruction
@@ -9999,22 +21506,23 @@
     mov     rFP, r1                         @ fp = newFp
     GET_PREFETCHED_OPCODE(ip, r9)           @ extract prefetched opcode from r9
     mov     rINST, r9                       @ publish new rINST
-    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
     GOTO_OPCODE(ip)                         @ jump to next instruction
 #endif
 
 .LinvokeNative:
     @ Prep for the native call
     @ r0=methodToCall, r1=newFp, r10=newSaveArea
-    ldr     r3, [rGLUE, #offGlue_self]      @ r3<- glue->self
-    ldr     r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
-    str     r1, [r3, #offThread_curFrame]   @ self->curFrame = newFp
+    ldr     lr, [rSELF, #offThread_pInterpBreak]
+    ldr     r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
     str     r9, [r10, #offStackSaveArea_localRefCookie] @newFp->localRefCookie=top
-    mov     r9, r3                      @ r9<- glue->self (preserve)
+    ldr     lr, [lr]                    @ lr<- active submodes
 
     mov     r2, r0                      @ r2<- methodToCall
     mov     r0, r1                      @ r0<- newFp (points to args)
-    add     r1, rGLUE, #offGlue_retval  @ r1<- &retval
+    add     r1, rSELF, #offThread_retval  @ r1<- &retval
+    mov     r3, rSELF                   @ arg3<- self
 
 #ifdef ASSIST_DEBUGGER
     /* insert fake function header to help gdb find the stack frame */
@@ -10027,36 +21535,27 @@
 .Lskip:
 #endif
 
-#if defined(WITH_INLINE_PROFILING)
-    @ r2=JNIMethod, r6=rGLUE
-    stmfd   sp!, {r2,r6}
-#endif
-
+    ands    lr, #kSubModeMethodTrace    @ method tracing?
+    bne     330f                        @ hop if so
     mov     lr, pc                      @ set return addr
     ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
-
-#if defined(WITH_INLINE_PROFILING)
-    @ r0=JNIMethod, r1=rGLUE
-    ldmfd   sp!, {r0-r1}
-    bl      dvmFastNativeMethodTraceExit
-#endif
-
+220:
 #if defined(WITH_JIT)
-    ldr     r3, [rGLUE, #offGlue_ppJitProfTable] @ Refresh Jit's on/off status
+    ldr     r3, [rSELF, #offThread_ppJitProfTable] @ Refresh Jit's on/off status
 #endif
 
-    @ native return; r9=self, r10=newSaveArea
+    @ native return; r10=newSaveArea
     @ equivalent to dvmPopJniLocals
     ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved top
-    ldr     r1, [r9, #offThread_exception] @ check for exception
+    ldr     r1, [rSELF, #offThread_exception] @ check for exception
 #if defined(WITH_JIT)
     ldr     r3, [r3]                    @ r3 <- gDvmJit.pProfTable
 #endif
-    str     rFP, [r9, #offThread_curFrame]  @ self->curFrame = fp
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = fp
     cmp     r1, #0                      @ null?
-    str     r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top
+    str     r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
 #if defined(WITH_JIT)
-    str     r3, [rGLUE, #offGlue_pJitProfTable] @ refresh cached on/off switch
+    str     r3, [rSELF, #offThread_pJitProfTable] @ refresh cached on/off switch
 #endif
     bne     common_exceptionThrown      @ no, handle exception
 
@@ -10064,13 +21563,26 @@
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     GOTO_OPCODE(ip)                     @ jump to next instruction
 
+330:
+    @ r2=JNIMethod, r6=rSELF
+    stmfd   sp!, {r2,r6}
+
+    mov     lr, pc                      @ set return addr
+    ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+
+    @ r0=JNIMethod, r1=rSELF
+    ldmfd   sp!, {r0-r1}
+    bl      dvmFastNativeMethodTraceExit
+    b       220b
+
 .LstackOverflow:    @ r0=methodToCall
     mov     r1, r0                      @ r1<- methodToCall
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- self
+    mov     r0, rSELF                   @ r0<- self
     bl      dvmHandleStackOverflow
     b       common_exceptionThrown
 #ifdef ASSIST_DEBUGGER
     .fnend
+    .size   dalvik_mterp, .-dalvik_mterp
 #endif
 
 
@@ -10090,8 +21602,8 @@
     sub     sp, sp, #8                  @ space for args + pad
     FETCH(ip, 2)                        @ ip<- FEDC or CCCC
     mov     r2, r0                      @ A2<- methodToCall
-    mov     r0, rGLUE                   @ A0<- glue
-    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
+    mov     r0, rSELF                   @ A0<- self
+    SAVE_PC_FP_TO_SELF()                @ export state to "self"
     mov     r1, r9                      @ A1<- methodCallRange
     mov     r3, rINST, lsr #8           @ A3<- AA
     str     ip, [sp, #0]                @ A4<- ip
@@ -10113,19 +21625,21 @@
     mov     r9, #0
     bl      common_periodicChecks
 
-#if defined(WITH_INLINE_PROFILING)
+    ldr     lr, [rSELF, #offThread_pInterpBreak]
+    SAVEAREA_FROM_FP(r0, rFP)
+    ldr     lr, [lr]                    @ lr<- active submodes
+    ldr     r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc
+    ands    lr, #kSubModeMethodTrace    @ method tracing?
+    beq     333f
     stmfd   sp!, {r0-r3}                @ preserve r0-r3
-    mov     r0, r6
-    @ r0=rGlue
+    mov     r0, rSELF
+    @ r0=rSELF
     bl      dvmFastJavaMethodTraceExit
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
-#endif
-    SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
+333:
     ldr     rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame
-    ldr     r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc
     ldr     r2, [rFP, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                         @ r2<- method we're returning to
-    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
     cmp     r2, #0                      @ is this a break frame?
 #if defined(WORKAROUND_CORTEX_A9_745320)
     /* Don't use conditional loads if the HW defect exists */
@@ -10139,14 +21653,14 @@
     beq     common_gotoBail             @ break frame, bail out completely
 
     PREFETCH_ADVANCE_INST(rINST, r9, 3) @ advance r9, update new rINST
-    str     r2, [rGLUE, #offGlue_method]@ glue->method = newSave->method
+    str     r2, [rSELF, #offThread_method]@ self->method = newSave->method
     ldr     r1, [r10, #offClassObject_pDvmDex]   @ r1<- method->clazz->pDvmDex
-    str     rFP, [r3, #offThread_curFrame]  @ self->curFrame = fp
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = fp
 #if defined(WITH_JIT)
     ldr     r10, [r0, #offStackSaveArea_returnAddr] @ r10 = saveArea->returnAddr
     mov     rPC, r9                     @ publish new rPC
-    str     r1, [rGLUE, #offGlue_methodClassDex]
-    str     r10, [r3, #offThread_inJitCodeCache]  @ may return to JIT'ed land
+    str     r1, [rSELF, #offThread_methodClassDex]
+    str     r10, [rSELF, #offThread_inJitCodeCache]  @ may return to JIT'ed land
     cmp     r10, #0                      @ caller is compiled code
     blxne   r10
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
@@ -10154,7 +21668,7 @@
 #else
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     mov     rPC, r9                     @ publish new rPC
-    str     r1, [rGLUE, #offGlue_methodClassDex]
+    str     r1, [rSELF, #offThread_methodClassDex]
     GOTO_OPCODE(ip)                     @ jump to next instruction
 #endif
 
@@ -10163,8 +21677,8 @@
      */
      .if    0
 .LreturnOld:
-    SAVE_PC_FP_TO_GLUE()                @ export state
-    mov     r0, rGLUE                   @ arg to function
+    SAVE_PC_FP_TO_SELF()                @ export state
+    mov     r0, rSELF                   @ arg to function
     bl      dvmMterp_returnFromMethod
     b       common_resumeAfterGlueCall
     .endif
@@ -10187,13 +21701,12 @@
     mov     r9, #0
     bl      common_periodicChecks
 
-    ldr     r10, [rGLUE, #offGlue_self] @ r10<- glue->self
-    ldr     r9, [r10, #offThread_exception] @ r9<- self->exception
-    mov     r1, r10                     @ r1<- self
+    ldr     r9, [rSELF, #offThread_exception] @ r9<- self->exception
+    mov     r1, rSELF                   @ r1<- self
     mov     r0, r9                      @ r0<- exception
     bl      dvmAddTrackedAlloc          @ don't let the exception be GCed
     mov     r3, #0                      @ r3<- NULL
-    str     r3, [r10, #offThread_exception] @ self->exception = NULL
+    str     r3, [rSELF, #offThread_exception] @ self->exception = NULL
 
     /* set up args and a local for "&fp" */
     /* (str sp, [sp, #-4]!  would be perfect here, but is discouraged) */
@@ -10201,8 +21714,8 @@
     mov     ip, sp                      @ ip<- &fp
     mov     r3, #0                      @ r3<- false
     str     ip, [sp, #-4]!              @ *--sp = &fp
-    ldr     r1, [rGLUE, #offGlue_method] @ r1<- glue->method
-    mov     r0, r10                     @ r0<- self
+    ldr     r1, [rSELF, #offThread_method] @ r1<- self->method
+    mov     r0, rSELF                   @ r0<- self
     ldr     r1, [r1, #offMethod_insns]  @ r1<- method->insns
     mov     r2, r9                      @ r2<- exception
     sub     r1, rPC, r1                 @ r1<- pc - method->insns
@@ -10212,11 +21725,11 @@
     bl      dvmFindCatchBlock           @ call(self, relPc, exc, scan?, &fp)
 
     /* fix earlier stack overflow if necessary; may trash rFP */
-    ldrb    r1, [r10, #offThread_stackOverflowed]
+    ldrb    r1, [rSELF, #offThread_stackOverflowed]
     cmp     r1, #0                      @ did we overflow earlier?
     beq     1f                          @ no, skip ahead
     mov     rFP, r0                     @ save relPc result in rFP
-    mov     r0, r10                     @ r0<- self
+    mov     r0, rSELF                   @ r0<- self
     mov     r1, r9                      @ r1<- exception
     bl      dvmCleanupStackOverflow     @ call(self)
     mov     r0, rFP                     @ restore result
@@ -10231,30 +21744,30 @@
     /* adjust locals to match self->curFrame and updated PC */
     SAVEAREA_FROM_FP(r1, rFP)           @ r1<- new save area
     ldr     r1, [r1, #offStackSaveArea_method] @ r1<- new method
-    str     r1, [rGLUE, #offGlue_method]    @ glue->method = new method
+    str     r1, [rSELF, #offThread_method]  @ self->method = new method
     ldr     r2, [r1, #offMethod_clazz]      @ r2<- method->clazz
     ldr     r3, [r1, #offMethod_insns]      @ r3<- method->insns
     ldr     r2, [r2, #offClassObject_pDvmDex] @ r2<- method->clazz->pDvmDex
     add     rPC, r3, r0, asl #1             @ rPC<- method->insns + catchRelPc
-    str     r2, [rGLUE, #offGlue_methodClassDex] @ glue->pDvmDex = meth...
+    str     r2, [rSELF, #offThread_methodClassDex] @ self->pDvmDex = meth...
 
     /* release the tracked alloc on the exception */
     mov     r0, r9                      @ r0<- exception
-    mov     r1, r10                     @ r1<- self
+    mov     r1, rSELF                   @ r1<- self
     bl      dvmReleaseTrackedAlloc      @ release the exception
 
     /* restore the exception if the handler wants it */
     FETCH_INST()                        @ load rINST from rPC
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     cmp     ip, #OP_MOVE_EXCEPTION      @ is it "move-exception"?
-    streq   r9, [r10, #offThread_exception] @ yes, restore the exception
+    streq   r9, [rSELF, #offThread_exception] @ yes, restore the exception
     GOTO_OPCODE(ip)                     @ jump to next instruction
 
-.LnotCaughtLocally: @ r9=exception, r10=self
+.LnotCaughtLocally: @ r9=exception
     /* fix stack overflow if necessary */
-    ldrb    r1, [r10, #offThread_stackOverflowed]
+    ldrb    r1, [rSELF, #offThread_stackOverflowed]
     cmp     r1, #0                      @ did we overflow earlier?
-    movne   r0, r10                     @ if yes: r0<- self
+    movne   r0, rSELF                   @ if yes: r0<- self
     movne   r1, r9                      @ if yes: r1<- exception
     blne    dvmCleanupStackOverflow     @ if yes: call(self)
 
@@ -10263,14 +21776,14 @@
     /* call __android_log_print(prio, tag, format, ...) */
     /* "Exception %s from %s:%d not caught locally" */
     @ dvmLineNumFromPC(method, pc - method->insns)
-    ldr     r0, [rGLUE, #offGlue_method]
+    ldr     r0, [rSELF, #offThread_method]
     ldr     r1, [r0, #offMethod_insns]
     sub     r1, rPC, r1
     asr     r1, r1, #1
     bl      dvmLineNumFromPC
     str     r0, [sp, #-4]!
     @ dvmGetMethodSourceFile(method)
-    ldr     r0, [rGLUE, #offGlue_method]
+    ldr     r0, [rSELF, #offThread_method]
     bl      dvmGetMethodSourceFile
     str     r0, [sp, #-4]!
     @ exception->clazz->descriptor
@@ -10282,9 +21795,9 @@
     mov     r0, #3                      @ LOG_DEBUG
     bl      __android_log_print
 #endif
-    str     r9, [r10, #offThread_exception] @ restore exception
+    str     r9, [rSELF, #offThread_exception] @ restore exception
     mov     r0, r9                      @ r0<- exception
-    mov     r1, r10                     @ r1<- self
+    mov     r1, rSELF                   @ r1<- self
     bl      dvmReleaseTrackedAlloc      @ release the exception
     mov     r1, #0                      @ "want switch" = false
     b       common_gotoBail             @ bail out
@@ -10295,8 +21808,8 @@
      */
     .if     0
 .LexceptionOld:
-    SAVE_PC_FP_TO_GLUE()                @ export state
-    mov     r0, rGLUE                   @ arg to function
+    SAVE_PC_FP_TO_SELF()                @ export state
+    mov     r0, rSELF                   @ arg to function
     bl      dvmMterp_exceptionThrown
     b       common_resumeAfterGlueCall
     .endif
@@ -10307,7 +21820,7 @@
  * values and start executing at the next instruction.
  */
 common_resumeAfterGlueCall:
-    LOAD_PC_FP_FROM_GLUE()              @ pull rPC and rFP out of glue
+    LOAD_PC_FP_FROM_SELF()              @ pull rPC and rFP out of thread
     FETCH_INST()                        @ load rINST from rPC
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     GOTO_OPCODE(ip)                     @ jump to next instruction
@@ -10315,15 +21828,14 @@
 /*
  * Invalid array index. Note that our calling convention is strange; we use r1
  * and r3 because those just happen to be the registers all our callers are
- * using. We shuffle them here before calling the C function.
+ * using. We move r3 before calling the C function, but r1 happens to match.
  * r1: index
  * r3: size
  */
 common_errArrayIndex:
     EXPORT_PC()
-    mov     r0, r1
-    mov     r1, r3
-    bl      dvmThrowAIOOBE
+    mov     r0, r3
+    bl      dvmThrowArrayIndexOutOfBoundsException
     b       common_exceptionThrown
 
 /*
@@ -10331,29 +21843,28 @@
  */
 common_errDivideByZero:
     EXPORT_PC()
-    ldr     r0, strArithmeticException
-    ldr     r1, strDivideByZero
-    bl      dvmThrowException
+    ldr     r0, strDivideByZero
+    bl      dvmThrowArithmeticException
     b       common_exceptionThrown
 
 /*
  * Attempt to allocate an array with a negative size.
+ * On entry: length in r1
  */
 common_errNegativeArraySize:
     EXPORT_PC()
-    ldr     r0, strNegativeArraySizeException
-    mov     r1, #0
-    bl      dvmThrowException
+    mov     r0, r1                                @ arg0 <- len
+    bl      dvmThrowNegativeArraySizeException    @ (len)
     b       common_exceptionThrown
 
 /*
  * Invocation of a non-existent method.
+ * On entry: method name in r1
  */
 common_errNoSuchMethod:
     EXPORT_PC()
-    ldr     r0, strNoSuchMethodError
-    mov     r1, #0
-    bl      dvmThrowException
+    mov     r0, r1
+    bl      dvmThrowNoSuchMethodError
     b       common_exceptionThrown
 
 /*
@@ -10363,9 +21874,8 @@
  */
 common_errNullObject:
     EXPORT_PC()
-    ldr     r0, strNullPointerException
-    mov     r1, #0
-    bl      dvmThrowException
+    mov     r0, #0
+    bl      dvmThrowNullPointerException
     b       common_exceptionThrown
 
 /*
@@ -10501,17 +22011,8 @@
  * String references, must be close to the code that uses them.
  */
     .align  2
-strArithmeticException:
-    .word   .LstrArithmeticException
 strDivideByZero:
     .word   .LstrDivideByZero
-strNegativeArraySizeException:
-    .word   .LstrNegativeArraySizeException
-strNoSuchMethodError:
-    .word   .LstrNoSuchMethodError
-strNullPointerException:
-    .word   .LstrNullPointerException
-
 strLogTag:
     .word   .LstrLogTag
 strExceptionNotCaughtLocally:
@@ -10539,23 +22040,10 @@
 
 .LstrBadEntryPoint:
     .asciz  "Bad entry point %d\n"
-.LstrArithmeticException:
-    .asciz  "Ljava/lang/ArithmeticException;"
-.LstrDivideByZero:
-    .asciz  "divide by zero"
 .LstrFilledNewArrayNotImpl:
     .asciz  "filled-new-array only implemented for objects and 'int'"
-.LstrInternalError:
-    .asciz  "Ljava/lang/InternalError;"
-.LstrInstantiationError:
-    .asciz  "Ljava/lang/InstantiationError;"
-.LstrNegativeArraySizeException:
-    .asciz  "Ljava/lang/NegativeArraySizeException;"
-.LstrNoSuchMethodError:
-    .asciz  "Ljava/lang/NoSuchMethodError;"
-.LstrNullPointerException:
-    .asciz  "Ljava/lang/NullPointerException;"
-
+.LstrDivideByZero:
+    .asciz  "divide by zero"
 .LstrLogTag:
     .asciz  "mterp"
 .LstrExceptionNotCaughtLocally:
diff --git a/vm/mterp/out/InterpAsm-armv7-a.S b/vm/mterp/out/InterpAsm-armv7-a.S
index 489098a..db207a1 100644
--- a/vm/mterp/out/InterpAsm-armv7-a.S
+++ b/vm/mterp/out/InterpAsm-armv7-a.S
@@ -63,7 +63,7 @@
   reg nick      purpose
   r4  rPC       interpreted program counter, used for fetching instructions
   r5  rFP       interpreted frame pointer, used for accessing locals and args
-  r6  rGLUE     MterpGlue pointer
+  r6  rSELF     self (Thread) pointer
   r7  rINST     first 16-bit code unit of current instruction
   r8  rIBASE    interpreted instruction base pointer, used for computed goto
 
@@ -75,21 +75,21 @@
 /* single-purpose registers, given names for clarity */
 #define rPC     r4
 #define rFP     r5
-#define rGLUE   r6
+#define rSELF   r6
 #define rINST   r7
 #define rIBASE  r8
 
-/* save/restore the PC and/or FP from the glue struct */
-#define LOAD_PC_FROM_GLUE()     ldr     rPC, [rGLUE, #offGlue_pc]
-#define SAVE_PC_TO_GLUE()       str     rPC, [rGLUE, #offGlue_pc]
-#define LOAD_FP_FROM_GLUE()     ldr     rFP, [rGLUE, #offGlue_fp]
-#define SAVE_FP_TO_GLUE()       str     rFP, [rGLUE, #offGlue_fp]
-#define LOAD_PC_FP_FROM_GLUE()  ldmia   rGLUE, {rPC, rFP}
-#define SAVE_PC_FP_TO_GLUE()    stmia   rGLUE, {rPC, rFP}
+/* save/restore the PC and/or FP from the thread struct */
+#define LOAD_PC_FROM_SELF()     ldr     rPC, [rSELF, #offThread_pc]
+#define SAVE_PC_TO_SELF()       str     rPC, [rSELF, #offThread_pc]
+#define LOAD_FP_FROM_SELF()     ldr     rFP, [rSELF, #offThread_fp]
+#define SAVE_FP_TO_SELF()       str     rFP, [rSELF, #offThread_fp]
+#define LOAD_PC_FP_FROM_SELF()  ldmia   rSELF, {rPC, rFP}
+#define SAVE_PC_FP_TO_SELF()    stmia   rSELF, {rPC, rFP}
 
 /*
  * "export" the PC to the stack frame, f/b/o future exception objects.  Must
- * be done *before* something calls dvmThrowException.
+ * be done *before* something throws.
  *
  * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e.
  * fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc)
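A minimal sketch of the single store this works out to, assuming the usual
offStackSaveArea_currentPc constant for offsetof(StackSaveArea, xtra.currentPc):

    str     rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)]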
@@ -124,14 +124,14 @@
  * exception catch may miss.  (This also implies that it must come after
  * EXPORT_PC().)
  */
-#define FETCH_ADVANCE_INST(_count) ldrh    rINST, [rPC, #(_count*2)]!
+#define FETCH_ADVANCE_INST(_count) ldrh    rINST, [rPC, #((_count)*2)]!
 
 /*
  * The operation performed here is similar to FETCH_ADVANCE_INST, except the
  * src and dest registers are parameterized (not hard-wired to rPC and rINST).
  */
 #define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
-        ldrh    _dreg, [_sreg, #(_count*2)]!
+        ldrh    _dreg, [_sreg, #((_count)*2)]!
 
 /*
  * Fetch the next instruction from an offset specified by _reg.  Updates
@@ -151,15 +151,15 @@
  *
  * The "_S" variant works the same but treats the value as signed.
  */
-#define FETCH(_reg, _count)     ldrh    _reg, [rPC, #(_count*2)]
-#define FETCH_S(_reg, _count)   ldrsh   _reg, [rPC, #(_count*2)]
+#define FETCH(_reg, _count)     ldrh    _reg, [rPC, #((_count)*2)]
+#define FETCH_S(_reg, _count)   ldrsh   _reg, [rPC, #((_count)*2)]
 
 /*
  * Fetch one byte from an offset past the current PC.  Pass in the same
  * "_count" as you would for FETCH, and an additional 0/1 indicating which
  * byte of the halfword you want (lo/hi).
  */
-#define FETCH_B(_reg, _count, _byte) ldrb     _reg, [rPC, #(_count*2+_byte)]
+#define FETCH_B(_reg, _count, _byte) ldrb     _reg, [rPC, #((_count)*2+(_byte))]
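A minimal sketch of why the added parentheses matter: if a caller ever passes an
expression rather than a plain literal for _count, the old body scales only part of
it. With a hypothetical FETCH(r1, 1+2), for instance:

    ldrh    r1, [rPC, #(1+2*2)]     @ old expansion: byte offset 5, not what was meant
    ldrh    r1, [rPC, #((1+2)*2)]   @ new expansion: byte offset 6, as intended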
 
 /*
  * Put the instruction's opcode field into the specified register.
@@ -186,8 +186,8 @@
 #define SET_VREG(_reg, _vreg)   str     _reg, [rFP, _vreg, lsl #2]
 
 #if defined(WITH_JIT)
-#define GET_JIT_PROF_TABLE(_reg)    ldr     _reg,[rGLUE,#offGlue_pJitProfTable]
-#define GET_JIT_THRESHOLD(_reg)     ldr     _reg,[rGLUE,#offGlue_jitThreshold]
+#define GET_JIT_PROF_TABLE(_reg)    ldr _reg,[rSELF,#offThread_pJitProfTable]
+#define GET_JIT_THRESHOLD(_reg)     ldr _reg,[rSELF,#offThread_jitThreshold]
 #endif
 
 /*
@@ -280,7 +280,7 @@
 
 /*
  * On entry:
- *  r0  MterpGlue* glue
+ *  r0  Thread* self
  *
  * This function returns a boolean "changeInterp" value.  The return comes
  * via a call to dvmMterpStdBail().
@@ -298,29 +298,28 @@
     MTERP_ENTRY2
 
     /* save stack pointer, add magic word for debuggerd */
-    str     sp, [r0, #offGlue_bailPtr]  @ save SP for eventual return
+    str     sp, [r0, #offThread_bailPtr]  @ save SP for eventual return
 
     /* set up "named" registers, figure out entry point */
-    mov     rGLUE, r0                   @ set rGLUE
-    ldr     r1, [r0, #offGlue_entryPoint]   @ enum is 4 bytes in aapcs-EABI
-    LOAD_PC_FP_FROM_GLUE()              @ load rPC and rFP from "glue"
-    adr     rIBASE, dvmAsmInstructionStart  @ set rIBASE
+    mov     rSELF, r0                   @ set rSELF
+    ldr     r1, [r0, #offThread_entryPoint]   @ enum is 4 bytes in aapcs-EABI
+    LOAD_PC_FP_FROM_SELF()              @ load rPC and rFP from "thread"
+    ldr     rIBASE, [rSELF, #offThread_curHandlerTable] @ set rIBASE
     cmp     r1, #kInterpEntryInstr      @ usual case?
     bne     .Lnot_instr                 @ no, handle it
 
 #if defined(WITH_JIT)
 .LentryInstr:
-    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
     /* Entry is always a possible trace start */
     GET_JIT_PROF_TABLE(r0)
     FETCH_INST()
     mov     r1, #0                      @ prepare the value for the new state
-    str     r1, [r10, #offThread_inJitCodeCache] @ back to the interp land
+    str     r1, [rSELF, #offThread_inJitCodeCache] @ back to the interp land
     cmp     r0,#0                       @ is profiling disabled?
 #if !defined(WITH_SELF_VERIFICATION)
     bne     common_updateProfile        @ profiling is enabled
 #else
-    ldr     r2, [r10, #offThread_shadowSpace]   @ to find out the jit exit state
+    ldr     r2, [rSELF, #offThread_shadowSpace] @ to find out the jit exit state
     beq     1f                          @ profiling is disabled
     ldr     r3, [r2, #offShadowSpace_jitExitState]  @ jit exit state
     cmp     r3, #kSVSTraceSelect        @ hot trace following?
@@ -350,20 +349,20 @@
 
 #if defined(WITH_JIT)
 .Lnot_throw:
-    ldr     r10,[rGLUE, #offGlue_jitResumeNPC]
-    ldr     r2,[rGLUE, #offGlue_jitResumeDPC]
+    ldr     r10,[rSELF, #offThread_jitResumeNPC]
+    ldr     r2,[rSELF, #offThread_jitResumeDPC]
     cmp     r1, #kInterpEntryResume     @ resuming after Jit single-step?
     bne     .Lbad_arg
     cmp     rPC,r2
     bne     .LentryInstr                @ must have branched, don't resume
 #if defined(WITH_SELF_VERIFICATION)
-    @ glue->entryPoint will be set in dvmSelfVerificationSaveState
+    @ self->entryPoint will be set in dvmSelfVerificationSaveState
     b       jitSVShadowRunStart         @ re-enter the translation after the
                                         @ single-stepped instruction
     @noreturn
 #endif
     mov     r1, #kInterpEntryInstr
-    str     r1, [rGLUE, #offGlue_entryPoint]
+    str     r1, [rSELF, #offThread_entryPoint]
     bx      r10                         @ re-enter the translation
 #endif
 
@@ -373,6 +372,7 @@
     bl      printf
     bl      dvmAbort
     .fnend
+    .size   dvmMterpStdRun, .-dvmMterpStdRun
 
 
     .global dvmMterpStdBail
@@ -388,11 +388,11 @@
  * LR to PC.
  *
  * On entry:
- *  r0  MterpGlue* glue
+ *  r0  Thread* self
  *  r1  bool changeInterp
  */
 dvmMterpStdBail:
-    ldr     sp, [r0, #offGlue_bailPtr]      @ sp<- saved SP
+    ldr     sp, [r0, #offThread_bailPtr]      @ sp<- saved SP
     mov     r0, r1                          @ return the changeInterp value
     add     sp, sp, #4                      @ un-align 64
     ldmfd   sp!, {r4-r10,fp,pc}             @ restore 9 regs and return
@@ -575,7 +575,7 @@
     /* op vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
-    ldr     r0, [rGLUE, #offGlue_retval]    @ r0<- glue->retval.i
+    ldr     r0, [rSELF, #offThread_retval]    @ r0<- self->retval.i
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     SET_VREG(r0, r2)                    @ fp[AA]<- r0
     GOTO_OPCODE(ip)                     @ jump to next instruction
@@ -586,7 +586,7 @@
 /* File: armv5te/OP_MOVE_RESULT_WIDE.S */
     /* move-result-wide vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
-    add     r3, rGLUE, #offGlue_retval  @ r3<- &glue->retval
+    add     r3, rSELF, #offThread_retval  @ r3<- &self->retval
     add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
     ldmia   r3, {r0-r1}                 @ r0/r1<- retval.j
     FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
@@ -603,7 +603,7 @@
     /* op vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
-    ldr     r0, [rGLUE, #offGlue_retval]    @ r0<- glue->retval.i
+    ldr     r0, [rSELF, #offThread_retval]    @ r0<- self->retval.i
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     SET_VREG(r0, r2)                    @ fp[AA]<- r0
     GOTO_OPCODE(ip)                     @ jump to next instruction
@@ -614,14 +614,13 @@
 .L_OP_MOVE_EXCEPTION: /* 0x0d */
 /* File: armv5te/OP_MOVE_EXCEPTION.S */
     /* move-exception vAA */
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
     mov     r2, rINST, lsr #8           @ r2<- AA
-    ldr     r3, [r0, #offThread_exception]  @ r3<- dvmGetException bypass
+    ldr     r3, [rSELF, #offThread_exception]  @ r3<- dvmGetException bypass
     mov     r1, #0                      @ r1<- 0
     FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
     SET_VREG(r3, r2)                    @ fp[AA]<- exception obj
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
-    str     r1, [r0, #offThread_exception]  @ dvmClearException bypass
+    str     r1, [rSELF, #offThread_exception]  @ dvmClearException bypass
     GOTO_OPCODE(ip)                     @ jump to next instruction
 
 /* ------------------------------ */
@@ -635,7 +634,7 @@
 .L_OP_RETURN: /* 0x0f */
 /* File: armv5te/OP_RETURN.S */
     /*
-     * Return a 32-bit value.  Copies the return value into the "glue"
+     * Return a 32-bit value.  Copies the return value into the "thread"
      * structure, then jumps to the return handler.
      *
      * for: return, return-object
@@ -643,7 +642,7 @@
     /* op vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     GET_VREG(r0, r2)                    @ r0<- vAA
-    str     r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA
+    str     r0, [rSELF, #offThread_retval] @ retval.i <- vAA
     b       common_returnFromMethod
 
 /* ------------------------------ */
@@ -651,13 +650,13 @@
 .L_OP_RETURN_WIDE: /* 0x10 */
 /* File: armv5te/OP_RETURN_WIDE.S */
     /*
-     * Return a 64-bit value.  Copies the return value into the "glue"
+     * Return a 64-bit value.  Copies the return value into the "thread"
      * structure, then jumps to the return handler.
      */
     /* return-wide vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
-    add     r3, rGLUE, #offGlue_retval  @ r3<- &glue->retval
+    add     r3, rSELF, #offThread_retval  @ r3<- &self->retval
     ldmia   r2, {r0-r1}                 @ r0/r1 <- vAA/vAA+1
     stmia   r3, {r0-r1}                 @ retval<- r0/r1
     b       common_returnFromMethod
@@ -668,7 +667,7 @@
 /* File: armv5te/OP_RETURN_OBJECT.S */
 /* File: armv5te/OP_RETURN.S */
     /*
-     * Return a 32-bit value.  Copies the return value into the "glue"
+     * Return a 32-bit value.  Copies the return value into the "thread"
      * structure, then jumps to the return handler.
      *
      * for: return, return-object
@@ -676,7 +675,7 @@
     /* op vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     GET_VREG(r0, r2)                    @ r0<- vAA
-    str     r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA
+    str     r0, [rSELF, #offThread_retval] @ retval.i <- vAA
     b       common_returnFromMethod
 
 
@@ -801,7 +800,7 @@
 /* File: armv5te/OP_CONST_STRING.S */
     /* const/string vAA, String@BBBB */
     FETCH(r1, 1)                        @ r1<- BBBB
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]  @ r2<- self->methodClassDex
     mov     r9, rINST, lsr #8           @ r9<- AA
     ldr     r2, [r2, #offDvmDex_pResStrings]   @ r2<- dvmDex->pResStrings
     ldr     r0, [r2, r1, lsl #2]        @ r0<- pResStrings[BBBB]
@@ -819,7 +818,7 @@
     /* const/string vAA, String@BBBBBBBB */
     FETCH(r0, 1)                        @ r0<- bbbb (low)
     FETCH(r1, 2)                        @ r1<- BBBB (high)
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]  @ r2<- self->methodClassDex
     mov     r9, rINST, lsr #8           @ r9<- AA
     ldr     r2, [r2, #offDvmDex_pResStrings]   @ r2<- dvmDex->pResStrings
     orr     r1, r0, r1, lsl #16         @ r1<- BBBBbbbb
@@ -837,7 +836,7 @@
 /* File: armv5te/OP_CONST_CLASS.S */
     /* const/class vAA, Class@BBBB */
     FETCH(r1, 1)                        @ r1<- BBBB
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]  @ r2<- self->methodClassDex
     mov     r9, rINST, lsr #8           @ r9<- AA
     ldr     r2, [r2, #offDvmDex_pResClasses]   @ r2<- dvmDex->pResClasses
     ldr     r0, [r2, r1, lsl #2]        @ r0<- pResClasses[BBBB]
@@ -858,18 +857,12 @@
     /* monitor-enter vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     GET_VREG(r1, r2)                    @ r1<- vAA (object)
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    mov     r0, rSELF                   @ r0<- self
     cmp     r1, #0                      @ null object?
-    EXPORT_PC()                         @ need for precise GC, MONITOR_TRACKING
+    EXPORT_PC()                         @ need for precise GC
     beq     common_errNullObject        @ null object, throw an exception
     FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
     bl      dvmLockObject               @ call(self, obj)
-#ifdef WITH_DEADLOCK_PREDICTION /* implies WITH_MONITOR_TRACKING */
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
-    ldr     r1, [r0, #offThread_exception] @ check for exception
-    cmp     r1, #0
-    bne     common_exceptionThrown      @ exception raised, bail out
-#endif
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     GOTO_OPCODE(ip)                     @ jump to next instruction
 
@@ -890,7 +883,7 @@
     GET_VREG(r1, r2)                    @ r1<- vAA (object)
     cmp     r1, #0                      @ null object?
     beq     1f                          @ yes
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    mov     r0, rSELF                   @ r0<- self
     bl      dvmUnlockObject             @ r0<- success for unlock(self, obj)
     cmp     r0, #0                      @ failed?
     FETCH_ADVANCE_INST(1)               @ before throw: advance rPC, load rINST
@@ -912,7 +905,7 @@
     mov     r3, rINST, lsr #8           @ r3<- AA
     FETCH(r2, 1)                        @ r2<- BBBB
     GET_VREG(r9, r3)                    @ r9<- object
-    ldr     r0, [rGLUE, #offGlue_methodClassDex]    @ r0<- pDvmDex
+    ldr     r0, [rSELF, #offThread_methodClassDex]    @ r0<- pDvmDex
     cmp     r9, #0                      @ is object null?
     ldr     r0, [r0, #offDvmDex_pResClasses]    @ r0<- pDvmDex->pResClasses
     beq     .LOP_CHECK_CAST_okay            @ null obj, cast always succeeds
@@ -944,7 +937,7 @@
     GET_VREG(r0, r3)                    @ r0<- vB (object)
     and     r9, r9, #15                 @ r9<- A
     cmp     r0, #0                      @ is object null?
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- pDvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- pDvmDex
     beq     .LOP_INSTANCE_OF_store           @ null obj, not an instance, store r0
     FETCH(r3, 1)                        @ r3<- CCCC
     ldr     r2, [r2, #offDvmDex_pResClasses]    @ r2<- pDvmDex->pResClasses
@@ -983,7 +976,7 @@
      * Create a new instance of a class.
      */
     /* new-instance vAA, class@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
     ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
@@ -1013,12 +1006,12 @@
     /* new-array vA, vB, class@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
     FETCH(r2, 1)                        @ r2<- CCCC
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     GET_VREG(r1, r0)                    @ r1<- vB (array length)
     ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
     cmp     r1, #0                      @ check length
     ldr     r0, [r3, r2, lsl #2]        @ r0<- resolved class
-    bmi     common_errNegativeArraySize @ negative length, bail
+    bmi     common_errNegativeArraySize @ negative length, bail - len in r1
     cmp     r0, #0                      @ already resolved?
     EXPORT_PC()                         @ req'd for resolve, alloc
     bne     .LOP_NEW_ARRAY_finish          @ resolved, continue
@@ -1035,7 +1028,7 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
     EXPORT_PC()                         @ need for resolve and alloc
@@ -1043,7 +1036,7 @@
     mov     r10, rINST, lsr #8          @ r10<- AA or BA
     cmp     r0, #0                      @ already resolved?
     bne     .LOP_FILLED_NEW_ARRAY_continue        @ yes, continue on
-8:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+8:  ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     mov     r2, #0                      @ r2<- false
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveClass             @ r0<- call(clazz, ref)
@@ -1063,7 +1056,7 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
     EXPORT_PC()                         @ need for resolve and alloc
@@ -1071,7 +1064,7 @@
     mov     r10, rINST, lsr #8          @ r10<- AA or BA
     cmp     r0, #0                      @ already resolved?
     bne     .LOP_FILLED_NEW_ARRAY_RANGE_continue        @ yes, continue on
-8:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+8:  ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     mov     r2, #0                      @ r2<- false
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveClass             @ r0<- call(clazz, ref)
@@ -1109,12 +1102,11 @@
     /* throw vAA */
     mov     r2, rINST, lsr #8           @ r2<- AA
     GET_VREG(r1, r2)                    @ r1<- vAA (exception object)
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
     EXPORT_PC()                         @ exception handler can throw
     cmp     r1, #0                      @ null object?
     beq     common_errNullObject        @ yes, throw an NPE instead
     @ bypass dvmSetException, just store it
-    str     r1, [r0, #offThread_exception]  @ thread->exception<- obj
+    str     r1, [rSELF, #offThread_exception]  @ thread->exception<- obj
     b       common_exceptionThrown
 
 /* ------------------------------ */
@@ -2392,14 +2384,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2416,14 +2408,14 @@
      */
     /* iget-wide vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_WIDE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2443,14 +2435,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_OBJECT_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2472,14 +2464,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_BOOLEAN_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2501,14 +2493,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_BYTE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2530,14 +2522,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_CHAR_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2559,14 +2551,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_SHORT_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2586,14 +2578,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2607,14 +2599,14 @@
 /* File: armv6t2/OP_IPUT_WIDE.S */
     /* iput-wide vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_WIDE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2633,14 +2625,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_OBJECT_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2661,14 +2653,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_BOOLEAN_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2690,14 +2682,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_BYTE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2719,14 +2711,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_CHAR_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2748,14 +2740,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_SHORT_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -2774,7 +2766,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2797,7 +2789,7 @@
      * 64-bit SGET handler.
      */
     /* sget-wide vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2828,7 +2820,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2855,7 +2847,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2882,7 +2874,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2909,7 +2901,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2936,7 +2928,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2962,7 +2954,7 @@
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -2985,7 +2977,7 @@
      * 64-bit SPUT handler.
      */
     /* sput-wide vAA, field@BBBB */
-    ldr     r0, [rGLUE, #offGlue_methodClassDex]  @ r0<- DvmDex
+    ldr     r0, [rSELF, #offThread_methodClassDex]  @ r0<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields
     mov     r9, rINST, lsr #8           @ r9<- AA
@@ -3015,13 +3007,13 @@
      * for: sput-object, sput-object-volatile
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_SPUT_OBJECT_finish          @ no, continue
-    ldr     r9, [rGLUE, #offGlue_method]    @ r9<- current method
+    ldr     r9, [rSELF, #offThread_method]    @ r9<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r9, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -3041,7 +3033,7 @@
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -3068,7 +3060,7 @@
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -3095,7 +3087,7 @@
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -3122,7 +3114,7 @@
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -3149,7 +3141,7 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
@@ -3160,7 +3152,7 @@
     cmp     r0, #0                      @ already resolved?
     EXPORT_PC()                         @ must export for invoke
     bne     .LOP_INVOKE_VIRTUAL_continue        @ yes, continue on
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     mov     r2, #METHOD_VIRTUAL         @ resolver method type
     bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
@@ -3180,7 +3172,7 @@
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     .if     (!0)
     and     r10, r10, #15               @ r10<- D (or stays CCCC)
     .endif
@@ -3189,7 +3181,7 @@
     GET_VREG(r2, r10)                   @ r2<- "this" ptr
     ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
     cmp     r2, #0                      @ null "this"?
-    ldr     r9, [rGLUE, #offGlue_method] @ r9<- current method
+    ldr     r9, [rSELF, #offThread_method] @ r9<- current method
     beq     common_errNullObject        @ null "this", throw exception
     cmp     r0, #0                      @ already resolved?
     ldr     r9, [r9, #offMethod_clazz]  @ r9<- method->clazz
@@ -3213,7 +3205,7 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
@@ -3241,14 +3233,14 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
     ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
     cmp     r0, #0                      @ already resolved?
     EXPORT_PC()                         @ must export for invoke
     bne     common_invokeMethodNoRange @ yes, continue on
-0:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+0:  ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     mov     r2, #METHOD_STATIC          @ resolver method type
     bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
@@ -3274,9 +3266,9 @@
     .endif
     EXPORT_PC()                         @ must export for invoke
     GET_VREG(r0, r2)                    @ r0<- first arg ("this")
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- methodClassDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- methodClassDex
     cmp     r0, #0                      @ null obj?
-    ldr     r2, [rGLUE, #offGlue_method]  @ r2<- method
+    ldr     r2, [rSELF, #offThread_method]  @ r2<- method
     beq     common_errNullObject        @ yes, fail
     ldr     r0, [r0, #offObject_clazz]  @ r0<- thisPtr->clazz
     bl      dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
@@ -3304,7 +3296,7 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
@@ -3315,7 +3307,7 @@
     cmp     r0, #0                      @ already resolved?
     EXPORT_PC()                         @ must export for invoke
     bne     .LOP_INVOKE_VIRTUAL_RANGE_continue        @ yes, continue on
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     mov     r2, #METHOD_VIRTUAL         @ resolver method type
     bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
@@ -3337,7 +3329,7 @@
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     .if     (!1)
     and     r10, r10, #15               @ r10<- D (or stays CCCC)
     .endif
@@ -3346,7 +3338,7 @@
     GET_VREG(r2, r10)                   @ r2<- "this" ptr
     ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
     cmp     r2, #0                      @ null "this"?
-    ldr     r9, [rGLUE, #offGlue_method] @ r9<- current method
+    ldr     r9, [rSELF, #offThread_method] @ r9<- current method
     beq     common_errNullObject        @ null "this", throw exception
     cmp     r0, #0                      @ already resolved?
     ldr     r9, [r9, #offMethod_clazz]  @ r9<- method->clazz
@@ -3372,7 +3364,7 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
@@ -3402,14 +3394,14 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
     FETCH(r1, 1)                        @ r1<- BBBB
     ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
     ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
     cmp     r0, #0                      @ already resolved?
     EXPORT_PC()                         @ must export for invoke
     bne     common_invokeMethodRange @ yes, continue on
-0:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+0:  ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     mov     r2, #METHOD_STATIC          @ resolver method type
     bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
@@ -3437,9 +3429,9 @@
     .endif
     EXPORT_PC()                         @ must export for invoke
     GET_VREG(r0, r2)                    @ r0<- first arg ("this")
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- methodClassDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- methodClassDex
     cmp     r0, #0                      @ null obj?
-    ldr     r2, [rGLUE, #offGlue_method]  @ r2<- method
+    ldr     r2, [rSELF, #offThread_method]  @ r2<- method
     beq     common_errNullObject        @ yes, fail
     ldr     r0, [r0, #offObject_clazz]  @ r0<- thisPtr->clazz
     bl      dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
@@ -7051,14 +7043,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_VOLATILE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -7079,14 +7071,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_VOLATILE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -7106,7 +7098,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -7133,7 +7125,7 @@
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -7161,14 +7153,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_OBJECT_VOLATILE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -7187,14 +7179,14 @@
      */
     /* iget-wide vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IGET_WIDE_VOLATILE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -7210,14 +7202,14 @@
 /* File: armv5te/OP_IPUT_WIDE.S */
     /* iput-wide vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_WIDE_VOLATILE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -7235,7 +7227,7 @@
      * 64-bit SGET handler.
      */
     /* sget-wide vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -7265,7 +7257,7 @@
      * 64-bit SPUT handler.
      */
     /* sput-wide vAA, field@BBBB */
-    ldr     r0, [rGLUE, #offGlue_methodClassDex]  @ r0<- DvmDex
+    ldr     r0, [rSELF, #offThread_methodClassDex]  @ r0<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields
     mov     r9, rINST, lsr #8           @ r9<- AA
@@ -7304,7 +7296,7 @@
      * exception is indicated by AA, with some detail provided by BBBB.
      */
     /* op AA, ref@BBBB */
-    ldr     r0, [rGLUE, #offGlue_method]    @ r0<- glue->method
+    ldr     r0, [rSELF, #offThread_method]    @ r0<- self->method
     FETCH(r2, 1)                        @ r2<- BBBB
     EXPORT_PC()                         @ export the PC
     mov     r1, rINST, lsr #8           @ r1<- AA
@@ -7327,11 +7319,11 @@
      */
     /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */
     FETCH(r10, 1)                       @ r10<- BBBB
-    add     r1, rGLUE, #offGlue_retval  @ r1<- &glue->retval
+    add     r1, rSELF, #offThread_retval  @ r1<- &self->retval
     EXPORT_PC()                         @ can throw
     sub     sp, sp, #8                  @ make room for arg, +64 bit align
     mov     r0, rINST, lsr #12          @ r0<- B
-    str     r1, [sp]                    @ push &glue->retval
+    str     r1, [sp]                    @ push &self->retval
     bl      .LOP_EXECUTE_INLINE_continue        @ make call; will return after
     add     sp, sp, #8                  @ pop stack
     cmp     r0, #0                      @ test boolean result of inline
@@ -7357,11 +7349,11 @@
      */
     /* [opt] execute-inline/range {vCCCC..v(CCCC+AA-1)}, inline@BBBB */
     FETCH(r10, 1)                       @ r10<- BBBB
-    add     r1, rGLUE, #offGlue_retval  @ r1<- &glue->retval
+    add     r1, rSELF, #offThread_retval  @ r1<- &self->retval
     EXPORT_PC()                         @ can throw
     sub     sp, sp, #8                  @ make room for arg, +64 bit align
     mov     r0, rINST, lsr #8           @ r0<- AA
-    str     r1, [sp]                    @ push &glue->retval
+    str     r1, [sp]                    @ push &self->retval
     bl      .LOP_EXECUTE_INLINE_RANGE_continue        @ make call; will return after
     add     sp, sp, #8                  @ pop stack
     cmp     r0, #0                      @ test boolean result of inline
@@ -7372,12 +7364,23 @@
 
 /* ------------------------------ */
     .balign 64
-.L_OP_INVOKE_DIRECT_EMPTY: /* 0xf0 */
-/* File: armv5te/OP_INVOKE_DIRECT_EMPTY.S */
+.L_OP_INVOKE_OBJECT_INIT_RANGE: /* 0xf0 */
+/* File: armv5te/OP_INVOKE_OBJECT_INIT_RANGE.S */
     /*
-     * invoke-direct-empty is a no-op in a "standard" interpreter.
+     * Invoke Object.<init> on an object.  In practice we know that
+     * Object's nullary constructor doesn't do anything, so we just
+     * skip it (we know a debugger isn't active).
      */
-    FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
+    FETCH(r1, 2)                  @ r1<- CCCC
+    GET_VREG(r0, r1)                    @ r0<- "this" ptr
+    cmp     r0, #0                      @ check for NULL
+    beq     common_errNullObject        @ export PC and throw NPE
+    ldr     r1, [r0, #offObject_clazz]  @ r1<- obj->clazz
+    ldr     r2, [r1, #offClassObject_accessFlags] @ r2<- clazz->accessFlags
+    tst     r2, #CLASS_ISFINALIZABLE    @ is this class finalizable?
+    beq     1f                          @ nope, done
+    bl      dvmSetFinalizable           @ call dvmSetFinalizable(obj)
+1:  FETCH_ADVANCE_INST(2+1)       @ advance to next instr, load rINST
     GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
     GOTO_OPCODE(ip)                     @ execute it
 
@@ -7495,7 +7498,7 @@
     beq     common_errNullObject        @ object was null
     and     r2, r2, #15
     GET_VREG(r0, r2)                    @ r0<- fp[A]
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
     str     r0, [r3, r1]                @ obj.field (always 32 bits)<- r0
     cmp     r0, #0
@@ -7567,7 +7570,7 @@
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     .if     (!0)
     and     r10, r10, #15               @ r10<- D (or stays CCCC)
     .endif
@@ -7595,7 +7598,7 @@
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     FETCH(r10, 2)                       @ r10<- GFED or CCCC
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     .if     (!1)
     and     r10, r10, #15               @ r10<- D (or stays CCCC)
     .endif
@@ -7623,14 +7626,14 @@
      */
     /* op vA, vB, field@CCCC */
     mov     r0, rINST, lsr #12          @ r0<- B
-    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref CCCC
     ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
     GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_IPUT_OBJECT_VOLATILE_finish          @ no, already resolved
-8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveInstField         @ r0<- resolved InstField ptr
@@ -7650,7 +7653,7 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
@@ -7677,13 +7680,13 @@
      * for: sput-object, sput-object-volatile
      */
     /* op vAA, field@BBBB */
-    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
     FETCH(r1, 1)                        @ r1<- field ref BBBB
     ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
     ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
     cmp     r0, #0                      @ is resolved entry null?
     bne     .LOP_SPUT_OBJECT_VOLATILE_finish          @ no, continue
-    ldr     r9, [rGLUE, #offGlue_method]    @ r9<- current method
+    ldr     r9, [rSELF, #offThread_method]    @ r9<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r9, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -7697,10 +7700,3110 @@
     .balign 64
 .L_OP_DISPATCH_FF: /* 0xff */
 /* File: armv5te/OP_DISPATCH_FF.S */
+    mov     ip, rINST, lsr #8           @ ip<- extended opcode
+    add     ip, ip, #256                @ add offset for extended opcodes
+    GOTO_OPCODE(ip)                     @ go to proper extended handler
+
+
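For the dispatch handler just added: the low byte of the instruction unit is the 0xFF escape, and the high byte selects the extended handler, offset by 256 so the jumbo opcodes land in table slots 0x100 and up. A small C sketch of that index computation, assuming the 16-bit instruction unit is already in hand:

    #include <stdint.h>

    /* Sketch of the 0xFF dispatch: take the high byte of the unit
     * (rINST >> 8) and add 256, mirroring "mov ip, rINST, lsr #8" and
     * "add ip, ip, #256" above. */
    static inline unsigned extendedOpcodeIndex(uint16_t instrUnit) {
        return (instrUnit >> 8) + 256;  /* e.g. 0x00FF -> 0x100 (const-class/jumbo) */
    }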
+/* ------------------------------ */
+    .balign 64
+.L_OP_CONST_CLASS_JUMBO: /* 0x100 */
+/* File: armv5te/OP_CONST_CLASS_JUMBO.S */
+    /* const-class/jumbo vBBBB, Class@AAAAAAAA */
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- self->methodClassDex

+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResClasses]   @ r2<- dvmDex->pResClasses
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    FETCH(r9, 3)                        @ r9<- BBBB
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResClasses[AAAAaaaa]
+    cmp     r0, #0                      @ not yet resolved?
+    beq     .LOP_CONST_CLASS_JUMBO_resolve
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vBBBB<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
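The jumbo handlers fetch their 32-bit pool index as two 16-bit halves (aaaa then AAAA) and the destination register as a third unit (BBBB), exactly as the comments above annotate. A minimal C sketch of that decoding, assuming insns points at the opcode unit (the helper name is illustrative, not a real VM function):

    #include <stdint.h>

    /* Reassemble the jumbo operands from the code units following the
     * opcode: unit 1 = aaaa (lo), unit 2 = AAAA (hi), unit 3 = vBBBB.
     * The orr with lsl #16 in the assembly does the same combine. */
    static void decodeJumboConstClass(const uint16_t* insns,
                                      uint32_t* classIdx, uint16_t* vdst) {
        *classIdx = (uint32_t)insns[1] | ((uint32_t)insns[2] << 16);
        *vdst     = insns[3];
    }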
+/* ------------------------------ */
+    .balign 64
+.L_OP_CHECK_CAST_JUMBO: /* 0x101 */
+/* File: armv5te/OP_CHECK_CAST_JUMBO.S */
+    /*
+     * Check to see if a cast from one class to another is allowed.
+     */
+    /* check-cast/jumbo vBBBB, class@AAAAAAAA */
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r3, 3)                        @ r3<- BBBB
+    orr     r2, r0, r2, lsl #16         @ r2<- AAAAaaaa
+    GET_VREG(r9, r3)                    @ r9<- object
+    ldr     r0, [rSELF, #offThread_methodClassDex]    @ r0<- pDvmDex
+    cmp     r9, #0                      @ is object null?
+    ldr     r0, [r0, #offDvmDex_pResClasses]    @ r0<- pDvmDex->pResClasses
+    beq     .LOP_CHECK_CAST_JUMBO_okay            @ null obj, cast always succeeds
+    ldr     r1, [r0, r2, lsl #2]        @ r1<- resolved class
+    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
+    cmp     r1, #0                      @ have we resolved this before?
+    beq     .LOP_CHECK_CAST_JUMBO_resolve         @ not resolved, do it now
+.LOP_CHECK_CAST_JUMBO_resolved:
+    cmp     r0, r1                      @ same class (trivial success)?
+    bne     .LOP_CHECK_CAST_JUMBO_fullcheck       @ no, do full check
+    b       .LOP_CHECK_CAST_JUMBO_okay            @ yes, finish up
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INSTANCE_OF_JUMBO: /* 0x102 */
+/* File: armv5te/OP_INSTANCE_OF_JUMBO.S */
+    /*
+     * Check to see if an object reference is an instance of a class.
+     *
+     * Most common situation is a non-null object, being compared against
+     * an already-resolved class.
+     *
+     * TODO: convert most of this into a common subroutine, shared with
+     *       OP_INSTANCE_OF.S.
+     */
+    /* instance-of/jumbo vBBBB, vCCCC, class@AAAAAAAA */
+    FETCH(r3, 4)                        @ r3<- vCCCC
+    FETCH(r9, 3)                        @ r9<- vBBBB
+    GET_VREG(r0, r3)                    @ r0<- vCCCC (object)
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- pDvmDex
+    cmp     r0, #0                      @ is object null?
+    beq     .LOP_INSTANCE_OF_JUMBO_store           @ null obj, not an instance, store r0
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r3, 2)                        @ r3<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResClasses]    @ r2<- pDvmDex->pResClasses
+    orr     r3, r1, r3, lsl #16         @ r3<- AAAAaaaa
+    ldr     r1, [r2, r3, lsl #2]        @ r1<- resolved class
+    ldr     r0, [r0, #offObject_clazz]  @ r0<- obj->clazz
+    cmp     r1, #0                      @ have we resolved this before?
+    beq     .LOP_INSTANCE_OF_JUMBO_resolve         @ not resolved, do it now
+    b       .LOP_INSTANCE_OF_JUMBO_resolved        @ resolved, continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_NEW_INSTANCE_JUMBO: /* 0x103 */
+/* File: armv5te/OP_NEW_INSTANCE_JUMBO.S */
+    /*
+     * Create a new instance of a class.
+     */
+    /* new-instance/jumbo vBBBB, class@AAAAAAAA */
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
+    EXPORT_PC()                         @ req'd for init, resolve, alloc
+    cmp     r0, #0                      @ already resolved?
+    beq     .LOP_NEW_INSTANCE_JUMBO_resolve         @ no, resolve it now
+.LOP_NEW_INSTANCE_JUMBO_resolved:   @ r0=class
+    ldrb    r1, [r0, #offClassObject_status]    @ r1<- ClassStatus enum
+    cmp     r1, #CLASS_INITIALIZED      @ has class been initialized?
+    bne     .LOP_NEW_INSTANCE_JUMBO_needinit        @ no, init class now
+.LOP_NEW_INSTANCE_JUMBO_initialized: @ r0=class
+    mov     r1, #ALLOC_DONT_TRACK       @ flags for alloc call
+    bl      dvmAllocObject              @ r0<- new object
+    b       .LOP_NEW_INSTANCE_JUMBO_finish          @ continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_NEW_ARRAY_JUMBO: /* 0x104 */
+/* File: armv5te/OP_NEW_ARRAY_JUMBO.S */
+    /*
+     * Allocate an array of objects, specified with the array class
+     * and a count.
+     *
+     * The verifier guarantees that this is an array class, so we don't
+     * check for it here.
+     */
+    /* new-array/jumbo vBBBB, vCCCC, class@AAAAAAAA */
+    FETCH(r2, 1)                        @ r2<- aaaa (lo)
+    FETCH(r3, 2)                        @ r3<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- vCCCC
+    orr     r2, r2, r3, lsl #16         @ r2<- AAAAaaaa
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    GET_VREG(r1, r0)                    @ r1<- vCCCC (array length)
+    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
+    cmp     r1, #0                      @ check length
+    ldr     r0, [r3, r2, lsl #2]        @ r0<- resolved class
+    bmi     common_errNegativeArraySize @ negative length, bail - len in r1
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ req'd for resolve, alloc
+    bne     .LOP_NEW_ARRAY_JUMBO_finish          @ resolved, continue
+    b       .LOP_NEW_ARRAY_JUMBO_resolve         @ do resolve now
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_FILLED_NEW_ARRAY_JUMBO: /* 0x105 */
+/* File: armv5te/OP_FILLED_NEW_ARRAY_JUMBO.S */
+    /*
+     * Create a new array with elements filled from registers.
+     *
+     * TODO: convert most of this into a common subroutine, shared with
+     *       OP_FILLED_NEW_ARRAY.S.
+     */
+    /* filled-new-array/jumbo {vCCCC..v(CCCC+BBBB-1)}, type@AAAAAAAA */
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
+    EXPORT_PC()                         @ need for resolve and alloc
+    cmp     r0, #0                      @ already resolved?
+    bne     .LOP_FILLED_NEW_ARRAY_JUMBO_continue        @ yes, continue on
+8:  ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- call(clazz, ref)
+    cmp     r0, #0                      @ got null?
+    beq     common_exceptionThrown      @ yes, handle exception
+    b       .LOP_FILLED_NEW_ARRAY_JUMBO_continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_JUMBO: /* 0x106 */
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_JUMBO_resolved        @ resolved, continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_WIDE_JUMBO: /* 0x107 */
+/* File: armv5te/OP_IGET_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit instance field get.
+     */
+    /* iget-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_WIDE_JUMBO_finish          @ no, already resolved
+    ldr     r2, [rSELF, #offThread_method] @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_WIDE_JUMBO_resolved        @ resolved, continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_OBJECT_JUMBO: /* 0x108 */
+/* File: armv5te/OP_IGET_OBJECT_JUMBO.S */
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_OBJECT_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_OBJECT_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_BOOLEAN_JUMBO: /* 0x109 */
+/* File: armv5te/OP_IGET_BOOLEAN_JUMBO.S */
+@include "armv5te/OP_IGET_JUMBO.S" { "load":"ldrb", "sqnum":"1" }
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_BOOLEAN_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_BOOLEAN_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_BYTE_JUMBO: /* 0x10a */
+/* File: armv5te/OP_IGET_BYTE_JUMBO.S */
+@include "armv5te/OP_IGET_JUMBO.S" { "load":"ldrsb", "sqnum":"2" }
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_BYTE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_BYTE_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_CHAR_JUMBO: /* 0x10b */
+/* File: armv5te/OP_IGET_CHAR_JUMBO.S */
+@include "armv5te/OP_IGET_JUMBO.S" { "load":"ldrh", "sqnum":"3" }
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_CHAR_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_CHAR_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_SHORT_JUMBO: /* 0x10c */
+/* File: armv5te/OP_IGET_SHORT_JUMBO.S */
+@include "armv5te/OP_IGET_JUMBO.S" { "load":"ldrsh", "sqnum":"4" }
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_SHORT_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_SHORT_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_JUMBO: /* 0x10d */
+/* File: armv5te/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_JUMBO_resolved        @ resolved, continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_WIDE_JUMBO: /* 0x10e */
+/* File: armv5te/OP_IPUT_WIDE_JUMBO.S */
+    /* iput-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_WIDE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_WIDE_JUMBO_resolved        @ resolved, continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_OBJECT_JUMBO: /* 0x10f */
+/* File: armv5te/OP_IPUT_OBJECT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     */
+    /* iput-object/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_OBJECT_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_OBJECT_JUMBO_resolved        @ resolved, continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_BOOLEAN_JUMBO: /* 0x110 */
+/* File: armv5te/OP_IPUT_BOOLEAN_JUMBO.S */
+@include "armv5te/OP_IPUT_JUMBO.S" { "store":"strb", "sqnum":"1" }
+/* File: armv5te/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_BOOLEAN_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_BOOLEAN_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_BYTE_JUMBO: /* 0x111 */
+/* File: armv5te/OP_IPUT_BYTE_JUMBO.S */
+@include "armv5te/OP_IPUT_JUMBO.S" { "store":"strb", "sqnum":"2" }
+/* File: armv5te/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_BYTE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_BYTE_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_CHAR_JUMBO: /* 0x112 */
+/* File: armv5te/OP_IPUT_CHAR_JUMBO.S */
+@include "armv5te/OP_IPUT_JUMBO.S" { "store":"strh", "sqnum":"3" }
+/* File: armv5te/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_CHAR_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_CHAR_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_SHORT_JUMBO: /* 0x113 */
+/* File: armv5te/OP_IPUT_SHORT_JUMBO.S */
+@include "armv5te/OP_IPUT_JUMBO.S" { "store":"strh", "sqnum":"4" }
+/* File: armv5te/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_SHORT_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_SHORT_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_JUMBO: /* 0x114 */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_WIDE_JUMBO: /* 0x115 */
+/* File: armv5te/OP_SGET_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit SGET handler.
+     */
+    /* sget-wide/jumbo vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_WIDE_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_WIDE_JUMBO_finish:
+    FETCH(r9, 3)                        @ r9<- BBBB
+    .if 0
+    add     r0, r0, #offStaticField_value @ r0<- pointer to data
+    bl      dvmQuasiAtomicRead64        @ r0/r1<- contents of field
+    .else
+    ldrd    r0, [r0, #offStaticField_value] @ r0/r1<- field value (aligned)
+    .endif
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BBBB]
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    stmia   r9, {r0-r1}                 @ vBBBB/vBBBB+1<- r0/r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_OBJECT_JUMBO: /* 0x116 */
+/* File: armv5te/OP_SGET_OBJECT_JUMBO.S */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_OBJECT_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_OBJECT_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_BOOLEAN_JUMBO: /* 0x117 */
+/* File: armv5te/OP_SGET_BOOLEAN_JUMBO.S */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_BOOLEAN_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_BOOLEAN_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_BYTE_JUMBO: /* 0x118 */
+/* File: armv5te/OP_SGET_BYTE_JUMBO.S */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_BYTE_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_BYTE_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_CHAR_JUMBO: /* 0x119 */
+/* File: armv5te/OP_SGET_CHAR_JUMBO.S */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_CHAR_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_CHAR_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_SHORT_JUMBO: /* 0x11a */
+/* File: armv5te/OP_SGET_SHORT_JUMBO.S */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_SHORT_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_SHORT_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_JUMBO: /* 0x11b */
+/* File: armv5te/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str     r1, [r0, #offStaticField_value] @ field<- vBBBB
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_WIDE_JUMBO: /* 0x11c */
+/* File: armv5te/OP_SPUT_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit SPUT handler.
+     */
+    /* sput-wide/jumbo vBBBB, field@AAAAAAAA */
+    ldr     r0, [rSELF, #offThread_methodClassDex]  @ r0<- DvmDex
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    ldr     r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    FETCH(r9, 3)                        @ r9<- BBBB
+    ldr     r2, [r0, r1, lsl #2]        @ r2<- resolved StaticField ptr
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BBBB]
+    cmp     r2, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_WIDE_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_WIDE_JUMBO_finish: @ field ptr in r2, BBBB in r9
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vBBBB/vBBBB+1
+    GET_INST_OPCODE(r10)                @ extract opcode from rINST
+    .if 0
+    add     r2, r2, #offStaticField_value @ r2<- pointer to data
+    bl      dvmQuasiAtomicSwap64        @ stores r0/r1 into addr r2
+    .else
+    strd    r0, [r2, #offStaticField_value] @ field<- vBBBB/vBBBB+1
+    .endif
+    GOTO_OPCODE(r10)                    @ jump to next instruction
+
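The .if 0 / .else pair above is the template's volatile switch: this non-volatile instantiation writes the two words with a plain strd, whereas a volatile variant would instead call dvmQuasiAtomicSwap64 so the 64-bit store lands atomically. A rough C illustration of the two branches, with placeholder types (the __atomic_store_n builtin here merely stands in for the atomic path):

    #include <stdint.h>

    typedef struct { int64_t wideValue; } WideFieldSlot;   /* stand-in for StaticField */

    static void sputWideJumboStore(WideFieldSlot *field, int64_t newValue, int isVolatile)
    {
        if (isVolatile) {
            /* volatile template: route through an atomic 64-bit swap so both
             * halves of the value are observed together */
            __atomic_store_n(&field->wideValue, newValue, __ATOMIC_SEQ_CST);
        } else {
            field->wideValue = newValue;                /* plain strd-style store */
        }
    }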
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_OBJECT_JUMBO: /* 0x11d */
+/* File: armv5te/OP_SPUT_OBJECT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler for objects
+     */
+    /* sput-object/jumbo vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_SPUT_OBJECT_JUMBO_finish          @ no, continue
+    ldr     r9, [rSELF, #offThread_method]    @ r9<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r9, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_OBJECT_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_BOOLEAN_JUMBO: /* 0x11e */
+/* File: armv5te/OP_SPUT_BOOLEAN_JUMBO.S */
+/* File: armv5te/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_BOOLEAN_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_BOOLEAN_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str     r1, [r0, #offStaticField_value] @ field<- vBBBB
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_BYTE_JUMBO: /* 0x11f */
+/* File: armv5te/OP_SPUT_BYTE_JUMBO.S */
+/* File: armv5te/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_BYTE_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_BYTE_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str     r1, [r0, #offStaticField_value] @ field<- vBBBB
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_CHAR_JUMBO: /* 0x120 */
+/* File: armv5te/OP_SPUT_CHAR_JUMBO.S */
+/* File: armv5te/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_CHAR_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_CHAR_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str     r1, [r0, #offStaticField_value] @ field<- vBBBB
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_SHORT_JUMBO: /* 0x121 */
+/* File: armv5te/OP_SPUT_SHORT_JUMBO.S */
+/* File: armv5te/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_SHORT_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_SHORT_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str     r1, [r0, #offStaticField_value] @ field<- vBBBB
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_VIRTUAL_JUMBO: /* 0x122 */
+/* File: armv5te/OP_INVOKE_VIRTUAL_JUMBO.S */
+    /*
+     * Handle a virtual method call.
+     */
+    /* invoke-virtual/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ must export for invoke
+    bne     .LOP_INVOKE_VIRTUAL_JUMBO_continue        @ yes, continue on
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    mov     r2, #METHOD_VIRTUAL         @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    bne     .LOP_INVOKE_VIRTUAL_JUMBO_continue        @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
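The invoke-virtual/jumbo prologue above only selects the base method: it checks the resolved-methods cache, and on a miss exports the PC and calls dvmResolveMethod with METHOD_VIRTUAL, throwing if resolution fails; the receiver-based vtable dispatch happens in the _continue / common_invokeMethodJumbo path, which is emitted elsewhere. A hedged C sketch of the selection step, with stand-in names:

    #include <stdint.h>
    #include <stddef.h>

    typedef struct Method Method;                       /* opaque stand-in */

    /* hypothetical resolver hook; the handler's real call is
     * dvmResolveMethod(clazz, methodIdx, METHOD_VIRTUAL) */
    static Method *resolveVirtualStub(uint32_t methodIdx) { (void)methodIdx; return NULL; }

    static Method *invokeVirtualJumboTarget(Method *const *resMethods, const uint16_t *insns)
    {
        uint32_t methodIdx = (uint32_t)insns[1] | ((uint32_t)insns[2] << 16);  /* AAAAaaaa */
        Method *base = resMethods[methodIdx];           /* cached resolution, may be NULL */
        if (base == NULL)
            base = resolveVirtualStub(methodIdx);       /* slow path; PC already exported */
        return base;                                    /* NULL means an exception is pending */
    }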
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_SUPER_JUMBO: /* 0x123 */
+/* File: armv5te/OP_INVOKE_SUPER_JUMBO.S */
+    /*
+     * Handle a "super" method call.
+     */
+    /* invoke-super/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    FETCH(r10, 4)                       @ r10<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    GET_VREG(r2, r10)                   @ r2<- "this" ptr
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
+    cmp     r2, #0                      @ null "this"?
+    ldr     r9, [rSELF, #offThread_method] @ r9<- current method
+    beq     common_errNullObject        @ null "this", throw exception
+    cmp     r0, #0                      @ already resolved?
+    ldr     r9, [r9, #offMethod_clazz]  @ r9<- method->clazz
+    EXPORT_PC()                         @ must export for invoke
+    bne     .LOP_INVOKE_SUPER_JUMBO_continue        @ resolved, continue on
+    b       .LOP_INVOKE_SUPER_JUMBO_resolve         @ do resolve now
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_DIRECT_JUMBO: /* 0x124 */
+/* File: armv5te/OP_INVOKE_DIRECT_JUMBO.S */
+    /*
+     * Handle a direct method call.
+     *
+     * (We could defer the "is 'this' pointer null" test to the common
+     * method invocation code, and use a flag to indicate that static
+     * calls don't count.  If we do this as part of copying the arguments
+     * out we could avoid loading the first arg twice.)
+     *
+     */
+    /* invoke-direct/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    FETCH(r10, 4)                       @ r10<- CCCC
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ must export for invoke
+    GET_VREG(r2, r10)                   @ r2<- "this" ptr
+    beq     .LOP_INVOKE_DIRECT_JUMBO_resolve         @ not resolved, do it now
+.LOP_INVOKE_DIRECT_JUMBO_finish:
+    cmp     r2, #0                      @ null "this" ref?
+    bne     common_invokeMethodJumbo    @ no, continue on
+    b       common_errNullObject        @ yes, throw exception
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_STATIC_JUMBO: /* 0x125 */
+/* File: armv5te/OP_INVOKE_STATIC_JUMBO.S */
+    /*
+     * Handle a static method call.
+     */
+    /* invoke-static/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ must export for invoke
+    bne     common_invokeMethodJumbo    @ yes, continue on
+0:  ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    mov     r2, #METHOD_STATIC          @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    bne     common_invokeMethodJumbo    @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_INTERFACE_JUMBO: /* 0x126 */
+/* File: armv5te/OP_INVOKE_INTERFACE_JUMBO.S */
+    /*
+     * Handle an interface method call.
+     */
+    /* invoke-interface/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    FETCH(r2, 4)                        @ r2<- CCCC
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    EXPORT_PC()                         @ must export for invoke
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    GET_VREG(r0, r2)                    @ r0<- first arg ("this")
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- methodClassDex
+    cmp     r0, #0                      @ null obj?
+    ldr     r2, [rSELF, #offThread_method]  @ r2<- method
+    beq     common_errNullObject        @ yes, fail
+    ldr     r0, [r0, #offObject_clazz]  @ r0<- thisPtr->clazz
+    bl      dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
+    cmp     r0, #0                      @ failed?
+    beq     common_exceptionThrown      @ yes, handle exception
+    b       common_invokeMethodJumbo    @ jump to common handler
+
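Interface dispatch cannot be resolved from the method reference alone, so the handler above loads and null-checks the receiver, takes its concrete class, and asks dvmFindInterfaceMethodInCache for the target; a null result raises the pending exception. Roughly, in C (stand-in types, and a single NULL return standing in for both error paths that the real handler keeps separate):

    #include <stdint.h>
    #include <stddef.h>

    typedef struct Clazz Clazz;                         /* opaque stand-in for ClassObject */
    typedef struct { Clazz *clazz; } ObjHeader;         /* stand-in for Object */
    typedef struct Method Method;

    /* hypothetical stand-in for dvmFindInterfaceMethodInCache(clazz, ref, method, dex) */
    static Method *findInterfaceMethodStub(Clazz *c, uint32_t ref) { (void)c; (void)ref; return NULL; }

    static Method *invokeInterfaceJumboTarget(const ObjHeader *thisPtr, uint32_t methodIdx)
    {
        if (thisPtr == NULL)
            return NULL;            /* null "this": handler throws NullPointerException */
        /* the lookup needs the receiver's concrete class, not the declared interface */
        return findInterfaceMethodStub(thisPtr->clazz, methodIdx);
    }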
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_27FF: /* 0x127 */
+/* File: armv5te/OP_UNUSED_27FF.S */
 /* File: armv5te/unused.S */
     bl      common_abort
 
 
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_28FF: /* 0x128 */
+/* File: armv5te/OP_UNUSED_28FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_29FF: /* 0x129 */
+/* File: armv5te/OP_UNUSED_29FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2AFF: /* 0x12a */
+/* File: armv5te/OP_UNUSED_2AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2BFF: /* 0x12b */
+/* File: armv5te/OP_UNUSED_2BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2CFF: /* 0x12c */
+/* File: armv5te/OP_UNUSED_2CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2DFF: /* 0x12d */
+/* File: armv5te/OP_UNUSED_2DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2EFF: /* 0x12e */
+/* File: armv5te/OP_UNUSED_2EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2FFF: /* 0x12f */
+/* File: armv5te/OP_UNUSED_2FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_30FF: /* 0x130 */
+/* File: armv5te/OP_UNUSED_30FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_31FF: /* 0x131 */
+/* File: armv5te/OP_UNUSED_31FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_32FF: /* 0x132 */
+/* File: armv5te/OP_UNUSED_32FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_33FF: /* 0x133 */
+/* File: armv5te/OP_UNUSED_33FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_34FF: /* 0x134 */
+/* File: armv5te/OP_UNUSED_34FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_35FF: /* 0x135 */
+/* File: armv5te/OP_UNUSED_35FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_36FF: /* 0x136 */
+/* File: armv5te/OP_UNUSED_36FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_37FF: /* 0x137 */
+/* File: armv5te/OP_UNUSED_37FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_38FF: /* 0x138 */
+/* File: armv5te/OP_UNUSED_38FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_39FF: /* 0x139 */
+/* File: armv5te/OP_UNUSED_39FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3AFF: /* 0x13a */
+/* File: armv5te/OP_UNUSED_3AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3BFF: /* 0x13b */
+/* File: armv5te/OP_UNUSED_3BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3CFF: /* 0x13c */
+/* File: armv5te/OP_UNUSED_3CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3DFF: /* 0x13d */
+/* File: armv5te/OP_UNUSED_3DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3EFF: /* 0x13e */
+/* File: armv5te/OP_UNUSED_3EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3FFF: /* 0x13f */
+/* File: armv5te/OP_UNUSED_3FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_40FF: /* 0x140 */
+/* File: armv5te/OP_UNUSED_40FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_41FF: /* 0x141 */
+/* File: armv5te/OP_UNUSED_41FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_42FF: /* 0x142 */
+/* File: armv5te/OP_UNUSED_42FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_43FF: /* 0x143 */
+/* File: armv5te/OP_UNUSED_43FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_44FF: /* 0x144 */
+/* File: armv5te/OP_UNUSED_44FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_45FF: /* 0x145 */
+/* File: armv5te/OP_UNUSED_45FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_46FF: /* 0x146 */
+/* File: armv5te/OP_UNUSED_46FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_47FF: /* 0x147 */
+/* File: armv5te/OP_UNUSED_47FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_48FF: /* 0x148 */
+/* File: armv5te/OP_UNUSED_48FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_49FF: /* 0x149 */
+/* File: armv5te/OP_UNUSED_49FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4AFF: /* 0x14a */
+/* File: armv5te/OP_UNUSED_4AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4BFF: /* 0x14b */
+/* File: armv5te/OP_UNUSED_4BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4CFF: /* 0x14c */
+/* File: armv5te/OP_UNUSED_4CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4DFF: /* 0x14d */
+/* File: armv5te/OP_UNUSED_4DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4EFF: /* 0x14e */
+/* File: armv5te/OP_UNUSED_4EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4FFF: /* 0x14f */
+/* File: armv5te/OP_UNUSED_4FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_50FF: /* 0x150 */
+/* File: armv5te/OP_UNUSED_50FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_51FF: /* 0x151 */
+/* File: armv5te/OP_UNUSED_51FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_52FF: /* 0x152 */
+/* File: armv5te/OP_UNUSED_52FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_53FF: /* 0x153 */
+/* File: armv5te/OP_UNUSED_53FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_54FF: /* 0x154 */
+/* File: armv5te/OP_UNUSED_54FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_55FF: /* 0x155 */
+/* File: armv5te/OP_UNUSED_55FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_56FF: /* 0x156 */
+/* File: armv5te/OP_UNUSED_56FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_57FF: /* 0x157 */
+/* File: armv5te/OP_UNUSED_57FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_58FF: /* 0x158 */
+/* File: armv5te/OP_UNUSED_58FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_59FF: /* 0x159 */
+/* File: armv5te/OP_UNUSED_59FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5AFF: /* 0x15a */
+/* File: armv5te/OP_UNUSED_5AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5BFF: /* 0x15b */
+/* File: armv5te/OP_UNUSED_5BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5CFF: /* 0x15c */
+/* File: armv5te/OP_UNUSED_5CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5DFF: /* 0x15d */
+/* File: armv5te/OP_UNUSED_5DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5EFF: /* 0x15e */
+/* File: armv5te/OP_UNUSED_5EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5FFF: /* 0x15f */
+/* File: armv5te/OP_UNUSED_5FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_60FF: /* 0x160 */
+/* File: armv5te/OP_UNUSED_60FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_61FF: /* 0x161 */
+/* File: armv5te/OP_UNUSED_61FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_62FF: /* 0x162 */
+/* File: armv5te/OP_UNUSED_62FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_63FF: /* 0x163 */
+/* File: armv5te/OP_UNUSED_63FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_64FF: /* 0x164 */
+/* File: armv5te/OP_UNUSED_64FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_65FF: /* 0x165 */
+/* File: armv5te/OP_UNUSED_65FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_66FF: /* 0x166 */
+/* File: armv5te/OP_UNUSED_66FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_67FF: /* 0x167 */
+/* File: armv5te/OP_UNUSED_67FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_68FF: /* 0x168 */
+/* File: armv5te/OP_UNUSED_68FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_69FF: /* 0x169 */
+/* File: armv5te/OP_UNUSED_69FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6AFF: /* 0x16a */
+/* File: armv5te/OP_UNUSED_6AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6BFF: /* 0x16b */
+/* File: armv5te/OP_UNUSED_6BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6CFF: /* 0x16c */
+/* File: armv5te/OP_UNUSED_6CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6DFF: /* 0x16d */
+/* File: armv5te/OP_UNUSED_6DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6EFF: /* 0x16e */
+/* File: armv5te/OP_UNUSED_6EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6FFF: /* 0x16f */
+/* File: armv5te/OP_UNUSED_6FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_70FF: /* 0x170 */
+/* File: armv5te/OP_UNUSED_70FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_71FF: /* 0x171 */
+/* File: armv5te/OP_UNUSED_71FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_72FF: /* 0x172 */
+/* File: armv5te/OP_UNUSED_72FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_73FF: /* 0x173 */
+/* File: armv5te/OP_UNUSED_73FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_74FF: /* 0x174 */
+/* File: armv5te/OP_UNUSED_74FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_75FF: /* 0x175 */
+/* File: armv5te/OP_UNUSED_75FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_76FF: /* 0x176 */
+/* File: armv5te/OP_UNUSED_76FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_77FF: /* 0x177 */
+/* File: armv5te/OP_UNUSED_77FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_78FF: /* 0x178 */
+/* File: armv5te/OP_UNUSED_78FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_79FF: /* 0x179 */
+/* File: armv5te/OP_UNUSED_79FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7AFF: /* 0x17a */
+/* File: armv5te/OP_UNUSED_7AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7BFF: /* 0x17b */
+/* File: armv5te/OP_UNUSED_7BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7CFF: /* 0x17c */
+/* File: armv5te/OP_UNUSED_7CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7DFF: /* 0x17d */
+/* File: armv5te/OP_UNUSED_7DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7EFF: /* 0x17e */
+/* File: armv5te/OP_UNUSED_7EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7FFF: /* 0x17f */
+/* File: armv5te/OP_UNUSED_7FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_80FF: /* 0x180 */
+/* File: armv5te/OP_UNUSED_80FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_81FF: /* 0x181 */
+/* File: armv5te/OP_UNUSED_81FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_82FF: /* 0x182 */
+/* File: armv5te/OP_UNUSED_82FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_83FF: /* 0x183 */
+/* File: armv5te/OP_UNUSED_83FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_84FF: /* 0x184 */
+/* File: armv5te/OP_UNUSED_84FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_85FF: /* 0x185 */
+/* File: armv5te/OP_UNUSED_85FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_86FF: /* 0x186 */
+/* File: armv5te/OP_UNUSED_86FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_87FF: /* 0x187 */
+/* File: armv5te/OP_UNUSED_87FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_88FF: /* 0x188 */
+/* File: armv5te/OP_UNUSED_88FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_89FF: /* 0x189 */
+/* File: armv5te/OP_UNUSED_89FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8AFF: /* 0x18a */
+/* File: armv5te/OP_UNUSED_8AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8BFF: /* 0x18b */
+/* File: armv5te/OP_UNUSED_8BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8CFF: /* 0x18c */
+/* File: armv5te/OP_UNUSED_8CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8DFF: /* 0x18d */
+/* File: armv5te/OP_UNUSED_8DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8EFF: /* 0x18e */
+/* File: armv5te/OP_UNUSED_8EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8FFF: /* 0x18f */
+/* File: armv5te/OP_UNUSED_8FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_90FF: /* 0x190 */
+/* File: armv5te/OP_UNUSED_90FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_91FF: /* 0x191 */
+/* File: armv5te/OP_UNUSED_91FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_92FF: /* 0x192 */
+/* File: armv5te/OP_UNUSED_92FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_93FF: /* 0x193 */
+/* File: armv5te/OP_UNUSED_93FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_94FF: /* 0x194 */
+/* File: armv5te/OP_UNUSED_94FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_95FF: /* 0x195 */
+/* File: armv5te/OP_UNUSED_95FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_96FF: /* 0x196 */
+/* File: armv5te/OP_UNUSED_96FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_97FF: /* 0x197 */
+/* File: armv5te/OP_UNUSED_97FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_98FF: /* 0x198 */
+/* File: armv5te/OP_UNUSED_98FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_99FF: /* 0x199 */
+/* File: armv5te/OP_UNUSED_99FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9AFF: /* 0x19a */
+/* File: armv5te/OP_UNUSED_9AFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9BFF: /* 0x19b */
+/* File: armv5te/OP_UNUSED_9BFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9CFF: /* 0x19c */
+/* File: armv5te/OP_UNUSED_9CFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9DFF: /* 0x19d */
+/* File: armv5te/OP_UNUSED_9DFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9EFF: /* 0x19e */
+/* File: armv5te/OP_UNUSED_9EFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9FFF: /* 0x19f */
+/* File: armv5te/OP_UNUSED_9FFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A0FF: /* 0x1a0 */
+/* File: armv5te/OP_UNUSED_A0FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A1FF: /* 0x1a1 */
+/* File: armv5te/OP_UNUSED_A1FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A2FF: /* 0x1a2 */
+/* File: armv5te/OP_UNUSED_A2FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A3FF: /* 0x1a3 */
+/* File: armv5te/OP_UNUSED_A3FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A4FF: /* 0x1a4 */
+/* File: armv5te/OP_UNUSED_A4FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A5FF: /* 0x1a5 */
+/* File: armv5te/OP_UNUSED_A5FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A6FF: /* 0x1a6 */
+/* File: armv5te/OP_UNUSED_A6FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A7FF: /* 0x1a7 */
+/* File: armv5te/OP_UNUSED_A7FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A8FF: /* 0x1a8 */
+/* File: armv5te/OP_UNUSED_A8FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A9FF: /* 0x1a9 */
+/* File: armv5te/OP_UNUSED_A9FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_AAFF: /* 0x1aa */
+/* File: armv5te/OP_UNUSED_AAFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_ABFF: /* 0x1ab */
+/* File: armv5te/OP_UNUSED_ABFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_ACFF: /* 0x1ac */
+/* File: armv5te/OP_UNUSED_ACFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_ADFF: /* 0x1ad */
+/* File: armv5te/OP_UNUSED_ADFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_AEFF: /* 0x1ae */
+/* File: armv5te/OP_UNUSED_AEFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_AFFF: /* 0x1af */
+/* File: armv5te/OP_UNUSED_AFFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B0FF: /* 0x1b0 */
+/* File: armv5te/OP_UNUSED_B0FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B1FF: /* 0x1b1 */
+/* File: armv5te/OP_UNUSED_B1FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B2FF: /* 0x1b2 */
+/* File: armv5te/OP_UNUSED_B2FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B3FF: /* 0x1b3 */
+/* File: armv5te/OP_UNUSED_B3FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B4FF: /* 0x1b4 */
+/* File: armv5te/OP_UNUSED_B4FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B5FF: /* 0x1b5 */
+/* File: armv5te/OP_UNUSED_B5FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B6FF: /* 0x1b6 */
+/* File: armv5te/OP_UNUSED_B6FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B7FF: /* 0x1b7 */
+/* File: armv5te/OP_UNUSED_B7FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B8FF: /* 0x1b8 */
+/* File: armv5te/OP_UNUSED_B8FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B9FF: /* 0x1b9 */
+/* File: armv5te/OP_UNUSED_B9FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BAFF: /* 0x1ba */
+/* File: armv5te/OP_UNUSED_BAFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BBFF: /* 0x1bb */
+/* File: armv5te/OP_UNUSED_BBFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BCFF: /* 0x1bc */
+/* File: armv5te/OP_UNUSED_BCFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BDFF: /* 0x1bd */
+/* File: armv5te/OP_UNUSED_BDFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BEFF: /* 0x1be */
+/* File: armv5te/OP_UNUSED_BEFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BFFF: /* 0x1bf */
+/* File: armv5te/OP_UNUSED_BFFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C0FF: /* 0x1c0 */
+/* File: armv5te/OP_UNUSED_C0FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C1FF: /* 0x1c1 */
+/* File: armv5te/OP_UNUSED_C1FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C2FF: /* 0x1c2 */
+/* File: armv5te/OP_UNUSED_C2FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C3FF: /* 0x1c3 */
+/* File: armv5te/OP_UNUSED_C3FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C4FF: /* 0x1c4 */
+/* File: armv5te/OP_UNUSED_C4FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C5FF: /* 0x1c5 */
+/* File: armv5te/OP_UNUSED_C5FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C6FF: /* 0x1c6 */
+/* File: armv5te/OP_UNUSED_C6FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C7FF: /* 0x1c7 */
+/* File: armv5te/OP_UNUSED_C7FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C8FF: /* 0x1c8 */
+/* File: armv5te/OP_UNUSED_C8FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C9FF: /* 0x1c9 */
+/* File: armv5te/OP_UNUSED_C9FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CAFF: /* 0x1ca */
+/* File: armv5te/OP_UNUSED_CAFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CBFF: /* 0x1cb */
+/* File: armv5te/OP_UNUSED_CBFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CCFF: /* 0x1cc */
+/* File: armv5te/OP_UNUSED_CCFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CDFF: /* 0x1cd */
+/* File: armv5te/OP_UNUSED_CDFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CEFF: /* 0x1ce */
+/* File: armv5te/OP_UNUSED_CEFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CFFF: /* 0x1cf */
+/* File: armv5te/OP_UNUSED_CFFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D0FF: /* 0x1d0 */
+/* File: armv5te/OP_UNUSED_D0FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D1FF: /* 0x1d1 */
+/* File: armv5te/OP_UNUSED_D1FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D2FF: /* 0x1d2 */
+/* File: armv5te/OP_UNUSED_D2FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D3FF: /* 0x1d3 */
+/* File: armv5te/OP_UNUSED_D3FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D4FF: /* 0x1d4 */
+/* File: armv5te/OP_UNUSED_D4FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D5FF: /* 0x1d5 */
+/* File: armv5te/OP_UNUSED_D5FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D6FF: /* 0x1d6 */
+/* File: armv5te/OP_UNUSED_D6FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D7FF: /* 0x1d7 */
+/* File: armv5te/OP_UNUSED_D7FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D8FF: /* 0x1d8 */
+/* File: armv5te/OP_UNUSED_D8FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D9FF: /* 0x1d9 */
+/* File: armv5te/OP_UNUSED_D9FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DAFF: /* 0x1da */
+/* File: armv5te/OP_UNUSED_DAFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DBFF: /* 0x1db */
+/* File: armv5te/OP_UNUSED_DBFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DCFF: /* 0x1dc */
+/* File: armv5te/OP_UNUSED_DCFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DDFF: /* 0x1dd */
+/* File: armv5te/OP_UNUSED_DDFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DEFF: /* 0x1de */
+/* File: armv5te/OP_UNUSED_DEFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DFFF: /* 0x1df */
+/* File: armv5te/OP_UNUSED_DFFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E0FF: /* 0x1e0 */
+/* File: armv5te/OP_UNUSED_E0FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E1FF: /* 0x1e1 */
+/* File: armv5te/OP_UNUSED_E1FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E2FF: /* 0x1e2 */
+/* File: armv5te/OP_UNUSED_E2FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E3FF: /* 0x1e3 */
+/* File: armv5te/OP_UNUSED_E3FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E4FF: /* 0x1e4 */
+/* File: armv5te/OP_UNUSED_E4FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E5FF: /* 0x1e5 */
+/* File: armv5te/OP_UNUSED_E5FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E6FF: /* 0x1e6 */
+/* File: armv5te/OP_UNUSED_E6FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E7FF: /* 0x1e7 */
+/* File: armv5te/OP_UNUSED_E7FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E8FF: /* 0x1e8 */
+/* File: armv5te/OP_UNUSED_E8FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E9FF: /* 0x1e9 */
+/* File: armv5te/OP_UNUSED_E9FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EAFF: /* 0x1ea */
+/* File: armv5te/OP_UNUSED_EAFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EBFF: /* 0x1eb */
+/* File: armv5te/OP_UNUSED_EBFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_ECFF: /* 0x1ec */
+/* File: armv5te/OP_UNUSED_ECFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EDFF: /* 0x1ed */
+/* File: armv5te/OP_UNUSED_EDFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EEFF: /* 0x1ee */
+/* File: armv5te/OP_UNUSED_EEFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EFFF: /* 0x1ef */
+/* File: armv5te/OP_UNUSED_EFFF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_F0FF: /* 0x1f0 */
+/* File: armv5te/OP_UNUSED_F0FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_F1FF: /* 0x1f1 */
+/* File: armv5te/OP_UNUSED_F1FF.S */
+/* File: armv5te/unused.S */
+    bl      common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_OBJECT_INIT_JUMBO: /* 0x1f2 */
+/* File: armv5te/OP_INVOKE_OBJECT_INIT_JUMBO.S */
+/* File: armv5te/OP_INVOKE_OBJECT_INIT_RANGE.S */
+    /*
+     * Invoke Object.<init> on an object.  In practice we know that
+     * Object's nullary constructor doesn't do anything, so we just
+     * skip it (we know a debugger isn't active).
+     */
+    FETCH(r1, 4)                  @ r1<- CCCC
+    GET_VREG(r0, r1)                    @ r0<- "this" ptr
+    cmp     r0, #0                      @ check for NULL
+    beq     common_errNullObject        @ export PC and throw NPE
+    ldr     r1, [r0, #offObject_clazz]  @ r1<- obj->clazz
+    ldr     r2, [r1, #offClassObject_accessFlags] @ r2<- clazz->accessFlags
+    tst     r2, #CLASS_ISFINALIZABLE    @ is this class finalizable?
+    beq     1f                          @ nope, done
+    bl      dvmSetFinalizable           @ call dvmSetFinalizable(obj)
+1:  FETCH_ADVANCE_INST(4+1)       @ advance to next instr, load rINST
+    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
+    GOTO_OPCODE(ip)                     @ execute it
+
+
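The handler above relies on Object.<init> being empty: the only observable work is the finalizability check, so a non-null receiver whose class carries CLASS_ISFINALIZABLE is registered via dvmSetFinalizable and execution simply advances to the next instruction. A small C sketch of that logic (the flag value and type names are illustrative, not the real definitions):

    #include <stdint.h>
    #include <stddef.h>

    #define STUB_CLASS_ISFINALIZABLE  (1u << 31)        /* illustrative bit, not the real value */

    typedef struct { uint32_t accessFlags; } ClazzStub;             /* stand-in for ClassObject */
    typedef struct { ClazzStub *clazz; int finalizable; } ObjStub;  /* stand-in for Object */

    static int objectInitJumbo(ObjStub *thisPtr)
    {
        if (thisPtr == NULL)
            return -1;                                  /* throw NullPointerException */
        if (thisPtr->clazz->accessFlags & STUB_CLASS_ISFINALIZABLE)
            thisPtr->finalizable = 1;                   /* Dalvik calls dvmSetFinalizable(obj) here */
        return 0;                                       /* otherwise Object.<init> is a no-op */
    }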
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_VOLATILE_JUMBO: /* 0x1f3 */
+/* File: armv5te/OP_IGET_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_VOLATILE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_VOLATILE_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_WIDE_VOLATILE_JUMBO: /* 0x1f4 */
+/* File: armv5te/OP_IGET_WIDE_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_IGET_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit instance field get.
+     */
+    /* iget-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_WIDE_VOLATILE_JUMBO_finish          @ no, already resolved
+    ldr     r2, [rSELF, #offThread_method] @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_WIDE_VOLATILE_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_OBJECT_VOLATILE_JUMBO: /* 0x1f5 */
+/* File: armv5te/OP_IGET_OBJECT_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_IGET_OBJECT_JUMBO.S */
+/* File: armv5te/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_OBJECT_VOLATILE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IGET_OBJECT_VOLATILE_JUMBO_resolved        @ resolved, continue
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_VOLATILE_JUMBO: /* 0x1f6 */
+/* File: armv5te/OP_IPUT_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_VOLATILE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_VOLATILE_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_WIDE_VOLATILE_JUMBO: /* 0x1f7 */
+/* File: armv5te/OP_IPUT_WIDE_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_IPUT_WIDE_JUMBO.S */
+    /* iput-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_WIDE_VOLATILE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method] @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_WIDE_VOLATILE_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_OBJECT_VOLATILE_JUMBO: /* 0x1f8 */
+/* File: armv5te/OP_IPUT_OBJECT_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_IPUT_OBJECT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     */
+    /* iput-object/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    FETCH(r0, 4)                        @ r0<- CCCC
+    ldr     r3, [rSELF, #offThread_methodClassDex]    @ r3<- DvmDex
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[CCCC], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_OBJECT_VOLATILE_JUMBO_finish          @ no, already resolved
+8:  ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    b       .LOP_IPUT_OBJECT_VOLATILE_JUMBO_resolved        @ resolved, continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_VOLATILE_JUMBO: /* 0x1f9 */
+/* File: armv5te/OP_SGET_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_VOLATILE_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_VOLATILE_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    SMP_DMB                            @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_WIDE_VOLATILE_JUMBO: /* 0x1fa */
+/* File: armv5te/OP_SGET_WIDE_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_SGET_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit SGET handler.
+     */
+    /* sget-wide/jumbo vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_WIDE_VOLATILE_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_WIDE_VOLATILE_JUMBO_finish:
+    FETCH(r9, 3)                        @ r9<- BBBB
+    .if 1
+    add     r0, r0, #offStaticField_value @ r0<- pointer to data
+    bl      dvmQuasiAtomicRead64        @ r0/r1<- contents of field
+    .else
+    ldrd    r0, [r0, #offStaticField_value] @ r0/r1<- field value (aligned)
+    .endif
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BBBB]
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    stmia   r9, {r0-r1}                 @ vBBBB/vBBBB+1<- r0/r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_OBJECT_VOLATILE_JUMBO: /* 0x1fb */
+/* File: armv5te/OP_SGET_OBJECT_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_SGET_OBJECT_JUMBO.S */
+/* File: armv5te/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_OBJECT_VOLATILE_JUMBO_resolve         @ yes, do resolve
+.LOP_SGET_OBJECT_VOLATILE_JUMBO_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    SMP_DMB                            @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[BBBB]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_VOLATILE_JUMBO: /* 0x1fc */
+/* File: armv5te/OP_SPUT_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_VOLATILE_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_VOLATILE_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SMP_DMB                            @ releasing store
+    str     r1, [r0, #offStaticField_value] @ field<- vBBBB
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_WIDE_VOLATILE_JUMBO: /* 0x1fd */
+/* File: armv5te/OP_SPUT_WIDE_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_SPUT_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit SPUT handler.
+     */
+    /* sput-wide/jumbo vBBBB, field@AAAAAAAA */
+    ldr     r0, [rSELF, #offThread_methodClassDex]  @ r0<- DvmDex
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    ldr     r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields
+    orr     r1, r1, r2, lsl #16         @ r1<- AAAAaaaa
+    FETCH(r9, 3)                        @ r9<- BBBB
+    ldr     r2, [r0, r1, lsl #2]        @ r2<- resolved StaticField ptr
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BBBB]
+    cmp     r2, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_WIDE_VOLATILE_JUMBO_resolve         @ yes, do resolve
+.LOP_SPUT_WIDE_VOLATILE_JUMBO_finish: @ field ptr in r2, BBBB in r9
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vBBBB/vBBBB+1
+    GET_INST_OPCODE(r10)                @ extract opcode from rINST
+    .if 1
+    add     r2, r2, #offStaticField_value @ r2<- pointer to data
+    bl      dvmQuasiAtomicSwap64        @ stores r0/r1 into addr r2
+    .else
+    strd    r0, [r2, #offStaticField_value] @ field<- vBBBB/vBBBB+1
+    .endif
+    GOTO_OPCODE(r10)                    @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_OBJECT_VOLATILE_JUMBO: /* 0x1fe */
+/* File: armv5te/OP_SPUT_OBJECT_VOLATILE_JUMBO.S */
+/* File: armv5te/OP_SPUT_OBJECT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler for objects
+     */
+    /* sput-object/jumbo vBBBB, field@AAAAAAAA */
+    ldr     r2, [rSELF, #offThread_methodClassDex]    @ r2<- DvmDex
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    orr     r1, r0, r1, lsl #16         @ r1<- AAAAaaaa
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_SPUT_OBJECT_VOLATILE_JUMBO_finish          @ no, continue
+    ldr     r9, [rSELF, #offThread_method]    @ r9<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r9, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_OBJECT_VOLATILE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_THROW_VERIFICATION_ERROR_JUMBO: /* 0x1ff */
+/* File: armv5te/OP_THROW_VERIFICATION_ERROR_JUMBO.S */
+    /*
+     * Handle a jumbo throw-verification-error instruction.  This throws an
+     * exception for an error discovered during verification.  The
+     * exception is indicated by BBBB, with some detail provided by AAAAAAAA.
+     */
+    /* exop BBBB, Class@AAAAAAAA */
+    FETCH(r1, 1)                        @ r1<- aaaa (lo)
+    FETCH(r2, 2)                        @ r2<- AAAA (hi)
+    ldr     r0, [rSELF, #offThread_method]    @ r0<- self->method
+    orr     r2, r1, r2, lsl #16         @ r2<- AAAAaaaa
+    EXPORT_PC()                         @ export the PC
+    FETCH(r1, 3)                        @ r1<- BBBB
+    bl      dvmThrowVerificationError   @ always throws
+    b       common_exceptionThrown      @ handle exception
 
     .balign 64
     .size   dvmAsmInstructionStart, .-dvmAsmInstructionStart
@@ -7727,7 +10830,7 @@
      */
 .LOP_CONST_STRING_resolve:
     EXPORT_PC()
-    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+    ldr     r0, [rSELF, #offThread_method] @ r0<- self->method
     ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveString            @ r0<- String reference
     cmp     r0, #0                      @ failed?
@@ -7746,7 +10849,7 @@
      */
 .LOP_CONST_STRING_JUMBO_resolve:
     EXPORT_PC()
-    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+    ldr     r0, [rSELF, #offThread_method] @ r0<- self->method
     ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveString            @ r0<- String reference
     cmp     r0, #0                      @ failed?
@@ -7765,7 +10868,7 @@
      */
 .LOP_CONST_CLASS_resolve:
     EXPORT_PC()
-    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+    ldr     r0, [rSELF, #offThread_method] @ r0<- self->method
     mov     r2, #1                      @ r2<- true
     ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveClass             @ r0<- Class reference
@@ -7805,7 +10908,7 @@
      */
 .LOP_CHECK_CAST_resolve:
     EXPORT_PC()                         @ resolve() could throw
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     mov     r1, r2                      @ r1<- BBBB
     mov     r2, #0                      @ r2<- false
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
@@ -7858,7 +10961,7 @@
      */
 .LOP_INSTANCE_OF_resolve:
     EXPORT_PC()                         @ resolve() could throw
-    ldr     r0, [rGLUE, #offGlue_method]    @ r0<- glue->method
+    ldr     r0, [rSELF, #offThread_method]    @ r0<- self->method
     mov     r1, r3                      @ r1<- BBBB
     mov     r2, #1                      @ r2<- true
     ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
@@ -7902,7 +11005,7 @@
      *  r1 holds BBBB
      */
 .LOP_NEW_INSTANCE_resolve:
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     mov     r2, #0                      @ r2<- false
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
@@ -7910,9 +11013,6 @@
     bne     .LOP_NEW_INSTANCE_resolved        @ no, continue
     b       common_exceptionThrown      @ yes, handle exception
 
-.LstrInstantiationErrorPtr:
-    .word   .LstrInstantiationError
-
 /* continuation for OP_NEW_ARRAY */
 
 
@@ -7923,7 +11023,7 @@
      *  r2 holds class ref CCCC
      */
 .LOP_NEW_ARRAY_resolve:
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     mov     r9, r1                      @ r9<- length (save)
     mov     r1, r2                      @ r1<- CCCC
     mov     r2, #0                      @ r2<- false
@@ -7978,8 +11078,8 @@
     beq     common_exceptionThrown      @ alloc failed, handle exception
 
     FETCH(r1, 2)                        @ r1<- FEDC or CCCC
-    str     r0, [rGLUE, #offGlue_retval]      @ retval.l <- new array
-    str     rINST, [rGLUE, #offGlue_retval+4] @ retval.h <- type
+    str     r0, [rSELF, #offThread_retval]      @ retval.l <- new array
+    str     rINST, [rSELF, #offThread_retval+4] @ retval.h <- type
     add     r0, r0, #offArrayObject_contents @ r0<- newArray->contents
     subs    r9, r9, #1                  @ length--, check for neg
     FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
@@ -8011,9 +11111,9 @@
     .endif
 
 2:
-    ldr     r0, [rGLUE, #offGlue_retval]     @ r0<- object
-    ldr     r1, [rGLUE, #offGlue_retval+4]   @ r1<- type
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r0, [rSELF, #offThread_retval]     @ r0<- object
+    ldr     r1, [rSELF, #offThread_retval+4]   @ r1<- type
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     GET_INST_OPCODE(ip)                      @ ip<- opcode from rINST
     cmp     r1, #'I'                         @ Is int array?
     strneb  r2, [r2, r0, lsr #GC_CARD_SHIFT] @ Mark card based on object head
@@ -8024,16 +11124,13 @@
      * mode of filled-new-array.
      */
 .LOP_FILLED_NEW_ARRAY_notimpl:
-    ldr     r0, .L_strInternalError
-    ldr     r1, .L_strFilledNewArrayNotImpl
-    bl      dvmThrowException
+    ldr     r0, .L_strFilledNewArrayNotImpl
+    bl      dvmThrowInternalError
     b       common_exceptionThrown
 
     .if     (!0)                 @ define in one or the other, not both
 .L_strFilledNewArrayNotImpl:
     .word   .LstrFilledNewArrayNotImpl
-.L_strInternalError:
-    .word   .LstrInternalError
     .endif
 
 /* continuation for OP_FILLED_NEW_ARRAY_RANGE */
@@ -8062,8 +11159,8 @@
     beq     common_exceptionThrown      @ alloc failed, handle exception
 
     FETCH(r1, 2)                        @ r1<- FEDC or CCCC
-    str     r0, [rGLUE, #offGlue_retval]      @ retval.l <- new array
-    str     rINST, [rGLUE, #offGlue_retval+4] @ retval.h <- type
+    str     r0, [rSELF, #offThread_retval]      @ retval.l <- new array
+    str     rINST, [rSELF, #offThread_retval+4] @ retval.h <- type
     add     r0, r0, #offArrayObject_contents @ r0<- newArray->contents
     subs    r9, r9, #1                  @ length--, check for neg
     FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
@@ -8095,9 +11192,9 @@
     .endif
 
 2:
-    ldr     r0, [rGLUE, #offGlue_retval]     @ r0<- object
-    ldr     r1, [rGLUE, #offGlue_retval+4]   @ r1<- type
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r0, [rSELF, #offThread_retval]     @ r0<- object
+    ldr     r1, [rSELF, #offThread_retval+4]   @ r1<- type
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     GET_INST_OPCODE(ip)                      @ ip<- opcode from rINST
     cmp     r1, #'I'                         @ Is int array?
     strneb  r2, [r2, r0, lsr #GC_CARD_SHIFT] @ Mark card based on object head
@@ -8108,16 +11205,13 @@
      * mode of filled-new-array.
      */
 .LOP_FILLED_NEW_ARRAY_RANGE_notimpl:
-    ldr     r0, .L_strInternalError
-    ldr     r1, .L_strFilledNewArrayNotImpl
-    bl      dvmThrowException
+    ldr     r0, .L_strFilledNewArrayNotImpl
+    bl      dvmThrowInternalError
     b       common_exceptionThrown
 
     .if     (!1)                 @ define in one or the other, not both
 .L_strFilledNewArrayNotImpl:
     .word   .LstrFilledNewArrayNotImpl
-.L_strInternalError:
-    .word   .LstrInternalError
     .endif
 
 /* continuation for OP_CMPL_FLOAT */
@@ -8197,7 +11291,7 @@
     beq     .LOP_APUT_OBJECT_throw           @ no
     mov     r1, rINST                   @ r1<- arrayObj
     FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
-    ldr     r2, [rGLUE, #offGlue_cardTable]     @ get biased CT base
+    ldr     r2, [rSELF, #offThread_cardTable]     @ get biased CT base
     add     r10, #offArrayObject_contents   @ r0<- pointer to slot
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     str     r9, [r10]                   @ vBB[vCC]<- vAA
@@ -8411,7 +11505,7 @@
     and     r1, r1, #15                 @ r1<- A
     cmp     r9, #0                      @ check object for null
     GET_VREG(r0, r1)                    @ r0<- fp[A]
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     beq     common_errNullObject        @ object was null
     FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
@@ -8512,7 +11606,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8529,7 +11623,7 @@
      * Returns StaticField pointer in r0.
      */
 .LOP_SGET_WIDE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8544,7 +11638,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_OBJECT_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8559,7 +11653,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_BOOLEAN_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8574,7 +11668,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_BYTE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8589,7 +11683,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_CHAR_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8604,7 +11698,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_SHORT_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8619,7 +11713,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SPUT_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8637,7 +11731,7 @@
      * Returns StaticField pointer in r2.
      */
 .LOP_SPUT_WIDE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8651,7 +11745,7 @@
     mov     r2, rINST, lsr #8           @ r2<- AA
     FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
     GET_VREG(r1, r2)                    @ r1<- fp[AA]
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     ldr     r9, [r0, #offField_clazz]   @ r9<- field->clazz
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     @ no-op                             @ releasing store
@@ -8667,7 +11761,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SPUT_BOOLEAN_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8682,7 +11776,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SPUT_BYTE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8697,7 +11791,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SPUT_CHAR_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8712,7 +11806,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SPUT_SHORT_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -8779,7 +11873,7 @@
      *  r10 = "this" register
      */
 .LOP_INVOKE_DIRECT_resolve:
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     mov     r2, #METHOD_DIRECT          @ resolver method type
     bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
@@ -8847,7 +11941,7 @@
      *  r10 = "this" register
      */
 .LOP_INVOKE_DIRECT_RANGE_resolve:
-    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
     ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
     mov     r2, #METHOD_DIRECT          @ resolver method type
     bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
@@ -9044,7 +12138,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_VOLATILE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9059,7 +12153,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SPUT_VOLATILE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9147,7 +12241,7 @@
      * Returns StaticField pointer in r0.
      */
 .LOP_SGET_WIDE_VOLATILE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9165,7 +12259,7 @@
      * Returns StaticField pointer in r2.
      */
 .LOP_SPUT_WIDE_VOLATILE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9252,7 +12346,7 @@
     and     r1, r1, #15                 @ r1<- A
     cmp     r9, #0                      @ check object for null
     GET_VREG(r0, r1)                    @ r0<- fp[A]
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     beq     common_errNullObject        @ object was null
     FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
@@ -9269,7 +12363,7 @@
      *  r1: BBBB field ref
      */
 .LOP_SGET_OBJECT_VOLATILE_resolve:
-    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
     EXPORT_PC()                         @ resolve() could throw, so export now
     ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
     bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
@@ -9282,7 +12376,7 @@
     mov     r2, rINST, lsr #8           @ r2<- AA
     FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
     GET_VREG(r1, r2)                    @ r1<- fp[AA]
-    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
     ldr     r9, [r0, #offField_clazz]   @ r9<- field->clazz
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     SMP_DMB                            @ releasing store
@@ -9291,10 +12385,8455 @@
     strneb  r2, [r2, r9, lsr #GC_CARD_SHIFT]  @ mark card based on obj head
     GOTO_OPCODE(ip)                     @ jump to next instruction
 
+/* continuation for OP_CONST_CLASS_JUMBO */
+
+    /*
+     * Continuation if the Class has not yet been resolved.
+     *  r1: AAAAAAAA (Class ref)
+     *  r9: target register
+     */
+.LOP_CONST_CLASS_JUMBO_resolve:
+    EXPORT_PC()
+    ldr     r0, [rSELF, #offThread_method] @ r0<- self->method
+    mov     r2, #1                      @ r2<- true
+    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- Class reference
+    cmp     r0, #0                      @ failed?
+    beq     common_exceptionThrown      @ yup, handle the exception
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vBBBB<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_CHECK_CAST_JUMBO */
+
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  r0 holds obj->clazz
+     *  r1 holds desired class resolved from AAAAAAAA
+     *  r9 holds object
+     */
+.LOP_CHECK_CAST_JUMBO_fullcheck:
+    mov     r10, r1                     @ avoid ClassObject getting clobbered
+    bl      dvmInstanceofNonTrivial     @ r0<- boolean result
+    cmp     r0, #0                      @ failed?
+    bne     .LOP_CHECK_CAST_JUMBO_okay            @ no, success
+
+    @ A cast has failed.  We need to throw a ClassCastException.
+    EXPORT_PC()                         @ about to throw
+    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz (actual class)
+    mov     r1, r10                     @ r1<- desired class
+    bl      dvmThrowClassCastException
+    b       common_exceptionThrown
+
+    /*
+     * Advance PC and get the next opcode.
+     */
+.LOP_CHECK_CAST_JUMBO_okay:
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  r2 holds AAAAAAAA
+     *  r9 holds object
+     */
+.LOP_CHECK_CAST_JUMBO_resolve:
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    mov     r1, r2                      @ r1<- AAAAAAAA
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
+    cmp     r0, #0                      @ got null?
+    beq     common_exceptionThrown      @ yes, handle exception
+    mov     r1, r0                      @ r1<- class resolved from AAAAAAAA
+    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
+    b       .LOP_CHECK_CAST_JUMBO_resolved        @ pick up where we left off
+
+/* continuation for OP_INSTANCE_OF_JUMBO */
+
+    /*
+     * Class resolved, determine type of check necessary.  This is common.
+     *  r0 holds obj->clazz
+     *  r1 holds class resolved from AAAAAAAA
+     *  r9 holds BBBB
+     */
+.LOP_INSTANCE_OF_JUMBO_resolved:
+    cmp     r0, r1                      @ same class (trivial success)?
+    beq     .LOP_INSTANCE_OF_JUMBO_trivial         @ yes, trivial finish
+    @ fall through to OP_INSTANCE_OF_JUMBO_fullcheck
+
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  r0 holds obj->clazz
+     *  r1 holds class resolved from AAAAAAAA
+     *  r9 holds BBBB
+     */
+.LOP_INSTANCE_OF_JUMBO_fullcheck:
+    bl      dvmInstanceofNonTrivial     @ r0<- boolean result
+    @ fall through to OP_INSTANCE_OF_JUMBO_store
+
+    /*
+     * r0 holds boolean result
+     * r9 holds BBBB
+     */
+.LOP_INSTANCE_OF_JUMBO_store:
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r9)                    @ vBBBB<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * Trivial test succeeded, save and bail.
+     *  r9 holds BBBB
+     */
+.LOP_INSTANCE_OF_JUMBO_trivial:
+    mov     r0, #1                      @ indicate success
+    @ could b OP_INSTANCE_OF_JUMBO_store, but copying is faster and cheaper
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r9)                    @ vBBBB<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  r3 holds AAAAAAAA
+     *  r9 holds BBBB
+     */
+
+.LOP_INSTANCE_OF_JUMBO_resolve:
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [rSELF, #offThread_method]    @ r0<- self->method
+    mov     r1, r3                      @ r1<- AAAAAAAA
+    mov     r2, #1                      @ r2<- true
+    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
+    cmp     r0, #0                      @ got null?
+    beq     common_exceptionThrown      @ yes, handle exception
+    FETCH(r3, 4)                        @ r3<- vCCCC
+    mov     r1, r0                      @ r1<- class resolved from AAAAAAAA
+    GET_VREG(r0, r3)                    @ r0<- vCCCC (object)
+    ldr     r0, [r0, #offObject_clazz]  @ r0<- obj->clazz
+    b       .LOP_INSTANCE_OF_JUMBO_resolved        @ pick up where we left off
+
+/* continuation for OP_NEW_INSTANCE_JUMBO */
+
+    .balign 32                          @ minimize cache lines
+.LOP_NEW_INSTANCE_JUMBO_finish: @ r0=new object
+    FETCH(r3, 3)                        @ r3<- BBBB
+    cmp     r0, #0                      @ failed?
+    beq     common_exceptionThrown      @ yes, handle the exception
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r3)                    @ vBBBB<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * Class initialization required.
+     *
+     *  r0 holds class object
+     */
+.LOP_NEW_INSTANCE_JUMBO_needinit:
+    mov     r9, r0                      @ save r0
+    bl      dvmInitClass                @ initialize class
+    cmp     r0, #0                      @ check boolean result
+    mov     r0, r9                      @ restore r0
+    bne     .LOP_NEW_INSTANCE_JUMBO_initialized     @ success, continue
+    b       common_exceptionThrown      @ failed, deal with init exception
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  r1 holds AAAAAAAA
+     */
+.LOP_NEW_INSTANCE_JUMBO_resolve:
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
+    cmp     r0, #0                      @ got null?
+    bne     .LOP_NEW_INSTANCE_JUMBO_resolved        @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
+/* continuation for OP_NEW_ARRAY_JUMBO */
+
+
+    /*
+     * Resolve class.  (This is an uncommon case.)
+     *
+     *  r1 holds array length
+     *  r2 holds class ref AAAAAAAA
+     */
+.LOP_NEW_ARRAY_JUMBO_resolve:
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    mov     r9, r1                      @ r9<- length (save)
+    mov     r1, r2                      @ r1<- AAAAAAAA
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- call(clazz, ref)
+    cmp     r0, #0                      @ got null?
+    mov     r1, r9                      @ r1<- length (restore)
+    beq     common_exceptionThrown      @ yes, handle exception
+    @ fall through to OP_NEW_ARRAY_JUMBO_finish
+
+    /*
+     * Finish allocation.
+     *
+     *  r0 holds class
+     *  r1 holds array length
+     */
+.LOP_NEW_ARRAY_JUMBO_finish:
+    mov     r2, #ALLOC_DONT_TRACK       @ don't track in local refs table
+    bl      dvmAllocArrayByClass        @ r0<- call(clazz, length, flags)
+    cmp     r0, #0                      @ failed?
+    FETCH(r2, 3)                        @ r2<- vBBBB
+    beq     common_exceptionThrown      @ yes, handle the exception
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r2)                    @ vBBBB<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_FILLED_NEW_ARRAY_JUMBO */
+
+    /*
+     * On entry:
+     *  r0 holds array class
+     */
+.LOP_FILLED_NEW_ARRAY_JUMBO_continue:
+    ldr     r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor
+    mov     r2, #ALLOC_DONT_TRACK       @ r2<- alloc flags
+    ldrb    rINST, [r3, #1]             @ rINST<- descriptor[1]
+    FETCH(r1, 3)                        @ r1<- BBBB (length)
+    cmp     rINST, #'I'                 @ array of ints?
+    cmpne   rINST, #'L'                 @ array of objects?
+    cmpne   rINST, #'['                 @ array of arrays?
+    mov     r9, r1                      @ save length in r9
+    bne     .LOP_FILLED_NEW_ARRAY_JUMBO_notimpl         @ no, not handled yet
+    bl      dvmAllocArrayByClass        @ r0<- call(arClass, length, flags)
+    cmp     r0, #0                      @ null return?
+    beq     common_exceptionThrown      @ alloc failed, handle exception
+
+    FETCH(r1, 4)                        @ r1<- CCCC
+    str     r0, [rSELF, #offThread_retval]      @ retval.l <- new array
+    str     rINST, [rSELF, #offThread_retval+4] @ retval.h <- type
+    add     r0, r0, #offArrayObject_contents @ r0<- newArray->contents
+    subs    r9, r9, #1                  @ length--, check for neg
+    FETCH_ADVANCE_INST(5)               @ advance to next instr, load rINST
+    bmi     2f                          @ was zero, bail
+
+    @ copy values from registers into the array
+    @ r0=array, r1=CCCC, r9=BBBB (length)
+    add     r2, rFP, r1, lsl #2         @ r2<- &fp[CCCC]
+1:  ldr     r3, [r2], #4                @ r3<- *r2++
+    subs    r9, r9, #1                  @ count--
+    str     r3, [r0], #4                @ *contents++ = vX
+    bpl     1b
+
+2:  ldr     r0, [rSELF, #offThread_retval]     @ r0<- object
+    ldr     r1, [rSELF, #offThread_retval+4]   @ r1<- type
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
+    GET_INST_OPCODE(ip)                      @ ip<- opcode from rINST
+    cmp     r1, #'I'                         @ Is int array?
+    strneb  r2, [r2, r0, lsr #GC_CARD_SHIFT] @ Mark card based on object head
+    GOTO_OPCODE(ip)                          @ execute it
+
+    /*
+     * Throw an exception indicating that we have not implemented this
+     * mode of filled-new-array.
+     */
+.LOP_FILLED_NEW_ARRAY_JUMBO_notimpl:
+    ldr     r0, .L_strFilledNewArrayNotImpl
+    bl      dvmThrowInternalError
+    b       common_exceptionThrown
+
+/* continuation for OP_IGET_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_JUMBO_finish:
+    @bl      common_squeak0
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IGET_WIDE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_WIDE_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_WIDE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_WIDE_JUMBO_finish:
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    .if     0
+    add     r0, r9, r3                  @ r0<- address of field
+    bl      dvmQuasiAtomicRead64        @ r0/r1<- contents of field
+    .else
+    ldrd    r0, [r9, r3]                @ r0/r1<- obj.field (64-bit align ok)
+    .endif
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    add     r3, rFP, r2, lsl #2         @ r3<- &fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r3, {r0-r1}                 @ fp[BBBB]<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IGET_OBJECT_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_OBJECT_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_OBJECT_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_OBJECT_JUMBO_finish:
+    @bl      common_squeak0
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IGET_BOOLEAN_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_BOOLEAN_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_BOOLEAN_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_BOOLEAN_JUMBO_finish:
+    @bl      common_squeak1
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IGET_BYTE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_BYTE_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_BYTE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_BYTE_JUMBO_finish:
+    @bl      common_squeak2
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IGET_CHAR_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_CHAR_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_CHAR_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_CHAR_JUMBO_finish:
+    @bl      common_squeak3
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IGET_SHORT_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_SHORT_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_SHORT_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_SHORT_JUMBO_finish:
+    @bl      common_squeak4
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    @ no-op                             @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IPUT_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_JUMBO_finish:
+    @bl      common_squeak0
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IPUT_WIDE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_WIDE_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_WIDE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_WIDE_JUMBO_finish:
+    cmp     r9, #0                      @ check object for null
+    FETCH(r2, 3)                        @ r2<- BBBB
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    ldmia   r2, {r0-r1}                 @ r0/r1<- fp[BBBB]
+    GET_INST_OPCODE(r10)                @ extract opcode from rINST
+    .if     0
+    add     r2, r9, r3                  @ r2<- target address
+    bl      dvmQuasiAtomicSwap64        @ stores r0/r1 into addr r2
+    .else
+    strd    r0, [r9, r3]                @ obj.field (64 bits, aligned)<- r0/r1
+    .endif
+    GOTO_OPCODE(r10)                    @ jump to next instruction
+
+/* continuation for OP_IPUT_OBJECT_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_OBJECT_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_OBJECT_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_OBJECT_JUMBO_finish:
+    @bl      common_squeak0
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str     r0, [r9, r3]                @ obj.field (32 bits)<- r0
+    cmp     r0, #0                      @ stored a null reference?
+    strneb  r2, [r2, r9, lsr #GC_CARD_SHIFT]  @ mark card if not
+    GOTO_OPCODE(ip)                     @ jump to next instruction
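+    /*
+     * Reference puts add a GC write barrier: after the store, the card at
+     * cardTable + (obj >> GC_CARD_SHIFT) is dirtied (strneb) unless the value
+     * written was null.
+     */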
+
+/* continuation for OP_IPUT_BOOLEAN_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_BOOLEAN_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_BOOLEAN_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_BOOLEAN_JUMBO_finish:
+    @bl      common_squeak1
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IPUT_BYTE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_BYTE_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_BYTE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_BYTE_JUMBO_finish:
+    @bl      common_squeak2
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IPUT_CHAR_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_CHAR_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_CHAR_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_CHAR_JUMBO_finish:
+    @bl      common_squeak3
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IPUT_SHORT_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_SHORT_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_SHORT_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_SHORT_JUMBO_finish:
+    @bl      common_squeak4
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_SGET_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
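+    /*
+     * All of the jumbo sget/sput resolve stubs below share this sequence:
+     * export the PC (resolution can throw), call dvmResolveStaticField with
+     * the referring class in r0 and the AAAAAAAA field index in r1, then
+     * either rejoin the variant's _finish code or throw.
+     */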
+
+/* continuation for OP_SGET_WIDE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     *
+     * Returns StaticField pointer in r0.
+     */
+.LOP_SGET_WIDE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_WIDE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_OBJECT_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_OBJECT_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_OBJECT_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_BOOLEAN_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_BOOLEAN_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_BOOLEAN_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_BYTE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_BYTE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_BYTE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_CHAR_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_CHAR_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_CHAR_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_SHORT_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_SHORT_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_SHORT_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SPUT_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_WIDE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     *  r9: &fp[BBBB]
+     *
+     * Returns StaticField pointer in r2.
+     */
+.LOP_SPUT_WIDE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    mov     r2, r0                      @ copy to r2
+    bne     .LOP_SPUT_WIDE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
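+    /*
+     * The extra "mov r2, r0" (which leaves the flags from the cmp intact)
+     * satisfies the wide-put finish code, which expects the StaticField
+     * pointer in r2 rather than r0.
+     */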
+
+/* continuation for OP_SPUT_OBJECT_JUMBO */
+
+.LOP_SPUT_OBJECT_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
+    ldr     r9, [r0, #offField_clazz]   @ r9<- field->clazz
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ no-op                             @ releasing store
+    str     r1, [r0, #offStaticField_value]  @ field<- vBBBB
+    cmp     r1, #0                      @ stored a null object?
+    strneb  r2, [r2, r9, lsr #GC_CARD_SHIFT]  @ mark card based on obj head
+    GOTO_OPCODE(ip)                     @ jump to next instruction
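+    /*
+     * For static reference puts the card is computed from the field's
+     * declaring class (field->clazz, loaded into r9) rather than from an
+     * instance pointer.
+     */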
+
+/* continuation for OP_SPUT_BOOLEAN_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SPUT_BOOLEAN_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_BOOLEAN_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_BYTE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SPUT_BYTE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_BYTE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_CHAR_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SPUT_CHAR_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_CHAR_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_SHORT_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SPUT_SHORT_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_SHORT_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_INVOKE_VIRTUAL_JUMBO */
+
+    /*
+     * At this point:
+     *  r0 = resolved base method
+     */
+.LOP_INVOKE_VIRTUAL_JUMBO_continue:
+    FETCH(r10, 4)                       @ r10<- CCCC
+    GET_VREG(r1, r10)                   @ r1<- "this" ptr
+    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
+    cmp     r1, #0                      @ is "this" null?
+    beq     common_errNullObject        @ null "this", throw exception
+    ldr     r3, [r1, #offObject_clazz]  @ r3<- thisPtr->clazz
+    ldr     r3, [r3, #offClassObject_vtable]    @ r3<- thisPtr->clazz->vtable
+    ldr     r0, [r3, r2, lsl #2]        @ r0<- vtable[methodIndex]
+    bl      common_invokeMethodJumbo    @ continue on
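+    /*
+     * Virtual dispatch: the "this" register index comes from the fifth code
+     * unit (CCCC); after the null check, the target is looked up as
+     * this->clazz->vtable[baseMethod->methodIndex] and handed to
+     * common_invokeMethodJumbo.
+     */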
+
+/* continuation for OP_INVOKE_SUPER_JUMBO */
+
+    /*
+     * At this point:
+     *  r0 = resolved base method
+     *  r9 = method->clazz
+     */
+.LOP_INVOKE_SUPER_JUMBO_continue:
+    ldr     r1, [r9, #offClassObject_super]     @ r1<- method->clazz->super
+    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
+    ldr     r3, [r1, #offClassObject_vtableCount]   @ r3<- super->vtableCount
+    EXPORT_PC()                         @ must export for invoke
+    cmp     r2, r3                      @ compare (methodIndex, vtableCount)
+    bcs     .LOP_INVOKE_SUPER_JUMBO_nsm             @ method not present in superclass
+    ldr     r1, [r1, #offClassObject_vtable]    @ r1<- ...clazz->super->vtable
+    ldr     r0, [r1, r2, lsl #2]        @ r0<- vtable[methodIndex]
+    bl      common_invokeMethodJumbo    @ continue on
+
+.LOP_INVOKE_SUPER_JUMBO_resolve:
+    mov     r0, r9                      @ r0<- method->clazz
+    mov     r2, #METHOD_VIRTUAL         @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    bne     .LOP_INVOKE_SUPER_JUMBO_continue        @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
+    /*
+     * Throw a NoSuchMethodError with the method name as the message.
+     *  r0 = resolved base method
+     */
+.LOP_INVOKE_SUPER_JUMBO_nsm:
+    ldr     r1, [r0, #offMethod_name]   @ r1<- method name
+    b       common_errNoSuchMethod
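+    /*
+     * The methodIndex is bounds-checked against super->vtableCount before the
+     * vtable is indexed, so a method missing from the superclass raises
+     * NoSuchMethodError instead of reading past the table.
+     */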
+
+/* continuation for OP_INVOKE_DIRECT_JUMBO */
+
+    /*
+     * On entry:
+     *  r1 = reference (CCCC)
+     *  r10 = "this" register
+     */
+.LOP_INVOKE_DIRECT_JUMBO_resolve:
+    ldr     r3, [rSELF, #offThread_method] @ r3<- self->method
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    mov     r2, #METHOD_DIRECT          @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    GET_VREG(r2, r10)                   @ r2<- "this" ptr (reload)
+    bne     .LOP_INVOKE_DIRECT_JUMBO_finish          @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
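+    /*
+     * "this" is reloaded from the register named by r10 after the call, since
+     * r2 was repurposed as the METHOD_DIRECT argument to dvmResolveMethod.
+     */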
+
+/* continuation for OP_IGET_VOLATILE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_VOLATILE_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_VOLATILE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_VOLATILE_JUMBO_finish:
+    @bl      common_squeak0
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    SMP_DMB                            @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
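+    /*
+     * The volatile variants differ from the plain jumbo forms only in the
+     * barrier slot: SMP_DMB (a memory barrier on SMP builds, nothing
+     * otherwise) follows the load to give it acquire semantics.
+     */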
+
+/* continuation for OP_IGET_WIDE_VOLATILE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_WIDE_VOLATILE_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_WIDE_VOLATILE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_WIDE_VOLATILE_JUMBO_finish:
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    .if     1
+    add     r0, r9, r3                  @ r0<- address of field
+    bl      dvmQuasiAtomicRead64        @ r0/r1<- contents of field
+    .else
+    ldrd    r0, [r9, r3]                @ r0/r1<- obj.field (64-bit align ok)
+    .endif
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    add     r3, rFP, r2, lsl #2         @ r3<- &fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r3, {r0-r1}                 @ fp[BBBB]<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
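+    /*
+     * The .if 1 arm calls dvmQuasiAtomicRead64 so the 64-bit volatile read is
+     * performed atomically even on cores without a 64-bit atomic load; the
+     * non-volatile form would take the plain ldrd branch instead.
+     */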
+
+/* continuation for OP_IGET_OBJECT_VOLATILE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_OBJECT_VOLATILE_JUMBO_resolved:
+    cmp     r0, #0                      @ resolution unsuccessful?
+    beq     common_exceptionThrown      @ yes, throw exception
+    @ fall through to OP_IGET_OBJECT_VOLATILE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IGET_OBJECT_VOLATILE_JUMBO_finish:
+    @bl      common_squeak0
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    SMP_DMB                            @ acquiring load
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    SET_VREG(r0, r2)                    @ fp[BBBB]<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IPUT_VOLATILE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_VOLATILE_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_VOLATILE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_VOLATILE_JUMBO_finish:
+    @bl      common_squeak0
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SMP_DMB                            @ releasing store
+    str  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* continuation for OP_IPUT_WIDE_VOLATILE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_WIDE_VOLATILE_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_WIDE_VOLATILE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_WIDE_VOLATILE_JUMBO_finish:
+    cmp     r9, #0                      @ check object for null
+    FETCH(r2, 3)                        @ r2<- BBBB
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BBBB]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    ldmia   r2, {r0-r1}                 @ r0/r1<- fp[BBBB]
+    GET_INST_OPCODE(r10)                @ extract opcode from rINST
+    .if     1
+    add     r2, r9, r3                  @ r2<- target address
+    bl      dvmQuasiAtomicSwap64        @ stores r0/r1 into addr r2
+    .else
+    strd    r0, [r9, r3]                @ obj.field (64 bits, aligned)<- r0/r1
+    .endif
+    GOTO_OPCODE(r10)                    @ jump to next instruction
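+    /*
+     * dvmQuasiAtomicSwap64 stores r0/r1 to the address in r2 atomically; as in
+     * the non-volatile wide put, the opcode is kept in r10 so it survives the
+     * call.
+     */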
+
+/* continuation for OP_IPUT_OBJECT_VOLATILE_JUMBO */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_OBJECT_VOLATILE_JUMBO_resolved:
+     cmp     r0, #0                     @ resolution unsuccessful?
+     beq     common_exceptionThrown     @ yes, throw exception
+     @ fall through to OP_IPUT_OBJECT_VOLATILE_JUMBO_finish
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_OBJECT_VOLATILE_JUMBO_finish:
+    @bl      common_squeak0
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    FETCH(r1, 3)                        @ r1<- BBBB
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[BBBB]
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SMP_DMB                            @ releasing store
+    str     r0, [r9, r3]                @ obj.field (32 bits)<- r0
+    cmp     r0, #0                      @ stored a null reference?
+    strneb  r2, [r2, r9, lsr #GC_CARD_SHIFT]  @ mark card if not
+    GOTO_OPCODE(ip)                     @ jump to next instruction
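+    /*
+     * Volatile reference puts combine both extras: SMP_DMB before the store
+     * for release semantics, then the card mark for non-null values.
+     */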
+
+/* continuation for OP_SGET_VOLATILE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_VOLATILE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_VOLATILE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_WIDE_VOLATILE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     *
+     * Returns StaticField pointer in r0.
+     */
+.LOP_SGET_WIDE_VOLATILE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_WIDE_VOLATILE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SGET_OBJECT_VOLATILE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SGET_OBJECT_VOLATILE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_OBJECT_VOLATILE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_VOLATILE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     */
+.LOP_SPUT_VOLATILE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_VOLATILE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_WIDE_VOLATILE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: AAAAAAAA field ref
+     *  r9: &fp[BBBB]
+     *
+     * Returns StaticField pointer in r2.
+     */
+.LOP_SPUT_WIDE_VOLATILE_JUMBO_resolve:
+    ldr     r2, [rSELF, #offThread_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    mov     r2, r0                      @ copy to r2
+    bne     .LOP_SPUT_WIDE_VOLATILE_JUMBO_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+/* continuation for OP_SPUT_OBJECT_VOLATILE_JUMBO */
+
+.LOP_SPUT_OBJECT_VOLATILE_JUMBO_finish:   @ field ptr in r0
+    FETCH(r2, 3)                        @ r2<- BBBB
+    FETCH_ADVANCE_INST(4)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[BBBB]
+    ldr     r2, [rSELF, #offThread_cardTable]  @ r2<- card table base
+    ldr     r9, [r0, #offField_clazz]   @ r9<- field->clazz
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SMP_DMB                            @ releasing store
+    str     r1, [r0, #offStaticField_value]  @ field<- vBBBB
+    cmp     r1, #0                      @ stored a null object?
+    strneb  r2, [r2, r9, lsr #GC_CARD_SHIFT]  @ mark card based on obj head
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
     .size   dvmAsmSisterStart, .-dvmAsmSisterStart
     .global dvmAsmSisterEnd
 dvmAsmSisterEnd:
 
+
+    .global dvmAsmAltInstructionStart
+    .type   dvmAsmAltInstructionStart, %function
+dvmAsmAltInstructionStart:
+    .text
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NOP: /* 0x00 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (0 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
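+    /*
+     * Each alt stub occupies a fixed 64-byte slot (.balign 64), so
+     * dvmAsmInstructionStart + (opnum * 64) in lr is the address of this
+     * opcode's real handler; when dvmCheckInst returns normally it therefore
+     * lands in that handler.  The remaining stubs below differ only in the
+     * opcode number folded into the lr computation.
+     */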
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE: /* 0x01 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (1 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_FROM16: /* 0x02 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (2 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_16: /* 0x03 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (3 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_WIDE: /* 0x04 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (4 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_WIDE_FROM16: /* 0x05 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (5 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_WIDE_16: /* 0x06 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (6 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_OBJECT: /* 0x07 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (7 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_OBJECT_FROM16: /* 0x08 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (8 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_OBJECT_16: /* 0x09 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (9 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_RESULT: /* 0x0a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (10 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_RESULT_WIDE: /* 0x0b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (11 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_RESULT_OBJECT: /* 0x0c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (12 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MOVE_EXCEPTION: /* 0x0d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (13 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_RETURN_VOID: /* 0x0e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (14 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_RETURN: /* 0x0f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (15 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_RETURN_WIDE: /* 0x10 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (16 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_RETURN_OBJECT: /* 0x11 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (17 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_4: /* 0x12 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (18 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_16: /* 0x13 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (19 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST: /* 0x14 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (20 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_HIGH16: /* 0x15 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (21 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_WIDE_16: /* 0x16 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (22 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_WIDE_32: /* 0x17 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (23 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_WIDE: /* 0x18 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (24 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_WIDE_HIGH16: /* 0x19 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (25 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_STRING: /* 0x1a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (26 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_STRING_JUMBO: /* 0x1b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (27 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_CLASS: /* 0x1c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (28 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MONITOR_ENTER: /* 0x1d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (29 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MONITOR_EXIT: /* 0x1e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (30 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CHECK_CAST: /* 0x1f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (31 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INSTANCE_OF: /* 0x20 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (32 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ARRAY_LENGTH: /* 0x21 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (33 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEW_INSTANCE: /* 0x22 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (34 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEW_ARRAY: /* 0x23 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (35 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_FILLED_NEW_ARRAY: /* 0x24 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (36 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (37 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_FILL_ARRAY_DATA: /* 0x26 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (38 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_THROW: /* 0x27 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (39 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_GOTO: /* 0x28 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (40 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_GOTO_16: /* 0x29 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (41 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_GOTO_32: /* 0x2a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (42 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_PACKED_SWITCH: /* 0x2b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (43 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPARSE_SWITCH: /* 0x2c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (44 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CMPL_FLOAT: /* 0x2d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (45 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CMPG_FLOAT: /* 0x2e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (46 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CMPL_DOUBLE: /* 0x2f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (47 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CMPG_DOUBLE: /* 0x30 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (48 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CMP_LONG: /* 0x31 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (49 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_EQ: /* 0x32 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (50 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_NE: /* 0x33 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (51 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_LT: /* 0x34 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (52 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_GE: /* 0x35 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (53 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_GT: /* 0x36 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (54 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_LE: /* 0x37 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (55 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_EQZ: /* 0x38 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (56 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_NEZ: /* 0x39 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (57 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_LTZ: /* 0x3a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (58 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_GEZ: /* 0x3b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (59 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_GTZ: /* 0x3c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (60 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IF_LEZ: /* 0x3d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (61 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3E: /* 0x3e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (62 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3F: /* 0x3f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (63 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_40: /* 0x40 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (64 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_41: /* 0x41 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (65 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_42: /* 0x42 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (66 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_43: /* 0x43 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (67 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AGET: /* 0x44 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (68 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AGET_WIDE: /* 0x45 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (69 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AGET_OBJECT: /* 0x46 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (70 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AGET_BOOLEAN: /* 0x47 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (71 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AGET_BYTE: /* 0x48 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (72 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AGET_CHAR: /* 0x49 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (73 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AGET_SHORT: /* 0x4a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (74 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_APUT: /* 0x4b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (75 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_APUT_WIDE: /* 0x4c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (76 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_APUT_OBJECT: /* 0x4d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (77 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_APUT_BOOLEAN: /* 0x4e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (78 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_APUT_BYTE: /* 0x4f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (79 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_APUT_CHAR: /* 0x50 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (80 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_APUT_SHORT: /* 0x51 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (81 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET: /* 0x52 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (82 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_WIDE: /* 0x53 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (83 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_OBJECT: /* 0x54 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (84 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_BOOLEAN: /* 0x55 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (85 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_BYTE: /* 0x56 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (86 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_CHAR: /* 0x57 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (87 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_SHORT: /* 0x58 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (88 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT: /* 0x59 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (89 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_WIDE: /* 0x5a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (90 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_OBJECT: /* 0x5b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (91 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_BOOLEAN: /* 0x5c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (92 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_BYTE: /* 0x5d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (93 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_CHAR: /* 0x5e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (94 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_SHORT: /* 0x5f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (95 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET: /* 0x60 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (96 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_WIDE: /* 0x61 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (97 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_OBJECT: /* 0x62 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (98 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_BOOLEAN: /* 0x63 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (99 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_BYTE: /* 0x64 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (100 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_CHAR: /* 0x65 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (101 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_SHORT: /* 0x66 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (102 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT: /* 0x67 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (103 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_WIDE: /* 0x68 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (104 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_OBJECT: /* 0x69 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (105 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_BOOLEAN: /* 0x6a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (106 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_BYTE: /* 0x6b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (107 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_CHAR: /* 0x6c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (108 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_SHORT: /* 0x6d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (109 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_VIRTUAL: /* 0x6e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (110 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_SUPER: /* 0x6f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (111 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_DIRECT: /* 0x70 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (112 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_STATIC: /* 0x71 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (113 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_INTERFACE: /* 0x72 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (114 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_73: /* 0x73 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (115 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (116 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_SUPER_RANGE: /* 0x75 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (117 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_DIRECT_RANGE: /* 0x76 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (118 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_STATIC_RANGE: /* 0x77 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (119 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (120 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_79: /* 0x79 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (121 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_7A: /* 0x7a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (122 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEG_INT: /* 0x7b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (123 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NOT_INT: /* 0x7c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (124 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEG_LONG: /* 0x7d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (125 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NOT_LONG: /* 0x7e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (126 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEG_FLOAT: /* 0x7f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (127 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEG_DOUBLE: /* 0x80 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (128 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INT_TO_LONG: /* 0x81 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (129 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INT_TO_FLOAT: /* 0x82 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (130 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INT_TO_DOUBLE: /* 0x83 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (131 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_LONG_TO_INT: /* 0x84 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (132 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_LONG_TO_FLOAT: /* 0x85 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (133 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_LONG_TO_DOUBLE: /* 0x86 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (134 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_FLOAT_TO_INT: /* 0x87 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (135 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_FLOAT_TO_LONG: /* 0x88 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (136 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_FLOAT_TO_DOUBLE: /* 0x89 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (137 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DOUBLE_TO_INT: /* 0x8a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (138 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DOUBLE_TO_LONG: /* 0x8b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (139 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DOUBLE_TO_FLOAT: /* 0x8c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (140 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INT_TO_BYTE: /* 0x8d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (141 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INT_TO_CHAR: /* 0x8e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (142 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INT_TO_SHORT: /* 0x8f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (143 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_INT: /* 0x90 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (144 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_INT: /* 0x91 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (145 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_INT: /* 0x92 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (146 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_INT: /* 0x93 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (147 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_INT: /* 0x94 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (148 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AND_INT: /* 0x95 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (149 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_OR_INT: /* 0x96 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (150 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_XOR_INT: /* 0x97 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (151 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHL_INT: /* 0x98 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (152 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHR_INT: /* 0x99 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (153 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_USHR_INT: /* 0x9a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (154 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_LONG: /* 0x9b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (155 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_LONG: /* 0x9c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (156 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_LONG: /* 0x9d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (157 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_LONG: /* 0x9e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (158 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_LONG: /* 0x9f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (159 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AND_LONG: /* 0xa0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (160 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_OR_LONG: /* 0xa1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (161 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_XOR_LONG: /* 0xa2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (162 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHL_LONG: /* 0xa3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (163 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHR_LONG: /* 0xa4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (164 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_USHR_LONG: /* 0xa5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (165 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_FLOAT: /* 0xa6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (166 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_FLOAT: /* 0xa7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (167 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_FLOAT: /* 0xa8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (168 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_FLOAT: /* 0xa9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (169 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_FLOAT: /* 0xaa */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (170 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_DOUBLE: /* 0xab */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (171 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_DOUBLE: /* 0xac */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (172 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_DOUBLE: /* 0xad */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (173 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_DOUBLE: /* 0xae */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (174 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_DOUBLE: /* 0xaf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (175 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_INT_2ADDR: /* 0xb0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (176 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_INT_2ADDR: /* 0xb1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (177 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_INT_2ADDR: /* 0xb2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (178 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_INT_2ADDR: /* 0xb3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (179 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_INT_2ADDR: /* 0xb4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (180 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AND_INT_2ADDR: /* 0xb5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (181 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_OR_INT_2ADDR: /* 0xb6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (182 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_XOR_INT_2ADDR: /* 0xb7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (183 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHL_INT_2ADDR: /* 0xb8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (184 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHR_INT_2ADDR: /* 0xb9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (185 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_USHR_INT_2ADDR: /* 0xba */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (186 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_LONG_2ADDR: /* 0xbb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (187 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_LONG_2ADDR: /* 0xbc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (188 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_LONG_2ADDR: /* 0xbd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (189 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_LONG_2ADDR: /* 0xbe */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (190 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_LONG_2ADDR: /* 0xbf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (191 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AND_LONG_2ADDR: /* 0xc0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (192 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_OR_LONG_2ADDR: /* 0xc1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (193 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_XOR_LONG_2ADDR: /* 0xc2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (194 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHL_LONG_2ADDR: /* 0xc3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (195 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHR_LONG_2ADDR: /* 0xc4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (196 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_USHR_LONG_2ADDR: /* 0xc5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (197 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_FLOAT_2ADDR: /* 0xc6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (198 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_FLOAT_2ADDR: /* 0xc7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (199 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_FLOAT_2ADDR: /* 0xc8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (200 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_FLOAT_2ADDR: /* 0xc9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (201 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_FLOAT_2ADDR: /* 0xca */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (202 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_DOUBLE_2ADDR: /* 0xcb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (203 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SUB_DOUBLE_2ADDR: /* 0xcc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (204 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_DOUBLE_2ADDR: /* 0xcd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (205 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_DOUBLE_2ADDR: /* 0xce */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (206 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_DOUBLE_2ADDR: /* 0xcf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (207 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_INT_LIT16: /* 0xd0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (208 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_RSUB_INT: /* 0xd1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (209 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_INT_LIT16: /* 0xd2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (210 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_INT_LIT16: /* 0xd3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (211 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_INT_LIT16: /* 0xd4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (212 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AND_INT_LIT16: /* 0xd5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (213 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_OR_INT_LIT16: /* 0xd6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (214 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_XOR_INT_LIT16: /* 0xd7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (215 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_ADD_INT_LIT8: /* 0xd8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (216 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_RSUB_INT_LIT8: /* 0xd9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (217 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_MUL_INT_LIT8: /* 0xda */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (218 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DIV_INT_LIT8: /* 0xdb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (219 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_REM_INT_LIT8: /* 0xdc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (220 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_AND_INT_LIT8: /* 0xdd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (221 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_OR_INT_LIT8: /* 0xde */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (222 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_XOR_INT_LIT8: /* 0xdf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (223 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHL_INT_LIT8: /* 0xe0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (224 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SHR_INT_LIT8: /* 0xe1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (225 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_USHR_INT_LIT8: /* 0xe2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (226 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_VOLATILE: /* 0xe3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (227 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_VOLATILE: /* 0xe4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (228 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_VOLATILE: /* 0xe5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (229 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_VOLATILE: /* 0xe6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (230 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_OBJECT_VOLATILE: /* 0xe7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (231 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_WIDE_VOLATILE: /* 0xe8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (232 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_WIDE_VOLATILE: /* 0xe9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (233 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_WIDE_VOLATILE: /* 0xea */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (234 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_WIDE_VOLATILE: /* 0xeb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (235 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_BREAKPOINT: /* 0xec */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (236 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_THROW_VERIFICATION_ERROR: /* 0xed */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (237 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_EXECUTE_INLINE: /* 0xee */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (238 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_EXECUTE_INLINE_RANGE: /* 0xef */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (239 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_OBJECT_INIT_RANGE: /* 0xf0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (240 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_RETURN_VOID_BARRIER: /* 0xf1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (241 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_QUICK: /* 0xf2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (242 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_WIDE_QUICK: /* 0xf3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (243 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_OBJECT_QUICK: /* 0xf4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (244 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_QUICK: /* 0xf5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (245 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_WIDE_QUICK: /* 0xf6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (246 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_OBJECT_QUICK: /* 0xf7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (247 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (248 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (249 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_SUPER_QUICK: /* 0xfa */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (250 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (251 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_OBJECT_VOLATILE: /* 0xfc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (252 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_OBJECT_VOLATILE: /* 0xfd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (253 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_OBJECT_VOLATILE: /* 0xfe */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (254 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_DISPATCH_FF: /* 0xff */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (255 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CONST_CLASS_JUMBO: /* 0x100 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (256 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_CHECK_CAST_JUMBO: /* 0x101 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (257 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INSTANCE_OF_JUMBO: /* 0x102 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (258 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEW_INSTANCE_JUMBO: /* 0x103 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (259 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_NEW_ARRAY_JUMBO: /* 0x104 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (260 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_FILLED_NEW_ARRAY_JUMBO: /* 0x105 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (261 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_JUMBO: /* 0x106 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (262 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_WIDE_JUMBO: /* 0x107 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (263 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_OBJECT_JUMBO: /* 0x108 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (264 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_BOOLEAN_JUMBO: /* 0x109 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (265 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_BYTE_JUMBO: /* 0x10a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (266 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_CHAR_JUMBO: /* 0x10b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (267 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_SHORT_JUMBO: /* 0x10c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (268 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_JUMBO: /* 0x10d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (269 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_WIDE_JUMBO: /* 0x10e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (270 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_OBJECT_JUMBO: /* 0x10f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (271 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_BOOLEAN_JUMBO: /* 0x110 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (272 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_BYTE_JUMBO: /* 0x111 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (273 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_CHAR_JUMBO: /* 0x112 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (274 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_SHORT_JUMBO: /* 0x113 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (275 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_JUMBO: /* 0x114 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (276 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_WIDE_JUMBO: /* 0x115 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (277 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_OBJECT_JUMBO: /* 0x116 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (278 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_BOOLEAN_JUMBO: /* 0x117 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (279 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_BYTE_JUMBO: /* 0x118 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (280 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_CHAR_JUMBO: /* 0x119 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (281 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_SHORT_JUMBO: /* 0x11a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (282 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_JUMBO: /* 0x11b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (283 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_WIDE_JUMBO: /* 0x11c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (284 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_OBJECT_JUMBO: /* 0x11d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (285 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_BOOLEAN_JUMBO: /* 0x11e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (286 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_BYTE_JUMBO: /* 0x11f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (287 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_CHAR_JUMBO: /* 0x120 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (288 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_SHORT_JUMBO: /* 0x121 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (289 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_VIRTUAL_JUMBO: /* 0x122 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (290 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_SUPER_JUMBO: /* 0x123 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (291 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_DIRECT_JUMBO: /* 0x124 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (292 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_STATIC_JUMBO: /* 0x125 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (293 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_INTERFACE_JUMBO: /* 0x126 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (294 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_27FF: /* 0x127 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (295 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_28FF: /* 0x128 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (296 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_29FF: /* 0x129 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (297 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_2AFF: /* 0x12a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (298 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_2BFF: /* 0x12b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (299 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_2CFF: /* 0x12c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (300 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_2DFF: /* 0x12d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (301 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_2EFF: /* 0x12e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (302 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_2FFF: /* 0x12f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (303 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_30FF: /* 0x130 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (304 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_31FF: /* 0x131 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (305 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_32FF: /* 0x132 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (306 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_33FF: /* 0x133 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (307 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_34FF: /* 0x134 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (308 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_35FF: /* 0x135 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (309 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_36FF: /* 0x136 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (310 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_37FF: /* 0x137 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (311 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_38FF: /* 0x138 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (312 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_39FF: /* 0x139 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (313 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3AFF: /* 0x13a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (314 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3BFF: /* 0x13b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (315 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3CFF: /* 0x13c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (316 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3DFF: /* 0x13d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (317 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3EFF: /* 0x13e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (318 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_3FFF: /* 0x13f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (319 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_40FF: /* 0x140 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (320 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_41FF: /* 0x141 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (321 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_42FF: /* 0x142 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (322 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_43FF: /* 0x143 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (323 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_44FF: /* 0x144 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (324 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_45FF: /* 0x145 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (325 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_46FF: /* 0x146 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (326 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_47FF: /* 0x147 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (327 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_48FF: /* 0x148 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (328 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_49FF: /* 0x149 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (329 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_4AFF: /* 0x14a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (330 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_4BFF: /* 0x14b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (331 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_4CFF: /* 0x14c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (332 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_4DFF: /* 0x14d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (333 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_4EFF: /* 0x14e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (334 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_4FFF: /* 0x14f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (335 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_50FF: /* 0x150 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (336 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_51FF: /* 0x151 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (337 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_52FF: /* 0x152 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (338 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_53FF: /* 0x153 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (339 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_54FF: /* 0x154 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (340 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_55FF: /* 0x155 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (341 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_56FF: /* 0x156 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (342 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_57FF: /* 0x157 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (343 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_58FF: /* 0x158 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (344 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_59FF: /* 0x159 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (345 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_5AFF: /* 0x15a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (346 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_5BFF: /* 0x15b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (347 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_5CFF: /* 0x15c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (348 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_5DFF: /* 0x15d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (349 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_5EFF: /* 0x15e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (350 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_5FFF: /* 0x15f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (351 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_60FF: /* 0x160 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (352 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_61FF: /* 0x161 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (353 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_62FF: /* 0x162 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (354 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_63FF: /* 0x163 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (355 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_64FF: /* 0x164 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (356 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_65FF: /* 0x165 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (357 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_66FF: /* 0x166 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (358 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_67FF: /* 0x167 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (359 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_68FF: /* 0x168 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (360 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_69FF: /* 0x169 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (361 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_6AFF: /* 0x16a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (362 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_6BFF: /* 0x16b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (363 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_6CFF: /* 0x16c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (364 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_6DFF: /* 0x16d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (365 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_6EFF: /* 0x16e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (366 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_6FFF: /* 0x16f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (367 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_70FF: /* 0x170 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (368 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_71FF: /* 0x171 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (369 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_72FF: /* 0x172 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (370 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_73FF: /* 0x173 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (371 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_74FF: /* 0x174 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (372 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_75FF: /* 0x175 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (373 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_76FF: /* 0x176 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (374 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_77FF: /* 0x177 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (375 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_78FF: /* 0x178 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (376 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_79FF: /* 0x179 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (377 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_7AFF: /* 0x17a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (378 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_7BFF: /* 0x17b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (379 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_7CFF: /* 0x17c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (380 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_7DFF: /* 0x17d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (381 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_7EFF: /* 0x17e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (382 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_7FFF: /* 0x17f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (383 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_80FF: /* 0x180 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (384 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_81FF: /* 0x181 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (385 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_82FF: /* 0x182 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (386 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_83FF: /* 0x183 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (387 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_84FF: /* 0x184 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (388 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_85FF: /* 0x185 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (389 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_86FF: /* 0x186 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (390 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_87FF: /* 0x187 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (391 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_88FF: /* 0x188 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (392 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_89FF: /* 0x189 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (393 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_8AFF: /* 0x18a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (394 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_8BFF: /* 0x18b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (395 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_8CFF: /* 0x18c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (396 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_8DFF: /* 0x18d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (397 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_8EFF: /* 0x18e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (398 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_8FFF: /* 0x18f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (399 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_90FF: /* 0x190 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (400 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_91FF: /* 0x191 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (401 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_92FF: /* 0x192 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (402 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_93FF: /* 0x193 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (403 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_94FF: /* 0x194 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (404 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_95FF: /* 0x195 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (405 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_96FF: /* 0x196 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (406 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_97FF: /* 0x197 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (407 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_98FF: /* 0x198 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (408 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_99FF: /* 0x199 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (409 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_9AFF: /* 0x19a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (410 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_9BFF: /* 0x19b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (411 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_9CFF: /* 0x19c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (412 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_9DFF: /* 0x19d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (413 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_9EFF: /* 0x19e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (414 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_9FFF: /* 0x19f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (415 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A0FF: /* 0x1a0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (416 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A1FF: /* 0x1a1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (417 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A2FF: /* 0x1a2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (418 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A3FF: /* 0x1a3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (419 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A4FF: /* 0x1a4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (420 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A5FF: /* 0x1a5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (421 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A6FF: /* 0x1a6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (422 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A7FF: /* 0x1a7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (423 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A8FF: /* 0x1a8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (424 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_A9FF: /* 0x1a9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (425 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_AAFF: /* 0x1aa */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (426 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_ABFF: /* 0x1ab */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (427 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_ACFF: /* 0x1ac */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (428 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_ADFF: /* 0x1ad */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (429 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_AEFF: /* 0x1ae */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (430 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_AFFF: /* 0x1af */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (431 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B0FF: /* 0x1b0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (432 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B1FF: /* 0x1b1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (433 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B2FF: /* 0x1b2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (434 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B3FF: /* 0x1b3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (435 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B4FF: /* 0x1b4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (436 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B5FF: /* 0x1b5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (437 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B6FF: /* 0x1b6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (438 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B7FF: /* 0x1b7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (439 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B8FF: /* 0x1b8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (440 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_B9FF: /* 0x1b9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (441 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_BAFF: /* 0x1ba */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (442 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_BBFF: /* 0x1bb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (443 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_BCFF: /* 0x1bc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (444 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_BDFF: /* 0x1bd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (445 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_BEFF: /* 0x1be */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (446 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_BFFF: /* 0x1bf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (447 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C0FF: /* 0x1c0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (448 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C1FF: /* 0x1c1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (449 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C2FF: /* 0x1c2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (450 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C3FF: /* 0x1c3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (451 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C4FF: /* 0x1c4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (452 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C5FF: /* 0x1c5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (453 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C6FF: /* 0x1c6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (454 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C7FF: /* 0x1c7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (455 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C8FF: /* 0x1c8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (456 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_C9FF: /* 0x1c9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (457 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_CAFF: /* 0x1ca */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (458 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_CBFF: /* 0x1cb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (459 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_CCFF: /* 0x1cc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (460 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_CDFF: /* 0x1cd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (461 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_CEFF: /* 0x1ce */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (462 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_CFFF: /* 0x1cf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (463 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D0FF: /* 0x1d0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (464 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D1FF: /* 0x1d1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (465 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D2FF: /* 0x1d2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (466 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D3FF: /* 0x1d3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (467 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D4FF: /* 0x1d4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (468 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D5FF: /* 0x1d5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (469 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D6FF: /* 0x1d6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (470 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D7FF: /* 0x1d7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (471 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D8FF: /* 0x1d8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (472 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_D9FF: /* 0x1d9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (473 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_DAFF: /* 0x1da */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (474 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_DBFF: /* 0x1db */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (475 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_DCFF: /* 0x1dc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (476 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_DDFF: /* 0x1dd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (477 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_DEFF: /* 0x1de */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (478 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_DFFF: /* 0x1df */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (479 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E0FF: /* 0x1e0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (480 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E1FF: /* 0x1e1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (481 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E2FF: /* 0x1e2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (482 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E3FF: /* 0x1e3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (483 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E4FF: /* 0x1e4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (484 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E5FF: /* 0x1e5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (485 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E6FF: /* 0x1e6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (486 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E7FF: /* 0x1e7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (487 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E8FF: /* 0x1e8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (488 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_E9FF: /* 0x1e9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (489 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_EAFF: /* 0x1ea */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (490 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_EBFF: /* 0x1eb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (491 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_ECFF: /* 0x1ec */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (492 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_EDFF: /* 0x1ed */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (493 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_EEFF: /* 0x1ee */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (494 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_EFFF: /* 0x1ef */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (495 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_F0FF: /* 0x1f0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (496 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_UNUSED_F1FF: /* 0x1f1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (497 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_INVOKE_OBJECT_INIT_JUMBO: /* 0x1f2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (498 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_VOLATILE_JUMBO: /* 0x1f3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (499 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_WIDE_VOLATILE_JUMBO: /* 0x1f4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (500 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IGET_OBJECT_VOLATILE_JUMBO: /* 0x1f5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (501 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_VOLATILE_JUMBO: /* 0x1f6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (502 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_WIDE_VOLATILE_JUMBO: /* 0x1f7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (503 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_IPUT_OBJECT_VOLATILE_JUMBO: /* 0x1f8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (504 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_VOLATILE_JUMBO: /* 0x1f9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (505 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_WIDE_VOLATILE_JUMBO: /* 0x1fa */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (506 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SGET_OBJECT_VOLATILE_JUMBO: /* 0x1fb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (507 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_VOLATILE_JUMBO: /* 0x1fc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (508 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_WIDE_VOLATILE_JUMBO: /* 0x1fd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (509 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_SPUT_OBJECT_VOLATILE_JUMBO: /* 0x1fe */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (510 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+/* ------------------------------ */
+    .balign 64
+.L_ALT_OP_THROW_VERIFICATION_ERROR_JUMBO: /* 0x1ff */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckInst is done as a tail call.
+ */
+    adrl   lr, dvmAsmInstructionStart + (511 * 64)
+    mov    r0, rPC              @ arg0
+    mov    r1, rSELF            @ arg1
+    b      dvmCheckInst         @ (dPC,self) tail call to instruction checker
+
+    .balign 64
+    .size   dvmAsmAltInstructionStart, .-dvmAsmAltInstructionStart
+    .global dvmAsmAltInstructionEnd
+dvmAsmAltInstructionEnd:
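+
+/*
+ * Addressing sketch (assumed, for illustration only): because every stub
+ * above is padded to 64 bytes with ".balign 64", the alternate handler for
+ * an opcode can be located with simple arithmetic rather than a lookup
+ * table, roughly:
+ *
+ *     altHandler  = dvmAsmAltInstructionStart + (opcode * 64)
+ *     realHandler = dvmAsmInstructionStart    + (opcode * 64)
+ *
+ * which is the same computation the "adrl lr, dvmAsmInstructionStart +
+ * (N * 64)" line in each stub performs for the corresponding real handler.
+ */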
 /* File: armv5te/footer.S */
 
 /*
@@ -9312,71 +20851,64 @@
 #if defined(WITH_SELF_VERIFICATION)
     .global dvmJitToInterpPunt
 dvmJitToInterpPunt:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r2,#kSVSPunt                 @ r2<- interpreter entry point
     mov    r3, #0
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpSingleStep
 dvmJitToInterpSingleStep:
-    str    lr,[rGLUE,#offGlue_jitResumeNPC]
-    str    r1,[rGLUE,#offGlue_jitResumeDPC]
+    str    lr,[rSELF,#offThread_jitResumeNPC]
+    str    r1,[rSELF,#offThread_jitResumeDPC]
     mov    r2,#kSVSSingleStep           @ r2<- interpreter entry point
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpNoChainNoProfile
 dvmJitToInterpNoChainNoProfile:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC                       @ pass our target PC
     mov    r2,#kSVSNoProfile            @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpTraceSelectNoChain
 dvmJitToInterpTraceSelectNoChain:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC                       @ pass our target PC
     mov    r2,#kSVSTraceSelect          @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpTraceSelect
 dvmJitToInterpTraceSelect:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     ldr    r0,[lr, #-1]                 @ pass our target PC
     mov    r2,#kSVSTraceSelect          @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpBackwardBranch
 dvmJitToInterpBackwardBranch:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     ldr    r0,[lr, #-1]                 @ pass our target PC
     mov    r2,#kSVSBackwardBranch       @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpNormal
 dvmJitToInterpNormal:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     ldr    r0,[lr, #-1]                 @ pass our target PC
     mov    r2,#kSVSNormal               @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 
     .global dvmJitToInterpNoChain
 dvmJitToInterpNoChain:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC                       @ pass our target PC
     mov    r2,#kSVSNoChain              @ r2<- interpreter entry point
     mov    r3, #0                       @ 0 means !inJitCodeCache
-    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
     b      jitSVShadowRunEnd            @ doesn't return
 #else
 /*
@@ -9388,7 +20920,6 @@
  */
     .global dvmJitToInterpPunt
 dvmJitToInterpPunt:
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    rPC, r0
 #if defined(WITH_JIT_TUNING)
     mov    r0,lr
@@ -9396,8 +20927,8 @@
 #endif
     EXPORT_PC()
     mov    r0, #0
-    str    r0, [r10, #offThread_inJitCodeCache] @ Back to the interp land
-    adrl   rIBASE, dvmAsmInstructionStart
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     FETCH_INST()
     GET_INST_OPCODE(ip)
     GOTO_OPCODE(ip)
@@ -9411,17 +20942,17 @@
  */
     .global dvmJitToInterpSingleStep
 dvmJitToInterpSingleStep:
-    str    lr,[rGLUE,#offGlue_jitResumeNPC]
-    str    r1,[rGLUE,#offGlue_jitResumeDPC]
+    str    lr,[rSELF,#offThread_jitResumeNPC]
+    str    r1,[rSELF,#offThread_jitResumeDPC]
     mov    r1,#kInterpEntryInstr
     @ enum is 4 byte in aapcs-EABI
-    str    r1, [rGLUE, #offGlue_entryPoint]
+    str    r1, [rSELF, #offThread_entryPoint]
     mov    rPC,r0
     EXPORT_PC()
 
-    adrl   rIBASE, dvmAsmInstructionStart
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     mov    r2,#kJitSingleStep     @ Ask for single step and then revert
-    str    r2,[rGLUE,#offGlue_jitState]
+    str    r2,[rSELF,#offThread_jitState]
     mov    r1,#1                  @ set changeInterp to bail to debug interp
     b      common_gotoBail
 
@@ -9434,10 +20965,9 @@
 #if defined(WITH_JIT_TUNING)
     bl     dvmBumpNoChain
 #endif
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC
-    bl     dvmJitGetCodeAddr        @ Is there a translation?
-    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl     dvmJitGetTraceAddr       @ Is there a translation?
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     mov    r1, rPC                  @ arg1 of translation may need this
     mov    lr, #0                   @  in case target is HANDLER_INTERPRET
     cmp    r0,#0                    @ !0 means translation exists
@@ -9452,12 +20982,11 @@
     .global dvmJitToInterpTraceSelect
 dvmJitToInterpTraceSelect:
     ldr    rPC,[lr, #-1]           @ get our target PC
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     add    rINST,lr,#-5            @ save start of chain branch
     add    rINST, #-4              @  .. which is 9 bytes back
     mov    r0,rPC
-    bl     dvmJitGetCodeAddr       @ Is there a translation?
-    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl     dvmJitGetTraceAddr      @ Is there a translation?
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     cmp    r0,#0
     beq    2f
     mov    r1,rINST
@@ -9470,7 +20999,7 @@
 
 /* No translation, so request one if profiling isn't disabled*/
 2:
-    adrl   rIBASE, dvmAsmInstructionStart
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     GET_JIT_PROF_TABLE(r0)
     FETCH_INST()
     cmp    r0, #0
@@ -9496,15 +21025,14 @@
     .global dvmJitToInterpNormal
 dvmJitToInterpNormal:
     ldr    rPC,[lr, #-1]           @ get our target PC
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     add    rINST,lr,#-5            @ save start of chain branch
     add    rINST,#-4               @ .. which is 9 bytes back
 #if defined(WITH_JIT_TUNING)
     bl     dvmBumpNormal
 #endif
     mov    r0,rPC
-    bl     dvmJitGetCodeAddr        @ Is there a translation?
-    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl     dvmJitGetTraceAddr      @ Is there a translation?
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     cmp    r0,#0
     beq    toInterpreter            @ go if not, otherwise do chain
     mov    r1,rINST
@@ -9524,16 +21052,15 @@
 #if defined(WITH_JIT_TUNING)
     bl     dvmBumpNoChain
 #endif
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC
-    bl     dvmJitGetCodeAddr        @ Is there a translation?
-    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl     dvmJitGetTraceAddr       @ Is there a translation?
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     mov    r1, rPC                  @ arg1 of translation may need this
     mov    lr, #0                   @  in case target is HANDLER_INTERPRET
     cmp    r0,#0
     bxne   r0                       @ continue native execution if so
     EXPORT_PC()
-    adrl   rIBASE, dvmAsmInstructionStart
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     FETCH_INST()
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     GOTO_OPCODE(ip)                     @ jump to next instruction
@@ -9547,10 +21074,9 @@
 #if defined(WITH_JIT_TUNING)
     bl     dvmBumpNoChain
 #endif
-    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
     mov    r0,rPC
-    bl     dvmJitGetCodeAddr        @ Is there a translation?
-    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl     dvmJitGetTraceAddr       @ Is there a translation?
+    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     mov    r1, rPC                  @ arg1 of translation may need this
     mov    lr, #0                   @  in case target is HANDLER_INTERPRET
     cmp    r0,#0
@@ -9559,13 +21085,13 @@
 
 /*
  * No translation, restore interpreter regs and start interpreting.
- * rGLUE & rFP were preserved in the translated code, and rPC has
+ * rSELF & rFP were preserved in the translated code, and rPC has
  * already been restored by the time we get here.  We'll need to set
  * up rIBASE & rINST, and load the address of the JitTable into r0.
  */
 toInterpreter:
     EXPORT_PC()
-    adrl   rIBASE, dvmAsmInstructionStart
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     FETCH_INST()
     GET_JIT_PROF_TABLE(r0)
     @ NOTE: intended fallthrough
@@ -9597,13 +21123,13 @@
  * is already a native translation in place (and, if so,
  * jump to it now).
  */
+
     GET_JIT_THRESHOLD(r1)
-    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
     strb    r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ reset counter
     EXPORT_PC()
     mov     r0,rPC
-    bl      dvmJitGetCodeAddr           @ r0<- dvmJitGetCodeAddr(rPC)
-    str     r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+    bl      dvmJitGetTraceAddr          @ r0<- dvmJitGetTraceAddr(rPC)
+    str     r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
     mov     r1, rPC                     @ arg1 of translation may need this
     mov     lr, #0                      @  in case target is HANDLER_INTERPRET
     cmp     r0,#0
@@ -9624,9 +21150,8 @@
     cmp     r0, r10                     @ special case?
     bne     jitSVShadowRunStart         @ set up self verification shadow space
     @ Need to clear the inJitCodeCache flag
-    ldr    r10, [rGLUE, #offGlue_self]  @ r10 <- glue->self
     mov    r3, #0                       @ 0 means not in the JIT code cache
-    str    r3, [r10, #offThread_inJitCodeCache] @ back to the interp land
+    str    r3, [rSELF, #offThread_inJitCodeCache] @ back to the interp land
     GET_INST_OPCODE(ip)
     GOTO_OPCODE(ip)
     /* no return */
@@ -9637,9 +21162,10 @@
  *  r2 is jit state, e.g. kJitTSelectRequest or kJitTSelectRequestHot
  */
 common_selectTrace:
-    str     r2,[rGLUE,#offGlue_jitState]
+
+    str     r2,[rSELF,#offThread_jitState]
     mov     r2,#kInterpEntryInstr       @ normal entry reason
-    str     r2,[rGLUE,#offGlue_entryPoint]
+    str     r2,[rSELF,#offThread_entryPoint]
     mov     r1,#1                       @ set changeInterp
     b       common_gotoBail
 
@@ -9648,42 +21174,41 @@
  * Save PC and registers to shadow memory for self verification mode
  * before jumping to native translation.
  * On entry:
- *    rPC, rFP, rGLUE: the values that they should contain
+ *    rPC, rFP, rSELF: the values that they should contain
  *    r10: the address of the target translation.
  */
 jitSVShadowRunStart:
     mov     r0,rPC                      @ r0<- program counter
     mov     r1,rFP                      @ r1<- frame pointer
-    mov     r2,rGLUE                    @ r2<- InterpState pointer
+    mov     r2,rSELF                    @ r2<- self (Thread) pointer
     mov     r3,r10                      @ r3<- target translation
     bl      dvmSelfVerificationSaveState @ save registers to shadow space
     ldr     rFP,[r0,#offShadowSpace_shadowFP] @ rFP<- fp in shadow space
-    add     rGLUE,r0,#offShadowSpace_interpState @ rGLUE<- rGLUE in shadow space
     bx      r10                         @ jump to the translation
 
 /*
- * Restore PC, registers, and interpState to original values
+ * Restore PC, registers, and interpreter state to original values
  * before jumping back to the interpreter.
  */
 jitSVShadowRunEnd:
     mov    r1,rFP                        @ pass ending fp
+    mov    r3,rSELF                      @ pass self ptr for convenience
     bl     dvmSelfVerificationRestoreState @ restore pc and fp values
-    ldr    rPC,[r0,#offShadowSpace_startPC] @ restore PC
-    ldr    rFP,[r0,#offShadowSpace_fp]   @ restore FP
-    ldr    rGLUE,[r0,#offShadowSpace_glue] @ restore InterpState
+    ldr    rPC,[rSELF,#offThread_pc]     @ restore PC
+    ldr    rFP,[rSELF,#offThread_fp]     @ restore FP
     ldr    r1,[r0,#offShadowSpace_svState] @ get self verification state
     cmp    r1,#0                         @ check for punt condition
     beq    1f
     mov    r2,#kJitSelfVerification      @ ask for self verification
-    str    r2,[rGLUE,#offGlue_jitState]
+    str    r2,[rSELF,#offThread_jitState]
     mov    r2,#kInterpEntryInstr         @ normal entry reason
-    str    r2,[rGLUE,#offGlue_entryPoint]
+    str    r2,[rSELF,#offThread_entryPoint]
     mov    r1,#1                         @ set changeInterp
     b      common_gotoBail
 
 1:                                       @ exit to interpreter without check
     EXPORT_PC()
-    adrl   rIBASE, dvmAsmInstructionStart
+    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
     FETCH_INST()
     GET_INST_OPCODE(ip)
     GOTO_OPCODE(ip)
@@ -9738,48 +21263,20 @@
  *  r9 is trampoline PC adjustment *in bytes*
  */
 common_periodicChecks:
-    ldr     r3, [rGLUE, #offGlue_pSelfSuspendCount] @ r3<- &suspendCount
-
-    ldr     r1, [rGLUE, #offGlue_pDebuggerActive]   @ r1<- &debuggerActive
-    ldr     r2, [rGLUE, #offGlue_pActiveProfilers]  @ r2<- &activeProfilers
-
-    ldr     ip, [r3]                    @ ip<- suspendCount (int)
-
-    cmp     r1, #0                      @ debugger enabled?
-#if defined(WORKAROUND_CORTEX_A9_745320)
-    /* Don't use conditional loads if the HW defect exists */
-    beq     101f
-    ldrb    r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
-101:
-#else
-    ldrneb  r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
-#endif
-    ldr     r2, [r2]                    @ r2<- activeProfilers (int)
-    orrnes  ip, ip, r1                  @ ip<- suspendCount | debuggerActive
-    /*
-     * Don't switch the interpreter in the libdvm_traceview build even if the
-     * profiler is active.
-     * The code here is opted for less intrusion instead of performance.
-     * That is, *pActiveProfilers is still loaded into r2 even though it is not
-     * used when WITH_INLINE_PROFILING is defined.
-     */
-#if !defined(WITH_INLINE_PROFILING)
-    orrs    ip, ip, r2                  @ ip<- suspend|debugger|profiler; set Z
-#endif
-
-
-    bxeq    lr                          @ all zero, return
-
+/* TUNING - make this a direct load when interpBreak moved to Thread */
+    ldr     r1, [rSELF, #offThread_pInterpBreak] @ r1<- &interpBreak
+    /* speculatively load the thread-specific suspend count */
+    ldr     ip, [rSELF, #offThread_suspendCount]
+    ldr     r1, [r1]                                @ r1<- interpBreak
+    cmp     r1, #0                                  @ anything unusual?
+    bxeq    lr                                      @ return if not
     /*
      * One or more interesting events have happened.  Figure out what.
      *
-     * If debugging or profiling are compiled in, we need to disambiguate.
-     *
      * r0 still holds the reentry type.
      */
-    ldr     ip, [r3]                    @ ip<- suspendCount (int)
     cmp     ip, #0                      @ want suspend?
-    beq     1f                          @ no, must be debugger/profiler
+    beq     3f                          @ no, must be something else
 
     stmfd   sp!, {r0, lr}               @ preserve r0 and lr
 #if defined(WITH_JIT)
@@ -9787,77 +21284,86 @@
      * Refresh the Jit's cached copy of profile table pointer.  This pointer
      * doubles as the Jit's on/off switch.
      */
-    ldr     r3, [rGLUE, #offGlue_ppJitProfTable] @ r3<-&gDvmJit.pJitProfTable
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    ldr     r3, [rSELF, #offThread_ppJitProfTable] @ r3<-&gDvmJit.pJitProfTable
+    mov     r0, rSELF                  @ r0<- self
     ldr     r3, [r3] @ r3 <- pJitProfTable
     EXPORT_PC()                         @ need for precise GC
-    str     r3, [rGLUE, #offGlue_pJitProfTable] @ refresh Jit's on/off switch
+    str     r3, [rSELF, #offThread_pJitProfTable] @ refresh Jit's on/off switch
 #else
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    mov     r0, rSELF                   @ r0<- self
     EXPORT_PC()                         @ need for precise GC
 #endif
     bl      dvmCheckSuspendPending      @ do full check, suspend if necessary
     ldmfd   sp!, {r0, lr}               @ restore r0 and lr
 
     /*
-     * Reload the debugger/profiler enable flags.  We're checking to see
-     * if either of these got set while we were suspended.
-     *
-     * If WITH_INLINE_PROFILING is configured, don't check whether the profiler
-     * is enabled or not as the profiling will be done inline.
+     * Reload the interpBreak flags - they may have changed while we
+     * were suspended.
      */
-    ldr     r1, [rGLUE, #offGlue_pDebuggerActive]   @ r1<- &debuggerActive
-    cmp     r1, #0                      @ debugger enabled?
-#if defined(WORKAROUND_CORTEX_A9_745320)
-    /* Don't use conditional loads if the HW defect exists */
-    beq     101f
-    ldrb    r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
-101:
-#else
-    ldrneb  r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
-#endif
+/* TUNING - direct load when InterpBreak moved to Thread */
+    ldr     r1, [rSELF, #offThread_pInterpBreak]   @ r1<- &interpBreak
+    ldr     r1, [r1]                    @ r1<- interpBreak
+3:
+    /*
+     * TODO: this code is too fragile.  Need a general mechanism
+     * to identify what actions to take by submode.  Some profiling modes
+     * (instruction count) need to single-step, while method tracing
+     * may not.  Debugging with breakpoints can run unfettered, but
+     * source-level single-stepping requires Dalvik singlestepping.
+     * GC may require a one-shot action and then full-speed resumption.
+     */
+    ands    r1, #(kSubModeDebuggerActive | kSubModeEmulatorTrace | kSubModeInstCounting)
+    bxeq    lr                          @ nothing to do, return
 
-#if !defined(WITH_INLINE_PROFILING)
-    ldr     r2, [rGLUE, #offGlue_pActiveProfilers]  @ r2<- &activeProfilers
-    ldr     r2, [r2]                    @ r2<- activeProfilers (int)
-    orrs    r1, r1, r2
-#else
-    cmp     r1, #0                      @ only consult the debuggerActive flag
-#endif
-
-    beq     2f
-
-1:  @ debugger/profiler enabled, bail out; glue->entryPoint was set above
-    str     r0, [rGLUE, #offGlue_entryPoint]    @ store r0, need for debug/prof
+    @ debugger/profiler enabled, bail out; self->entryPoint was set above
+    str     r0, [rSELF, #offThread_entryPoint]  @ store r0, need for debug/prof
     add     rPC, rPC, r9                @ update rPC
     mov     r1, #1                      @ "want switch" = true
     b       common_gotoBail             @ side exit
 
-2:
-    bx      lr                          @ nothing to do, return
-
 
 /*
  * The equivalent of "goto bail", this calls through the "bail handler".
  *
- * State registers will be saved to the "glue" area before bailing.
+ * State registers will be saved to the "thread" area before bailing.
  *
  * On entry:
  *  r1 is "bool changeInterp", indicating if we want to switch to the
  *     other interpreter or just bail all the way out
  */
 common_gotoBail:
-    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
-    mov     r0, rGLUE                   @ r0<- glue ptr
-    b       dvmMterpStdBail             @ call(glue, changeInterp)
+    SAVE_PC_FP_TO_SELF()                @ export state to "thread"
+    mov     r0, rSELF                   @ r0<- self ptr
+    b       dvmMterpStdBail             @ call(self, changeInterp)
 
     @add     r1, r1, #1                  @ using (boolean+1)
-    @add     r0, rGLUE, #offGlue_jmpBuf  @ r0<- &glue->jmpBuf
+    @add     r0, rSELF, #offThread_jmpBuf @ r0<- &self->jmpBuf
     @bl      _longjmp                    @ does not return
     @bl      common_abort
 
 
 /*
+ * Common code for jumbo method invocation.
+ * NOTE: this adjusts rPC to account for the difference in instruction width.
+ * As a result, the savedPc in the stack frame will not be wholly accurate. So
+ * long as that is only used for source file line number calculations, we're
+ * okay.
+ *
+ * On entry:
+ *  r0 is "Method* methodToCall", the method we're trying to call
+ */
+common_invokeMethodJumbo:
+.LinvokeNewJumbo:
+    @ prepare to copy args to "outs" area of current frame
+    add     rPC, rPC, #4                @ adjust pc to make return consistent
+    FETCH(r2, 1)                        @ r2<- BBBB (arg count)
+    SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
+    cmp     r2, #0                      @ no args?
+    beq     .LinvokeArgsDone            @ if no args, skip the rest
+    FETCH(r1, 2)                        @ r1<- CCCC
+    b       .LinvokeRangeArgs           @ handle args like invoke range
+
+/*
  * Common code for method invocation with range.
  *
  * On entry:
@@ -9871,16 +21377,15 @@
     beq     .LinvokeArgsDone            @ if no args, skip the rest
     FETCH(r1, 2)                        @ r1<- CCCC
 
+.LinvokeRangeArgs:
     @ r0=methodToCall, r1=CCCC, r2=count, r10=outs
     @ (very few methods have > 10 args; could unroll for common cases)
     add     r3, rFP, r1, lsl #2         @ r3<- &fp[CCCC]
     sub     r10, r10, r2, lsl #2        @ r10<- "outs" area, for call args
-    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
 1:  ldr     r1, [r3], #4                @ val = *fp++
     subs    r2, r2, #1                  @ count--
     str     r1, [r10], #4               @ *outs++ = val
     bne     1b                          @ ...while count != 0
-    ldrh    r3, [r0, #offMethod_outsSize]   @ r3<- methodToCall->outsSize
     b       .LinvokeArgsDone
 
 /*
@@ -9895,11 +21400,9 @@
     movs    r2, rINST, lsr #12          @ r2<- B (arg count) -- test for zero
     SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
     FETCH(r1, 2)                        @ r1<- GFED (load here to hide latency)
-    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
-    ldrh    r3, [r0, #offMethod_outsSize]  @ r3<- methodToCall->outsSize
     beq     .LinvokeArgsDone
 
-    @ r0=methodToCall, r1=GFED, r3=outSize, r2=count, r9=regSize, r10=outs
+    @ r0=methodToCall, r1=GFED, r2=count, r10=outs
 .LinvokeNonRange:
     rsb     r2, r2, #5                  @ r2<- 5-r2
     add     pc, pc, r2, lsl #4          @ computed goto, 4 instrs each
@@ -9926,7 +21429,9 @@
     str     r2, [r10, #-4]!             @ *--outs = vD
 0:  @ fall through to .LinvokeArgsDone
 
-.LinvokeArgsDone: @ r0=methodToCall, r3=outSize, r9=regSize
+.LinvokeArgsDone: @ r0=methodToCall
+    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
+    ldrh    r3, [r0, #offMethod_outsSize]  @ r3<- methodToCall->outsSize
     ldr     r2, [r0, #offMethod_insns]  @ r2<- method->insns
     ldr     rINST, [r0, #offMethod_clazz]  @ rINST<- method->clazz
     @ find space for the new stack frame, check for overflow
@@ -9934,13 +21439,15 @@
     sub     r1, r1, r9, lsl #2          @ r1<- newFp (old savearea - regsSize)
     SAVEAREA_FROM_FP(r10, r1)           @ r10<- newSaveArea
 @    bl      common_dumpRegs
-    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
+    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
     sub     r3, r10, r3, lsl #2         @ r3<- bottom (newsave - outsSize)
     cmp     r3, r9                      @ bottom < interpStackEnd?
+    ldr     lr, [rSELF, #offThread_pInterpBreak]
     ldr     r3, [r0, #offMethod_accessFlags] @ r3<- methodToCall->accessFlags
     blo     .LstackOverflow             @ yes, this frame will overflow stack
 
     @ set up newSaveArea
+    ldr     lr, [lr]                    @ lr<- active submodes
 #ifdef EASY_GDB
     SAVEAREA_FROM_FP(ip, rFP)           @ ip<- stack save area
     str     ip, [r10, #offStackSaveArea_prevSave]
@@ -9951,13 +21458,14 @@
     mov     r9, #0
     str     r9, [r10, #offStackSaveArea_returnAddr]
 #endif
-#if defined(WITH_INLINE_PROFILING)
+    ands    lr, #kSubModeMethodTrace    @ method tracing?
+    beq     1f                          @ skip if not
     stmfd   sp!, {r0-r3}                @ preserve r0-r3
-    mov     r1, r6
-    @ r0=methodToCall, r1=rGlue
+    mov     r1, rSELF
+    @ r0=methodToCall, r1=rSELF
     bl      dvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
-#endif
+1:
     str     r0, [r10, #offStackSaveArea_method]
     tst     r3, #ACC_NATIVE
     bne     .LinvokeNative
@@ -9980,18 +21488,17 @@
     ldrh    r9, [r2]                        @ r9 <- load INST from new PC
     ldr     r3, [rINST, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
     mov     rPC, r2                         @ publish new rPC
-    ldr     r2, [rGLUE, #offGlue_self]      @ r2<- glue->self
 
-    @ Update "glue" values for the new method
-    @ r0=methodToCall, r1=newFp, r2=self, r3=newMethodClass, r9=newINST
-    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
-    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
+    @ Update state values for the new method
+    @ r0=methodToCall, r1=newFp, r3=newMethodClass, r9=newINST
+    str     r0, [rSELF, #offThread_method]    @ self->method = methodToCall
+    str     r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
 #if defined(WITH_JIT)
     GET_JIT_PROF_TABLE(r0)
     mov     rFP, r1                         @ fp = newFp
     GET_PREFETCHED_OPCODE(ip, r9)           @ extract prefetched opcode from r9
     mov     rINST, r9                       @ publish new rINST
-    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
     cmp     r0,#0
     bne     common_updateProfile
     GOTO_OPCODE(ip)                         @ jump to next instruction
@@ -9999,22 +21506,23 @@
     mov     rFP, r1                         @ fp = newFp
     GET_PREFETCHED_OPCODE(ip, r9)           @ extract prefetched opcode from r9
     mov     rINST, r9                       @ publish new rINST
-    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
     GOTO_OPCODE(ip)                         @ jump to next instruction
 #endif
 
 .LinvokeNative:
     @ Prep for the native call
     @ r0=methodToCall, r1=newFp, r10=newSaveArea
-    ldr     r3, [rGLUE, #offGlue_self]      @ r3<- glue->self
-    ldr     r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
-    str     r1, [r3, #offThread_curFrame]   @ self->curFrame = newFp
+    ldr     lr, [rSELF, #offThread_pInterpBreak]
+    ldr     r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
+    str     r1, [rSELF, #offThread_curFrame]   @ self->curFrame = newFp
     str     r9, [r10, #offStackSaveArea_localRefCookie] @newFp->localRefCookie=top
-    mov     r9, r3                      @ r9<- glue->self (preserve)
+    ldr     lr, [lr]                    @ lr<- active submodes
 
     mov     r2, r0                      @ r2<- methodToCall
     mov     r0, r1                      @ r0<- newFp (points to args)
-    add     r1, rGLUE, #offGlue_retval  @ r1<- &retval
+    add     r1, rSELF, #offThread_retval  @ r1<- &retval
+    mov     r3, rSELF                   @ arg3<- self
 
 #ifdef ASSIST_DEBUGGER
     /* insert fake function header to help gdb find the stack frame */
@@ -10027,36 +21535,27 @@
 .Lskip:
 #endif
 
-#if defined(WITH_INLINE_PROFILING)
-    @ r2=JNIMethod, r6=rGLUE
-    stmfd   sp!, {r2,r6}
-#endif
-
+    ands    lr, #kSubModeMethodTrace    @ method tracing?
+    bne     330f                        @ hop if so
     mov     lr, pc                      @ set return addr
     ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
-
-#if defined(WITH_INLINE_PROFILING)
-    @ r0=JNIMethod, r1=rGLUE
-    ldmfd   sp!, {r0-r1}
-    bl      dvmFastNativeMethodTraceExit
-#endif
-
+220:
 #if defined(WITH_JIT)
-    ldr     r3, [rGLUE, #offGlue_ppJitProfTable] @ Refresh Jit's on/off status
+    ldr     r3, [rSELF, #offThread_ppJitProfTable] @ Refresh Jit's on/off status
 #endif
 
-    @ native return; r9=self, r10=newSaveArea
+    @ native return; r10=newSaveArea
     @ equivalent to dvmPopJniLocals
     ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved top
-    ldr     r1, [r9, #offThread_exception] @ check for exception
+    ldr     r1, [rSELF, #offThread_exception] @ check for exception
 #if defined(WITH_JIT)
     ldr     r3, [r3]                    @ r3 <- gDvmJit.pProfTable
 #endif
-    str     rFP, [r9, #offThread_curFrame]  @ self->curFrame = fp
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = fp
     cmp     r1, #0                      @ null?
-    str     r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top
+    str     r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
 #if defined(WITH_JIT)
-    str     r3, [rGLUE, #offGlue_pJitProfTable] @ refresh cached on/off switch
+    str     r3, [rSELF, #offThread_pJitProfTable] @ refresh cached on/off switch
 #endif
     bne     common_exceptionThrown      @ no, handle exception
 
@@ -10064,13 +21563,26 @@
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     GOTO_OPCODE(ip)                     @ jump to next instruction
 
+330:
+    @ r2=JNIMethod, r6=rSELF
+    stmfd   sp!, {r2,r6}
+
+    mov     lr, pc                      @ set return addr
+    ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+
+    @ r0=JNIMethod, r1=rSELF
+    ldmfd   sp!, {r0-r1}
+    bl      dvmFastNativeMethodTraceExit
+    b       220b
+
 .LstackOverflow:    @ r0=methodToCall
     mov     r1, r0                      @ r1<- methodToCall
-    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- self
+    mov     r0, rSELF                   @ r0<- self
     bl      dvmHandleStackOverflow
     b       common_exceptionThrown
 #ifdef ASSIST_DEBUGGER
     .fnend
+    .size   dalvik_mterp, .-dalvik_mterp
 #endif
 
 
@@ -10090,8 +21602,8 @@
     sub     sp, sp, #8                  @ space for args + pad
     FETCH(ip, 2)                        @ ip<- FEDC or CCCC
     mov     r2, r0                      @ A2<- methodToCall
-    mov     r0, rGLUE                   @ A0<- glue
-    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
+    mov     r0, rSELF                   @ A0<- self
+    SAVE_PC_FP_TO_SELF()                @ export state to "self"
     mov     r1, r9                      @ A1<- methodCallRange
     mov     r3, rINST, lsr #8           @ A3<- AA
     str     ip, [sp, #0]                @ A4<- ip
@@ -10113,19 +21625,21 @@
     mov     r9, #0
     bl      common_periodicChecks
 
-#if defined(WITH_INLINE_PROFILING)
+    ldr     lr, [rSELF, #offThread_pInterpBreak]
+    SAVEAREA_FROM_FP(r0, rFP)
+    ldr     lr, [lr]                    @ lr<- active submodes
+    ldr     r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc
+    ands    lr, #kSubModeMethodTrace    @ method tracing?
+    beq     333f
     stmfd   sp!, {r0-r3}                @ preserve r0-r3
-    mov     r0, r6
-    @ r0=rGlue
+    mov     r0, rSELF
+    @ r0=rSELF
     bl      dvmFastJavaMethodTraceExit
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
-#endif
-    SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
+333:
     ldr     rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame
-    ldr     r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc
     ldr     r2, [rFP, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                         @ r2<- method we're returning to
-    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
     cmp     r2, #0                      @ is this a break frame?
 #if defined(WORKAROUND_CORTEX_A9_745320)
     /* Don't use conditional loads if the HW defect exists */
@@ -10139,14 +21653,14 @@
     beq     common_gotoBail             @ break frame, bail out completely
 
     PREFETCH_ADVANCE_INST(rINST, r9, 3) @ advance r9, update new rINST
-    str     r2, [rGLUE, #offGlue_method]@ glue->method = newSave->method
+    str     r2, [rSELF, #offThread_method]@ self->method = newSave->method
     ldr     r1, [r10, #offClassObject_pDvmDex]   @ r1<- method->clazz->pDvmDex
-    str     rFP, [r3, #offThread_curFrame]  @ self->curFrame = fp
+    str     rFP, [rSELF, #offThread_curFrame]  @ self->curFrame = fp
 #if defined(WITH_JIT)
     ldr     r10, [r0, #offStackSaveArea_returnAddr] @ r10 = saveArea->returnAddr
     mov     rPC, r9                     @ publish new rPC
-    str     r1, [rGLUE, #offGlue_methodClassDex]
-    str     r10, [r3, #offThread_inJitCodeCache]  @ may return to JIT'ed land
+    str     r1, [rSELF, #offThread_methodClassDex]
+    str     r10, [rSELF, #offThread_inJitCodeCache]  @ may return to JIT'ed land
     cmp     r10, #0                      @ caller is compiled code
     blxne   r10
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
@@ -10154,7 +21668,7 @@
 #else
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     mov     rPC, r9                     @ publish new rPC
-    str     r1, [rGLUE, #offGlue_methodClassDex]
+    str     r1, [rSELF, #offThread_methodClassDex]
     GOTO_OPCODE(ip)                     @ jump to next instruction
 #endif
 
@@ -10163,8 +21677,8 @@
      */
      .if    0
 .LreturnOld:
-    SAVE_PC_FP_TO_GLUE()                @ export state
-    mov     r0, rGLUE                   @ arg to function
+    SAVE_PC_FP_TO_SELF()                @ export state
+    mov     r0, rSELF                   @ arg to function
     bl      dvmMterp_returnFromMethod
     b       common_resumeAfterGlueCall
     .endif
@@ -10187,13 +21701,12 @@
     mov     r9, #0
     bl      common_periodicChecks
 
-    ldr     r10, [rGLUE, #offGlue_self] @ r10<- glue->self
-    ldr     r9, [r10, #offThread_exception] @ r9<- self->exception
-    mov     r1, r10                     @ r1<- self
+    ldr     r9, [rSELF, #offThread_exception] @ r9<- self->exception
+    mov     r1, rSELF                   @ r1<- self
     mov     r0, r9                      @ r0<- exception
     bl      dvmAddTrackedAlloc          @ don't let the exception be GCed
     mov     r3, #0                      @ r3<- NULL
-    str     r3, [r10, #offThread_exception] @ self->exception = NULL
+    str     r3, [rSELF, #offThread_exception] @ self->exception = NULL
 
     /* set up args and a local for "&fp" */
     /* (str sp, [sp, #-4]!  would be perfect here, but is discouraged) */
@@ -10201,8 +21714,8 @@
     mov     ip, sp                      @ ip<- &fp
     mov     r3, #0                      @ r3<- false
     str     ip, [sp, #-4]!              @ *--sp = &fp
-    ldr     r1, [rGLUE, #offGlue_method] @ r1<- glue->method
-    mov     r0, r10                     @ r0<- self
+    ldr     r1, [rSELF, #offThread_method] @ r1<- self->method
+    mov     r0, rSELF                   @ r0<- self
     ldr     r1, [r1, #offMethod_insns]  @ r1<- method->insns
     mov     r2, r9                      @ r2<- exception
     sub     r1, rPC, r1                 @ r1<- pc - method->insns
@@ -10212,11 +21725,11 @@
     bl      dvmFindCatchBlock           @ call(self, relPc, exc, scan?, &fp)
 
     /* fix earlier stack overflow if necessary; may trash rFP */
-    ldrb    r1, [r10, #offThread_stackOverflowed]
+    ldrb    r1, [rSELF, #offThread_stackOverflowed]
     cmp     r1, #0                      @ did we overflow earlier?
     beq     1f                          @ no, skip ahead
     mov     rFP, r0                     @ save relPc result in rFP
-    mov     r0, r10                     @ r0<- self
+    mov     r0, rSELF                   @ r0<- self
     mov     r1, r9                      @ r1<- exception
     bl      dvmCleanupStackOverflow     @ call(self)
     mov     r0, rFP                     @ restore result
@@ -10231,30 +21744,30 @@
     /* adjust locals to match self->curFrame and updated PC */
     SAVEAREA_FROM_FP(r1, rFP)           @ r1<- new save area
     ldr     r1, [r1, #offStackSaveArea_method] @ r1<- new method
-    str     r1, [rGLUE, #offGlue_method]    @ glue->method = new method
+    str     r1, [rSELF, #offThread_method]  @ self->method = new method
     ldr     r2, [r1, #offMethod_clazz]      @ r2<- method->clazz
     ldr     r3, [r1, #offMethod_insns]      @ r3<- method->insns
     ldr     r2, [r2, #offClassObject_pDvmDex] @ r2<- method->clazz->pDvmDex
     add     rPC, r3, r0, asl #1             @ rPC<- method->insns + catchRelPc
-    str     r2, [rGLUE, #offGlue_methodClassDex] @ glue->pDvmDex = meth...
+    str     r2, [rSELF, #offThread_methodClassDex] @ self->pDvmDex = meth...
 
     /* release the tracked alloc on the exception */
     mov     r0, r9                      @ r0<- exception
-    mov     r1, r10                     @ r1<- self
+    mov     r1, rSELF                   @ r1<- self
     bl      dvmReleaseTrackedAlloc      @ release the exception
 
     /* restore the exception if the handler wants it */
     FETCH_INST()                        @ load rINST from rPC
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     cmp     ip, #OP_MOVE_EXCEPTION      @ is it "move-exception"?
-    streq   r9, [r10, #offThread_exception] @ yes, restore the exception
+    streq   r9, [rSELF, #offThread_exception] @ yes, restore the exception
     GOTO_OPCODE(ip)                     @ jump to next instruction
 
-.LnotCaughtLocally: @ r9=exception, r10=self
+.LnotCaughtLocally: @ r9=exception
     /* fix stack overflow if necessary */
-    ldrb    r1, [r10, #offThread_stackOverflowed]
+    ldrb    r1, [rSELF, #offThread_stackOverflowed]
     cmp     r1, #0                      @ did we overflow earlier?
-    movne   r0, r10                     @ if yes: r0<- self
+    movne   r0, rSELF                   @ if yes: r0<- self
     movne   r1, r9                      @ if yes: r1<- exception
     blne    dvmCleanupStackOverflow     @ if yes: call(self)
 
@@ -10263,14 +21776,14 @@
     /* call __android_log_print(prio, tag, format, ...) */
     /* "Exception %s from %s:%d not caught locally" */
     @ dvmLineNumFromPC(method, pc - method->insns)
-    ldr     r0, [rGLUE, #offGlue_method]
+    ldr     r0, [rSELF, #offThread_method]
     ldr     r1, [r0, #offMethod_insns]
     sub     r1, rPC, r1
     asr     r1, r1, #1
     bl      dvmLineNumFromPC
     str     r0, [sp, #-4]!
     @ dvmGetMethodSourceFile(method)
-    ldr     r0, [rGLUE, #offGlue_method]
+    ldr     r0, [rSELF, #offThread_method]
     bl      dvmGetMethodSourceFile
     str     r0, [sp, #-4]!
     @ exception->clazz->descriptor
@@ -10282,9 +21795,9 @@
     mov     r0, #3                      @ LOG_DEBUG
     bl      __android_log_print
 #endif
-    str     r9, [r10, #offThread_exception] @ restore exception
+    str     r9, [rSELF, #offThread_exception] @ restore exception
     mov     r0, r9                      @ r0<- exception
-    mov     r1, r10                     @ r1<- self
+    mov     r1, rSELF                   @ r1<- self
     bl      dvmReleaseTrackedAlloc      @ release the exception
     mov     r1, #0                      @ "want switch" = false
     b       common_gotoBail             @ bail out
@@ -10295,8 +21808,8 @@
      */
     .if     0
 .LexceptionOld:
-    SAVE_PC_FP_TO_GLUE()                @ export state
-    mov     r0, rGLUE                   @ arg to function
+    SAVE_PC_FP_TO_SELF()                @ export state
+    mov     r0, rSELF                   @ arg to function
     bl      dvmMterp_exceptionThrown
     b       common_resumeAfterGlueCall
     .endif
@@ -10307,7 +21820,7 @@
  * values and start executing at the next instruction.
  */
 common_resumeAfterGlueCall:
-    LOAD_PC_FP_FROM_GLUE()              @ pull rPC and rFP out of glue
+    LOAD_PC_FP_FROM_SELF()              @ pull rPC and rFP out of thread
     FETCH_INST()                        @ load rINST from rPC
     GET_INST_OPCODE(ip)                 @ extract opcode from rINST
     GOTO_OPCODE(ip)                     @ jump to next instruction
@@ -10315,15 +21828,14 @@
 /*
  * Invalid array index. Note that our calling convention is strange; we use r1
  * and r3 because those just happen to be the registers all our callers are
- * using. We shuffle them here before calling the C function.
+ * using. We move r3 before calling the C function, but r1 happens to match.
  * r1: index
  * r3: size
  */
 common_errArrayIndex:
     EXPORT_PC()
-    mov     r0, r1
-    mov     r1, r3
-    bl      dvmThrowAIOOBE
+    mov     r0, r3
+    bl      dvmThrowArrayIndexOutOfBoundsException
     b       common_exceptionThrown
 
 /*
@@ -10331,29 +21843,28 @@
  */
 common_errDivideByZero:
     EXPORT_PC()
-    ldr     r0, strArithmeticException
-    ldr     r1, strDivideByZero
-    bl      dvmThrowException
+    ldr     r0, strDivideByZero
+    bl      dvmThrowArithmeticException
     b       common_exceptionThrown
 
 /*
  * Attempt to allocate an array with a negative size.
+ * On entry: length in r1
  */
 common_errNegativeArraySize:
     EXPORT_PC()
-    ldr     r0, strNegativeArraySizeException
-    mov     r1, #0
-    bl      dvmThrowException
+    mov     r0, r1                                @ arg0 <- len
+    bl      dvmThrowNegativeArraySizeException    @ (len)
     b       common_exceptionThrown
 
 /*
  * Invocation of a non-existent method.
+ * On entry: method name in r1
  */
 common_errNoSuchMethod:
     EXPORT_PC()
-    ldr     r0, strNoSuchMethodError
-    mov     r1, #0
-    bl      dvmThrowException
+    mov     r0, r1
+    bl      dvmThrowNoSuchMethodError
     b       common_exceptionThrown
 
 /*
@@ -10363,9 +21874,8 @@
  */
 common_errNullObject:
     EXPORT_PC()
-    ldr     r0, strNullPointerException
-    mov     r1, #0
-    bl      dvmThrowException
+    mov     r0, #0
+    bl      dvmThrowNullPointerException
     b       common_exceptionThrown
 
 /*
@@ -10501,17 +22011,8 @@
  * String references, must be close to the code that uses them.
  */
     .align  2
-strArithmeticException:
-    .word   .LstrArithmeticException
 strDivideByZero:
     .word   .LstrDivideByZero
-strNegativeArraySizeException:
-    .word   .LstrNegativeArraySizeException
-strNoSuchMethodError:
-    .word   .LstrNoSuchMethodError
-strNullPointerException:
-    .word   .LstrNullPointerException
-
 strLogTag:
     .word   .LstrLogTag
 strExceptionNotCaughtLocally:
@@ -10539,23 +22040,10 @@
 
 .LstrBadEntryPoint:
     .asciz  "Bad entry point %d\n"
-.LstrArithmeticException:
-    .asciz  "Ljava/lang/ArithmeticException;"
-.LstrDivideByZero:
-    .asciz  "divide by zero"
 .LstrFilledNewArrayNotImpl:
     .asciz  "filled-new-array only implemented for objects and 'int'"
-.LstrInternalError:
-    .asciz  "Ljava/lang/InternalError;"
-.LstrInstantiationError:
-    .asciz  "Ljava/lang/InstantiationError;"
-.LstrNegativeArraySizeException:
-    .asciz  "Ljava/lang/NegativeArraySizeException;"
-.LstrNoSuchMethodError:
-    .asciz  "Ljava/lang/NoSuchMethodError;"
-.LstrNullPointerException:
-    .asciz  "Ljava/lang/NullPointerException;"
-
+.LstrDivideByZero:
+    .asciz  "divide by zero"
 .LstrLogTag:
     .asciz  "mterp"
 .LstrExceptionNotCaughtLocally:
diff --git a/vm/mterp/out/InterpAsm-x86-atom.S b/vm/mterp/out/InterpAsm-x86-atom.S
index 654b974..19cb603 100644
--- a/vm/mterp/out/InterpAsm-x86-atom.S
+++ b/vm/mterp/out/InterpAsm-x86-atom.S
@@ -1814,9 +1814,7 @@
     GET_VREG    rINST                   # rINST<- vAA
     cmp         $0, rINST              # check for null object
     movl        offGlue_self(%eax), %eax # %eax<- glue->self
-#ifdef WITH_MONITOR_TRACKING
-    EXPORT_PC   # export PC so we can grab stack trace
-#endif
+    EXPORT_PC   # need for precise GC
     je          common_errNullObject    # handle null object
 #    jmp         .LOP_MONITOR_ENTER_finish
 #%break
@@ -1829,13 +1827,6 @@
                                         # return: void
     FFETCH_ADV  1, %edx                 # %edx<- next instruction hi; fetch, advance
     lea         8(%esp), %esp
-#ifdef WITH_DEADLOCK_PREDICTION
-    movl        rGLUE, %eax             # %eax<- pMterpGlue
-    movl        offGlue_self(%eax), %eax # %eax<- glue->self
-    movl        offThread_exception(%eax), %eax # %eax<- glue->self->exception
-    cmp         $0, %eax               # check for exception
-    jne         common_exceptionThrown  # handle exception
-#endif
     FGETOP_JMP  1, %edx                 # jump to next instruction; getop, jmp
 
 /* ------------------------------ */
@@ -14642,8 +14633,7 @@
     FINISH_A                            # jump to next instruction
 /* ------------------------------ */
     .balign 64
-.L_OP_INVOKE_DIRECT_EMPTY: /* 0xf0 */
-/* File: x86-atom/OP_INVOKE_DIRECT_EMPTY.S */
+.L_OP_INVOKE_OBJECT_INIT_RANGE: /* 0xf0 */
    /* Copyright (C) 2008 The Android Open Source Project
     *
     * Licensed under the Apache License, Version 2.0 (the "License");
@@ -14660,17 +14650,15 @@
     */
 
    /*
-    * File: OP_INVOKE_DIRECT_EMPTY.S
-    *
-    * Code: Used as a no-op. Uses no substitutions.
-    *
-    * For: invoke-direct-empty
-    *
-    * Format: B|A|op CCCC G|F|E|D (35c)
+    * File: stub.S
     */
 
-    FINISH 3
-
+    SAVE_PC_FP_TO_GLUE %edx             # save program counter and frame pointer
+    pushl       rGLUE                   # push parameter glue
+    call        dvmMterp_OP_INVOKE_OBJECT_INIT_RANGE      # call c-based implementation
+    lea         4(%esp), %esp
+    LOAD_PC_FP_FROM_GLUE                # restore program counter and frame pointer
+    FINISH_A                            # jump to next instruction
 /* ------------------------------ */
     .balign 64
 .L_OP_RETURN_VOID_BARRIER: /* 0xf1 */
@@ -15337,6 +15325,9360 @@
     call        common_abort
 
 
+/* ------------------------------ */
+    .balign 64
+.L_OP_CONST_CLASS_JUMBO: /* 0x100 */
+/* File: x86-atom/OP_CONST_CLASS_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_CHECK_CAST_JUMBO: /* 0x101 */
+/* File: x86-atom/OP_CHECK_CAST_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INSTANCE_OF_JUMBO: /* 0x102 */
+/* File: x86-atom/OP_INSTANCE_OF_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_NEW_INSTANCE_JUMBO: /* 0x103 */
+/* File: x86-atom/OP_NEW_INSTANCE_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_NEW_ARRAY_JUMBO: /* 0x104 */
+/* File: x86-atom/OP_NEW_ARRAY_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_FILLED_NEW_ARRAY_JUMBO: /* 0x105 */
+/* File: x86-atom/OP_FILLED_NEW_ARRAY_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_JUMBO: /* 0x106 */
+/* File: x86-atom/OP_IGET_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_WIDE_JUMBO: /* 0x107 */
+/* File: x86-atom/OP_IGET_WIDE_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_OBJECT_JUMBO: /* 0x108 */
+/* File: x86-atom/OP_IGET_OBJECT_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_BOOLEAN_JUMBO: /* 0x109 */
+/* File: x86-atom/OP_IGET_BOOLEAN_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_BYTE_JUMBO: /* 0x10a */
+/* File: x86-atom/OP_IGET_BYTE_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_CHAR_JUMBO: /* 0x10b */
+/* File: x86-atom/OP_IGET_CHAR_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_SHORT_JUMBO: /* 0x10c */
+/* File: x86-atom/OP_IGET_SHORT_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_JUMBO: /* 0x10d */
+/* File: x86-atom/OP_IPUT_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_WIDE_JUMBO: /* 0x10e */
+/* File: x86-atom/OP_IPUT_WIDE_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_OBJECT_JUMBO: /* 0x10f */
+/* File: x86-atom/OP_IPUT_OBJECT_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_BOOLEAN_JUMBO: /* 0x110 */
+/* File: x86-atom/OP_IPUT_BOOLEAN_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_BYTE_JUMBO: /* 0x111 */
+/* File: x86-atom/OP_IPUT_BYTE_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_CHAR_JUMBO: /* 0x112 */
+/* File: x86-atom/OP_IPUT_CHAR_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_SHORT_JUMBO: /* 0x113 */
+/* File: x86-atom/OP_IPUT_SHORT_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_JUMBO: /* 0x114 */
+/* File: x86-atom/OP_SGET_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_WIDE_JUMBO: /* 0x115 */
+/* File: x86-atom/OP_SGET_WIDE_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_OBJECT_JUMBO: /* 0x116 */
+/* File: x86-atom/OP_SGET_OBJECT_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_BOOLEAN_JUMBO: /* 0x117 */
+/* File: x86-atom/OP_SGET_BOOLEAN_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_BYTE_JUMBO: /* 0x118 */
+/* File: x86-atom/OP_SGET_BYTE_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_CHAR_JUMBO: /* 0x119 */
+/* File: x86-atom/OP_SGET_CHAR_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_SHORT_JUMBO: /* 0x11a */
+/* File: x86-atom/OP_SGET_SHORT_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_JUMBO: /* 0x11b */
+/* File: x86-atom/OP_SPUT_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_WIDE_JUMBO: /* 0x11c */
+/* File: x86-atom/OP_SPUT_WIDE_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_OBJECT_JUMBO: /* 0x11d */
+/* File: x86-atom/OP_SPUT_OBJECT_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_BOOLEAN_JUMBO: /* 0x11e */
+/* File: x86-atom/OP_SPUT_BOOLEAN_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_BYTE_JUMBO: /* 0x11f */
+/* File: x86-atom/OP_SPUT_BYTE_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_CHAR_JUMBO: /* 0x120 */
+/* File: x86-atom/OP_SPUT_CHAR_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_SHORT_JUMBO: /* 0x121 */
+/* File: x86-atom/OP_SPUT_SHORT_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_VIRTUAL_JUMBO: /* 0x122 */
+/* File: x86-atom/OP_INVOKE_VIRTUAL_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_SUPER_JUMBO: /* 0x123 */
+/* File: x86-atom/OP_INVOKE_SUPER_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_DIRECT_JUMBO: /* 0x124 */
+/* File: x86-atom/OP_INVOKE_DIRECT_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_STATIC_JUMBO: /* 0x125 */
+/* File: x86-atom/OP_INVOKE_STATIC_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_INTERFACE_JUMBO: /* 0x126 */
+/* File: x86-atom/OP_INVOKE_INTERFACE_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_27FF: /* 0x127 */
+/* File: x86-atom/OP_UNUSED_27FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_28FF: /* 0x128 */
+/* File: x86-atom/OP_UNUSED_28FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_29FF: /* 0x129 */
+/* File: x86-atom/OP_UNUSED_29FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2AFF: /* 0x12a */
+/* File: x86-atom/OP_UNUSED_2AFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2BFF: /* 0x12b */
+/* File: x86-atom/OP_UNUSED_2BFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2CFF: /* 0x12c */
+/* File: x86-atom/OP_UNUSED_2CFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2DFF: /* 0x12d */
+/* File: x86-atom/OP_UNUSED_2DFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2EFF: /* 0x12e */
+/* File: x86-atom/OP_UNUSED_2EFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_2FFF: /* 0x12f */
+/* File: x86-atom/OP_UNUSED_2FFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_30FF: /* 0x130 */
+/* File: x86-atom/OP_UNUSED_30FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_31FF: /* 0x131 */
+/* File: x86-atom/OP_UNUSED_31FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_32FF: /* 0x132 */
+/* File: x86-atom/OP_UNUSED_32FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_33FF: /* 0x133 */
+/* File: x86-atom/OP_UNUSED_33FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_34FF: /* 0x134 */
+/* File: x86-atom/OP_UNUSED_34FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_35FF: /* 0x135 */
+/* File: x86-atom/OP_UNUSED_35FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_36FF: /* 0x136 */
+/* File: x86-atom/OP_UNUSED_36FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_37FF: /* 0x137 */
+/* File: x86-atom/OP_UNUSED_37FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_38FF: /* 0x138 */
+/* File: x86-atom/OP_UNUSED_38FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_39FF: /* 0x139 */
+/* File: x86-atom/OP_UNUSED_39FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3AFF: /* 0x13a */
+/* File: x86-atom/OP_UNUSED_3AFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3BFF: /* 0x13b */
+/* File: x86-atom/OP_UNUSED_3BFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3CFF: /* 0x13c */
+/* File: x86-atom/OP_UNUSED_3CFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3DFF: /* 0x13d */
+/* File: x86-atom/OP_UNUSED_3DFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3EFF: /* 0x13e */
+/* File: x86-atom/OP_UNUSED_3EFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3FFF: /* 0x13f */
+/* File: x86-atom/OP_UNUSED_3FFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_40FF: /* 0x140 */
+/* File: x86-atom/OP_UNUSED_40FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_41FF: /* 0x141 */
+/* File: x86-atom/OP_UNUSED_41FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_42FF: /* 0x142 */
+/* File: x86-atom/OP_UNUSED_42FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_43FF: /* 0x143 */
+/* File: x86-atom/OP_UNUSED_43FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_44FF: /* 0x144 */
+/* File: x86-atom/OP_UNUSED_44FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_45FF: /* 0x145 */
+/* File: x86-atom/OP_UNUSED_45FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_46FF: /* 0x146 */
+/* File: x86-atom/OP_UNUSED_46FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_47FF: /* 0x147 */
+/* File: x86-atom/OP_UNUSED_47FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_48FF: /* 0x148 */
+/* File: x86-atom/OP_UNUSED_48FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_49FF: /* 0x149 */
+/* File: x86-atom/OP_UNUSED_49FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4AFF: /* 0x14a */
+/* File: x86-atom/OP_UNUSED_4AFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4BFF: /* 0x14b */
+/* File: x86-atom/OP_UNUSED_4BFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4CFF: /* 0x14c */
+/* File: x86-atom/OP_UNUSED_4CFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4DFF: /* 0x14d */
+/* File: x86-atom/OP_UNUSED_4DFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4EFF: /* 0x14e */
+/* File: x86-atom/OP_UNUSED_4EFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_4FFF: /* 0x14f */
+/* File: x86-atom/OP_UNUSED_4FFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_50FF: /* 0x150 */
+/* File: x86-atom/OP_UNUSED_50FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_51FF: /* 0x151 */
+/* File: x86-atom/OP_UNUSED_51FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_52FF: /* 0x152 */
+/* File: x86-atom/OP_UNUSED_52FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_53FF: /* 0x153 */
+/* File: x86-atom/OP_UNUSED_53FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_54FF: /* 0x154 */
+/* File: x86-atom/OP_UNUSED_54FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_55FF: /* 0x155 */
+/* File: x86-atom/OP_UNUSED_55FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_56FF: /* 0x156 */
+/* File: x86-atom/OP_UNUSED_56FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_57FF: /* 0x157 */
+/* File: x86-atom/OP_UNUSED_57FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_58FF: /* 0x158 */
+/* File: x86-atom/OP_UNUSED_58FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_59FF: /* 0x159 */
+/* File: x86-atom/OP_UNUSED_59FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5AFF: /* 0x15a */
+/* File: x86-atom/OP_UNUSED_5AFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5BFF: /* 0x15b */
+/* File: x86-atom/OP_UNUSED_5BFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5CFF: /* 0x15c */
+/* File: x86-atom/OP_UNUSED_5CFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5DFF: /* 0x15d */
+/* File: x86-atom/OP_UNUSED_5DFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5EFF: /* 0x15e */
+/* File: x86-atom/OP_UNUSED_5EFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_5FFF: /* 0x15f */
+/* File: x86-atom/OP_UNUSED_5FFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_60FF: /* 0x160 */
+/* File: x86-atom/OP_UNUSED_60FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_61FF: /* 0x161 */
+/* File: x86-atom/OP_UNUSED_61FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_62FF: /* 0x162 */
+/* File: x86-atom/OP_UNUSED_62FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_63FF: /* 0x163 */
+/* File: x86-atom/OP_UNUSED_63FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_64FF: /* 0x164 */
+/* File: x86-atom/OP_UNUSED_64FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_65FF: /* 0x165 */
+/* File: x86-atom/OP_UNUSED_65FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_66FF: /* 0x166 */
+/* File: x86-atom/OP_UNUSED_66FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_67FF: /* 0x167 */
+/* File: x86-atom/OP_UNUSED_67FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_68FF: /* 0x168 */
+/* File: x86-atom/OP_UNUSED_68FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_69FF: /* 0x169 */
+/* File: x86-atom/OP_UNUSED_69FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6AFF: /* 0x16a */
+/* File: x86-atom/OP_UNUSED_6AFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6BFF: /* 0x16b */
+/* File: x86-atom/OP_UNUSED_6BFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6CFF: /* 0x16c */
+/* File: x86-atom/OP_UNUSED_6CFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6DFF: /* 0x16d */
+/* File: x86-atom/OP_UNUSED_6DFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6EFF: /* 0x16e */
+/* File: x86-atom/OP_UNUSED_6EFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_6FFF: /* 0x16f */
+/* File: x86-atom/OP_UNUSED_6FFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_70FF: /* 0x170 */
+/* File: x86-atom/OP_UNUSED_70FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_71FF: /* 0x171 */
+/* File: x86-atom/OP_UNUSED_71FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_72FF: /* 0x172 */
+/* File: x86-atom/OP_UNUSED_72FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_73FF: /* 0x173 */
+/* File: x86-atom/OP_UNUSED_73FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_74FF: /* 0x174 */
+/* File: x86-atom/OP_UNUSED_74FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_75FF: /* 0x175 */
+/* File: x86-atom/OP_UNUSED_75FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_76FF: /* 0x176 */
+/* File: x86-atom/OP_UNUSED_76FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_77FF: /* 0x177 */
+/* File: x86-atom/OP_UNUSED_77FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_78FF: /* 0x178 */
+/* File: x86-atom/OP_UNUSED_78FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_79FF: /* 0x179 */
+/* File: x86-atom/OP_UNUSED_79FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7AFF: /* 0x17a */
+/* File: x86-atom/OP_UNUSED_7AFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7BFF: /* 0x17b */
+/* File: x86-atom/OP_UNUSED_7BFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7CFF: /* 0x17c */
+/* File: x86-atom/OP_UNUSED_7CFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7DFF: /* 0x17d */
+/* File: x86-atom/OP_UNUSED_7DFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7EFF: /* 0x17e */
+/* File: x86-atom/OP_UNUSED_7EFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7FFF: /* 0x17f */
+/* File: x86-atom/OP_UNUSED_7FFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_80FF: /* 0x180 */
+/* File: x86-atom/OP_UNUSED_80FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_81FF: /* 0x181 */
+/* File: x86-atom/OP_UNUSED_81FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_82FF: /* 0x182 */
+/* File: x86-atom/OP_UNUSED_82FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_83FF: /* 0x183 */
+/* File: x86-atom/OP_UNUSED_83FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_84FF: /* 0x184 */
+/* File: x86-atom/OP_UNUSED_84FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_85FF: /* 0x185 */
+/* File: x86-atom/OP_UNUSED_85FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_86FF: /* 0x186 */
+/* File: x86-atom/OP_UNUSED_86FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_87FF: /* 0x187 */
+/* File: x86-atom/OP_UNUSED_87FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_88FF: /* 0x188 */
+/* File: x86-atom/OP_UNUSED_88FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_89FF: /* 0x189 */
+/* File: x86-atom/OP_UNUSED_89FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8AFF: /* 0x18a */
+/* File: x86-atom/OP_UNUSED_8AFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8BFF: /* 0x18b */
+/* File: x86-atom/OP_UNUSED_8BFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8CFF: /* 0x18c */
+/* File: x86-atom/OP_UNUSED_8CFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8DFF: /* 0x18d */
+/* File: x86-atom/OP_UNUSED_8DFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8EFF: /* 0x18e */
+/* File: x86-atom/OP_UNUSED_8EFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_8FFF: /* 0x18f */
+/* File: x86-atom/OP_UNUSED_8FFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_90FF: /* 0x190 */
+/* File: x86-atom/OP_UNUSED_90FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_91FF: /* 0x191 */
+/* File: x86-atom/OP_UNUSED_91FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_92FF: /* 0x192 */
+/* File: x86-atom/OP_UNUSED_92FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_93FF: /* 0x193 */
+/* File: x86-atom/OP_UNUSED_93FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_94FF: /* 0x194 */
+/* File: x86-atom/OP_UNUSED_94FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_95FF: /* 0x195 */
+/* File: x86-atom/OP_UNUSED_95FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_96FF: /* 0x196 */
+/* File: x86-atom/OP_UNUSED_96FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_97FF: /* 0x197 */
+/* File: x86-atom/OP_UNUSED_97FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_98FF: /* 0x198 */
+/* File: x86-atom/OP_UNUSED_98FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_99FF: /* 0x199 */
+/* File: x86-atom/OP_UNUSED_99FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9AFF: /* 0x19a */
+/* File: x86-atom/OP_UNUSED_9AFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9BFF: /* 0x19b */
+/* File: x86-atom/OP_UNUSED_9BFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9CFF: /* 0x19c */
+/* File: x86-atom/OP_UNUSED_9CFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9DFF: /* 0x19d */
+/* File: x86-atom/OP_UNUSED_9DFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9EFF: /* 0x19e */
+/* File: x86-atom/OP_UNUSED_9EFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_9FFF: /* 0x19f */
+/* File: x86-atom/OP_UNUSED_9FFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A0FF: /* 0x1a0 */
+/* File: x86-atom/OP_UNUSED_A0FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A1FF: /* 0x1a1 */
+/* File: x86-atom/OP_UNUSED_A1FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A2FF: /* 0x1a2 */
+/* File: x86-atom/OP_UNUSED_A2FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A3FF: /* 0x1a3 */
+/* File: x86-atom/OP_UNUSED_A3FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A4FF: /* 0x1a4 */
+/* File: x86-atom/OP_UNUSED_A4FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A5FF: /* 0x1a5 */
+/* File: x86-atom/OP_UNUSED_A5FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A6FF: /* 0x1a6 */
+/* File: x86-atom/OP_UNUSED_A6FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A7FF: /* 0x1a7 */
+/* File: x86-atom/OP_UNUSED_A7FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A8FF: /* 0x1a8 */
+/* File: x86-atom/OP_UNUSED_A8FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_A9FF: /* 0x1a9 */
+/* File: x86-atom/OP_UNUSED_A9FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_AAFF: /* 0x1aa */
+/* File: x86-atom/OP_UNUSED_AAFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_ABFF: /* 0x1ab */
+/* File: x86-atom/OP_UNUSED_ABFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_ACFF: /* 0x1ac */
+/* File: x86-atom/OP_UNUSED_ACFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_ADFF: /* 0x1ad */
+/* File: x86-atom/OP_UNUSED_ADFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_AEFF: /* 0x1ae */
+/* File: x86-atom/OP_UNUSED_AEFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_AFFF: /* 0x1af */
+/* File: x86-atom/OP_UNUSED_AFFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B0FF: /* 0x1b0 */
+/* File: x86-atom/OP_UNUSED_B0FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B1FF: /* 0x1b1 */
+/* File: x86-atom/OP_UNUSED_B1FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B2FF: /* 0x1b2 */
+/* File: x86-atom/OP_UNUSED_B2FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B3FF: /* 0x1b3 */
+/* File: x86-atom/OP_UNUSED_B3FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B4FF: /* 0x1b4 */
+/* File: x86-atom/OP_UNUSED_B4FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B5FF: /* 0x1b5 */
+/* File: x86-atom/OP_UNUSED_B5FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B6FF: /* 0x1b6 */
+/* File: x86-atom/OP_UNUSED_B6FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B7FF: /* 0x1b7 */
+/* File: x86-atom/OP_UNUSED_B7FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B8FF: /* 0x1b8 */
+/* File: x86-atom/OP_UNUSED_B8FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_B9FF: /* 0x1b9 */
+/* File: x86-atom/OP_UNUSED_B9FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BAFF: /* 0x1ba */
+/* File: x86-atom/OP_UNUSED_BAFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BBFF: /* 0x1bb */
+/* File: x86-atom/OP_UNUSED_BBFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BCFF: /* 0x1bc */
+/* File: x86-atom/OP_UNUSED_BCFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BDFF: /* 0x1bd */
+/* File: x86-atom/OP_UNUSED_BDFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BEFF: /* 0x1be */
+/* File: x86-atom/OP_UNUSED_BEFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_BFFF: /* 0x1bf */
+/* File: x86-atom/OP_UNUSED_BFFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C0FF: /* 0x1c0 */
+/* File: x86-atom/OP_UNUSED_C0FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C1FF: /* 0x1c1 */
+/* File: x86-atom/OP_UNUSED_C1FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C2FF: /* 0x1c2 */
+/* File: x86-atom/OP_UNUSED_C2FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C3FF: /* 0x1c3 */
+/* File: x86-atom/OP_UNUSED_C3FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C4FF: /* 0x1c4 */
+/* File: x86-atom/OP_UNUSED_C4FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C5FF: /* 0x1c5 */
+/* File: x86-atom/OP_UNUSED_C5FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C6FF: /* 0x1c6 */
+/* File: x86-atom/OP_UNUSED_C6FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C7FF: /* 0x1c7 */
+/* File: x86-atom/OP_UNUSED_C7FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C8FF: /* 0x1c8 */
+/* File: x86-atom/OP_UNUSED_C8FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_C9FF: /* 0x1c9 */
+/* File: x86-atom/OP_UNUSED_C9FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CAFF: /* 0x1ca */
+/* File: x86-atom/OP_UNUSED_CAFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CBFF: /* 0x1cb */
+/* File: x86-atom/OP_UNUSED_CBFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CCFF: /* 0x1cc */
+/* File: x86-atom/OP_UNUSED_CCFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CDFF: /* 0x1cd */
+/* File: x86-atom/OP_UNUSED_CDFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CEFF: /* 0x1ce */
+/* File: x86-atom/OP_UNUSED_CEFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_CFFF: /* 0x1cf */
+/* File: x86-atom/OP_UNUSED_CFFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D0FF: /* 0x1d0 */
+/* File: x86-atom/OP_UNUSED_D0FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D1FF: /* 0x1d1 */
+/* File: x86-atom/OP_UNUSED_D1FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D2FF: /* 0x1d2 */
+/* File: x86-atom/OP_UNUSED_D2FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D3FF: /* 0x1d3 */
+/* File: x86-atom/OP_UNUSED_D3FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D4FF: /* 0x1d4 */
+/* File: x86-atom/OP_UNUSED_D4FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D5FF: /* 0x1d5 */
+/* File: x86-atom/OP_UNUSED_D5FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D6FF: /* 0x1d6 */
+/* File: x86-atom/OP_UNUSED_D6FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D7FF: /* 0x1d7 */
+/* File: x86-atom/OP_UNUSED_D7FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D8FF: /* 0x1d8 */
+/* File: x86-atom/OP_UNUSED_D8FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_D9FF: /* 0x1d9 */
+/* File: x86-atom/OP_UNUSED_D9FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DAFF: /* 0x1da */
+/* File: x86-atom/OP_UNUSED_DAFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DBFF: /* 0x1db */
+/* File: x86-atom/OP_UNUSED_DBFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DCFF: /* 0x1dc */
+/* File: x86-atom/OP_UNUSED_DCFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DDFF: /* 0x1dd */
+/* File: x86-atom/OP_UNUSED_DDFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DEFF: /* 0x1de */
+/* File: x86-atom/OP_UNUSED_DEFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_DFFF: /* 0x1df */
+/* File: x86-atom/OP_UNUSED_DFFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E0FF: /* 0x1e0 */
+/* File: x86-atom/OP_UNUSED_E0FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E1FF: /* 0x1e1 */
+/* File: x86-atom/OP_UNUSED_E1FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E2FF: /* 0x1e2 */
+/* File: x86-atom/OP_UNUSED_E2FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E3FF: /* 0x1e3 */
+/* File: x86-atom/OP_UNUSED_E3FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E4FF: /* 0x1e4 */
+/* File: x86-atom/OP_UNUSED_E4FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E5FF: /* 0x1e5 */
+/* File: x86-atom/OP_UNUSED_E5FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E6FF: /* 0x1e6 */
+/* File: x86-atom/OP_UNUSED_E6FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E7FF: /* 0x1e7 */
+/* File: x86-atom/OP_UNUSED_E7FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E8FF: /* 0x1e8 */
+/* File: x86-atom/OP_UNUSED_E8FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E9FF: /* 0x1e9 */
+/* File: x86-atom/OP_UNUSED_E9FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EAFF: /* 0x1ea */
+/* File: x86-atom/OP_UNUSED_EAFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EBFF: /* 0x1eb */
+/* File: x86-atom/OP_UNUSED_EBFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_ECFF: /* 0x1ec */
+/* File: x86-atom/OP_UNUSED_ECFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EDFF: /* 0x1ed */
+/* File: x86-atom/OP_UNUSED_EDFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EEFF: /* 0x1ee */
+/* File: x86-atom/OP_UNUSED_EEFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EFFF: /* 0x1ef */
+/* File: x86-atom/OP_UNUSED_EFFF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_F0FF: /* 0x1f0 */
+/* File: x86-atom/OP_UNUSED_F0FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_F1FF: /* 0x1f1 */
+/* File: x86-atom/OP_UNUSED_F1FF.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_OBJECT_INIT_JUMBO: /* 0x1f2 */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: stub.S
+    */
+
+    SAVE_PC_FP_TO_GLUE %edx             # save program counter and frame pointer
+    pushl       rGLUE                   # push parameter glue
+    call        dvmMterp_OP_INVOKE_OBJECT_INIT_JUMBO      # call c-based implementation
+    lea         4(%esp), %esp
+    LOAD_PC_FP_FROM_GLUE                # restore program counter and frame pointer
+    FINISH_A                            # jump to next instruction
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_VOLATILE_JUMBO: /* 0x1f3 */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: stub.S
+    */
+
+    SAVE_PC_FP_TO_GLUE %edx             # save program counter and frame pointer
+    pushl       rGLUE                   # push parameter glue
+    call        dvmMterp_OP_IGET_VOLATILE_JUMBO      # call c-based implementation
+    lea         4(%esp), %esp
+    LOAD_PC_FP_FROM_GLUE                # restore program counter and frame pointer
+    FINISH_A                            # jump to next instruction
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_WIDE_VOLATILE_JUMBO: /* 0x1f4 */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: stub.S
+    */
+
+    SAVE_PC_FP_TO_GLUE %edx             # save program counter and frame pointer
+    pushl       rGLUE                   # push parameter glue
+    call        dvmMterp_OP_IGET_WIDE_VOLATILE_JUMBO      # call c-based implementation
+    lea         4(%esp), %esp
+    LOAD_PC_FP_FROM_GLUE                # restore program counter and frame pointer
+    FINISH_A                            # jump to next instruction
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_OBJECT_VOLATILE_JUMBO: /* 0x1f5 */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: stub.S
+    */
+
+    SAVE_PC_FP_TO_GLUE %edx             # save program counter and frame pointer
+    pushl       rGLUE                   # push parameter glue
+    call        dvmMterp_OP_IGET_OBJECT_VOLATILE_JUMBO      # call c-based implementation
+    lea         4(%esp), %esp
+    LOAD_PC_FP_FROM_GLUE                # restore program counter and frame pointer
+    FINISH_A                            # jump to next instruction
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_VOLATILE_JUMBO: /* 0x1f6 */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: stub.S
+    */
+
+    SAVE_PC_FP_TO_GLUE %edx             # save program counter and frame pointer
+    pushl       rGLUE                   # push parameter glue
+    call        dvmMterp_OP_IPUT_VOLATILE_JUMBO      # call c-based implementation
+    lea         4(%esp), %esp
+    LOAD_PC_FP_FROM_GLUE                # restore program counter and frame pointer
+    FINISH_A                            # jump to next instruction
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_WIDE_VOLATILE_JUMBO: /* 0x1f7 */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: stub.S
+    */
+
+    SAVE_PC_FP_TO_GLUE %edx             # save program counter and frame pointer
+    pushl       rGLUE                   # push parameter glue
+    call        dvmMterp_OP_IPUT_WIDE_VOLATILE_JUMBO      # call c-based implementation
+    lea         4(%esp), %esp
+    LOAD_PC_FP_FROM_GLUE                # restore program counter and frame pointer
+    FINISH_A                            # jump to next instruction
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_OBJECT_VOLATILE_JUMBO: /* 0x1f8 */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: stub.S
+    */
+
+    SAVE_PC_FP_TO_GLUE %edx             # save program counter and frame pointer
+    pushl       rGLUE                   # push parameter glue
+    call        dvmMterp_OP_IPUT_OBJECT_VOLATILE_JUMBO      # call c-based implementation
+    lea         4(%esp), %esp
+    LOAD_PC_FP_FROM_GLUE                # restore program counter and frame pointer
+    FINISH_A                            # jump to next instruction
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_VOLATILE_JUMBO: /* 0x1f9 */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: stub.S
+    */
+
+    SAVE_PC_FP_TO_GLUE %edx             # save program counter and frame pointer
+    pushl       rGLUE                   # push parameter glue
+    call        dvmMterp_OP_SGET_VOLATILE_JUMBO      # call c-based implementation
+    lea         4(%esp), %esp
+    LOAD_PC_FP_FROM_GLUE                # restore program counter and frame pointer
+    FINISH_A                            # jump to next instruction
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_WIDE_VOLATILE_JUMBO: /* 0x1fa */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: stub.S
+    */
+
+    SAVE_PC_FP_TO_GLUE %edx             # save program counter and frame pointer
+    pushl       rGLUE                   # push parameter glue
+    call        dvmMterp_OP_SGET_WIDE_VOLATILE_JUMBO      # call c-based implementation
+    lea         4(%esp), %esp
+    LOAD_PC_FP_FROM_GLUE                # restore program counter and frame pointer
+    FINISH_A                            # jump to next instruction
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_OBJECT_VOLATILE_JUMBO: /* 0x1fb */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: stub.S
+    */
+
+    SAVE_PC_FP_TO_GLUE %edx             # save program counter and frame pointer
+    pushl       rGLUE                   # push parameter glue
+    call        dvmMterp_OP_SGET_OBJECT_VOLATILE_JUMBO      # call c-based implementation
+    lea         4(%esp), %esp
+    LOAD_PC_FP_FROM_GLUE                # restore program counter and frame pointer
+    FINISH_A                            # jump to next instruction
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_VOLATILE_JUMBO: /* 0x1fc */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: stub.S
+    */
+
+    SAVE_PC_FP_TO_GLUE %edx             # save program counter and frame pointer
+    pushl       rGLUE                   # push parameter glue
+    call        dvmMterp_OP_SPUT_VOLATILE_JUMBO      # call c-based implementation
+    lea         4(%esp), %esp
+    LOAD_PC_FP_FROM_GLUE                # restore program counter and frame pointer
+    FINISH_A                            # jump to next instruction
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_WIDE_VOLATILE_JUMBO: /* 0x1fd */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: stub.S
+    */
+
+    SAVE_PC_FP_TO_GLUE %edx             # save program counter and frame pointer
+    pushl       rGLUE                   # push parameter glue
+    call        dvmMterp_OP_SPUT_WIDE_VOLATILE_JUMBO      # call c-based implementation
+    lea         4(%esp), %esp
+    LOAD_PC_FP_FROM_GLUE                # restore program counter and frame pointer
+    FINISH_A                            # jump to next instruction
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_OBJECT_VOLATILE_JUMBO: /* 0x1fe */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: stub.S
+    */
+
+    SAVE_PC_FP_TO_GLUE %edx             # save program counter and frame pointer
+    pushl       rGLUE                   # push parameter glue
+    call        dvmMterp_OP_SPUT_OBJECT_VOLATILE_JUMBO      # call c-based implementation
+    lea         4(%esp), %esp
+    LOAD_PC_FP_FROM_GLUE                # restore program counter and frame pointer
+    FINISH_A                            # jump to next instruction
+/* ------------------------------ */
+    .balign 64
+.L_OP_THROW_VERIFICATION_ERROR_JUMBO: /* 0x1ff */
+/* File: x86-atom/OP_THROW_VERIFICATION_ERROR_JUMBO.S */
+/* File: x86-atom/unused.S */
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: unused.S
+    *
+    * Code: Common code for unused bytecodes. Uses no substitutions.
+    *
+    * For: all unused bytecodes
+    *
+    * Description: aborts if executed.
+    *
+    * Format: ØØ|op (10x)
+    *
+    * Syntax: op
+    */
+
+    call        common_abort
+
 
     .balign 64
     .size   dvmAsmInstructionStart, .-dvmAsmInstructionStart
@@ -15464,6 +24806,13 @@
     */
 
     EXPORT_PC                           # we will throw an exception
+#error BIT ROT!!!
+    /*
+     * TODO: Code here needs to call dvmThrowClassCastException with two
+     * arguments.
+     */
+#if 0
+    /* old obsolete code that called dvmThrowExceptionWithClassMessage */
     movl        $.LstrClassCastExceptionPtr, -8(%esp) # push parameter message
     movl        offObject_clazz(rINST), rINST # rINST<- obj->clazz
     movl        offClassObject_descriptor(rINST), rINST # rINST<- obj->clazz->descriptor
@@ -15472,6 +24821,7 @@
     call        dvmThrowExceptionWithClassMessage # call: (const char* exceptionDescriptor,
                                                   #       const char* messageDescriptor, Object* cause)
                                                   # return: void
+#endif
     lea         8(%esp), %esp
     jmp         common_exceptionThrown
 
@@ -15500,9 +24850,6 @@
     movl        %eax, %ecx              # %ecx<- resolved class
     jmp         .LOP_CHECK_CAST_resolved
 
-.LstrClassCastExceptionPtr:
-.asciz      "Ljava/lang/ClassCastException;"
-
 /* continuation for OP_INSTANCE_OF */
 
 .LOP_INSTANCE_OF_break:
@@ -17836,7 +27183,7 @@
 .long .L_OP_THROW_VERIFICATION_ERROR
 .long .L_OP_EXECUTE_INLINE
 .long .L_OP_EXECUTE_INLINE_RANGE
-.long .L_OP_INVOKE_DIRECT_EMPTY
+.long .L_OP_INVOKE_OBJECT_INIT_RANGE
 .long .L_OP_UNUSED_F1
 .long .L_OP_IGET_QUICK
 .long .L_OP_IGET_WIDE_QUICK
@@ -18502,8 +27849,6 @@
     .asciz "Ljava/lang/ArrayIndexOutOfBoundsException;"
 .LstrArrayStoreException:
     .asciz "Ljava/lang/ArrayStoreException;"
-.LstrClassCastException:
-    .asciz "Ljava/lang/ClassCastException;"
 .LstrDivideByZero:
     .asciz "divide by zero"
 .LstrInstantiationError:
diff --git a/vm/mterp/out/InterpAsm-x86.S b/vm/mterp/out/InterpAsm-x86.S
index b2bcd08..c2c0e65 100644
--- a/vm/mterp/out/InterpAsm-x86.S
+++ b/vm/mterp/out/InterpAsm-x86.S
@@ -55,7 +55,7 @@
 Mterp notes:
 
 Some key interpreter variables will be assigned to registers.  Note that each
-will also have an associated spill location (mostly used useful for those assigned
+will also have an associated spill location (mostly useful for those assigned
 to callee save registers).
 
   nick     reg   purpose
@@ -64,30 +64,30 @@
   rINSTw   bx    first 16-bit code of current instruction
   rINSTbl  bl    opcode portion of instruction word
   rINSTbh  bh    high byte of inst word, usually contains src/tgt reg names
+  rIBASE   edx   base of instruction handler table
 
 Notes:
    o High order 16 bits of ebx must be zero on entry to handler
    o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
-   o eax, edx and ecx are scratch, rINSTw/ebx sometimes scratch
-   o rPC is in the caller save set, and will be killed across external calls. Don't
-     forget to SPILL/UNSPILL it around call points
+   o eax and ecx are scratch, rINSTw/ebx sometimes scratch
 
 */
 
-#define rGLUE    (%ebp)
+#define rSELF    (%ebp)
 #define rPC      %esi
 #define rFP      %edi
 #define rINST    %ebx
 #define rINSTw   %bx
 #define rINSTbh  %bh
 #define rINSTbl  %bl
+#define rIBASE   %edx
 
 
 /* Frame diagram while executing dvmMterpStdRun, high to low addresses */
 #define IN_ARG0        ( 12)
 #define CALLER_RP      (  8)
 #define PREV_FP        (  4)
-#define rGLUE_SPILL    (  0) /* <- dvmMterpStdRun ebp */
+#define rSELF_SPILL    (  0) /* <- dvmMterpStdRun ebp */
 /* Spill offsets relative to %ebp */
 #define EDI_SPILL      ( -4)
 #define ESI_SPILL      ( -8)
@@ -95,13 +95,13 @@
 #define rPC_SPILL      (-16)
 #define rFP_SPILL      (-20)
 #define rINST_SPILL    (-24)
-#define TMP_SPILL1     (-28)
-#define TMP_SPILL2     (-32)
-#define TMP_SPILL3     (-36)
-#define LOCAL0_OFFSET  (-40)
-#define LOCAL1_OFFSET  (-44)
-#define LOCAL2_OFFSET  (-48)
-#define LOCAL3_OFFSET  (-52)
+#define rIBASE_SPILL   (-28)
+#define TMP_SPILL1     (-32)
+#define TMP_SPILL2     (-36)
+#define TMP_SPILL3     (-40)
+#define LOCAL0_OFFSET  (-44)
+#define LOCAL1_OFFSET  (-48)
+#define LOCAL2_OFFSET  (-52)
 /* Out Arg offsets, relative to %sp */
 #define OUT_ARG4       ( 16)
 #define OUT_ARG3       ( 12)
@@ -119,17 +119,26 @@
 #define SPILL_TMP3(reg) movl reg,TMP_SPILL3(%ebp)
 #define UNSPILL_TMP3(reg) movl TMP_SPILL3(%ebp),reg
 
-/* save/restore the PC and/or FP from the glue struct */
-.macro SAVE_PC_FP_TO_GLUE _reg
-    movl     rGLUE,\_reg
-    movl     rPC,offGlue_pc(\_reg)
-    movl     rFP,offGlue_fp(\_reg)
+#if defined(WITH_JIT)
+.macro GET_JIT_PROF_TABLE _self _reg
+    movl    offThread_pJitProfTable(\_self),\_reg
+.endm
+.macro GET_JIT_THRESHOLD _self _reg
+    movl    offThread_jitThreshold(\_self),\_reg
+.endm
+#endif
+
+/* save/restore the PC and/or FP from the self struct */
+.macro SAVE_PC_FP_TO_SELF _reg
+    movl     rSELF,\_reg
+    movl     rPC,offThread_pc(\_reg)
+    movl     rFP,offThread_fp(\_reg)
 .endm
 
-.macro LOAD_PC_FP_FROM_GLUE
-    movl    rGLUE,rFP
-    movl    offGlue_pc(rFP),rPC
-    movl    offGlue_fp(rFP),rFP
+.macro LOAD_PC_FP_FROM_SELF
+    movl    rSELF,rFP
+    movl    offThread_pc(rFP),rPC
+    movl    offThread_fp(rFP),rFP
 .endm
 
 /* The interpreter assumes a properly aligned stack on entry, and
@@ -138,7 +147,7 @@
 
 /*
  * "export" the PC to the interpreted stack frame, f/b/o future exception
- * objects.  Must * be done *before* something calls dvmThrowException.
+ * objects.  Must be done *before* something throws.
  *
  * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e.
  * fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc)
@@ -214,7 +223,7 @@
 .macro GOTO_NEXT
      movzx   rINSTbl,%eax
      movzbl  rINSTbh,rINST
-     jmp     *dvmAsmInstructionJmpTable(,%eax,4)
+     jmp     *(rIBASE,%eax,4)
 .endm
 
    /*
@@ -223,7 +232,17 @@
     */
 .macro GOTO_NEXT_R _reg
      movzbl  1(rPC),rINST
-     jmp     *dvmAsmInstructionJmpTable(,\_reg,4)
+     jmp     *(rIBASE,\_reg,4)
+.endm
+
+   /*
+    * Jumbo version of GOTO_NEXT that assumes _reg preloaded with table
+    * offset of the jumbo instruction, which is the top half of the extended
+    * opcode + 0x100.  Loads rINST with BBBB field, similar to GOTO_NEXT_R
+    */
+.macro GOTO_NEXT_JUMBO_R _reg
+     movzwl  6(rPC),rINST
+     jmp     *(rIBASE,\_reg,4)
 .endm
 
 /*
@@ -245,310 +264,9 @@
     movl     \_reg,4*(\_offset)(rFP,\_vreg,4)
 .endm
 
-#if 1
-
-#define rFinish %edx
-
-/* Macros for x86-atom handlers */
-    /*
-    * Get the 32-bit value from a dalvik register.
-    */
-
-    .macro      GET_VREG _vreg
-    movl        (rFP,\_vreg, 4), \_vreg
-    .endm
-
-   /*
-    * Fetch the next instruction from the specified offset. Advances rPC
-    * to point to the next instruction. "_count" is in 16-bit code units.
-    *
-    * This must come AFTER anything that can throw an exception, or the
-    * exception catch may miss. (This also implies that it must come after
-    * EXPORT_PC())
-    */
-
-    .macro      FETCH_ADVANCE_INST _count
-    add         $(\_count*2), rPC
-    movzwl      (rPC), rINST
-    .endm
-
-   /*
-    * Fetch the next instruction from an offset specified by _reg. Updates
-    * rPC to point to the next instruction. "_reg" must specify the distance
-    * in bytes, *not* 16-bit code units, and may be a signed value.
-    */
-
-    .macro      FETCH_ADVANCE_INST_RB _reg
-    addl        \_reg, rPC
-    movzwl      (rPC), rINST
-    .endm
-
-   /*
-    * Fetch a half-word code unit from an offset past the current PC. The
-    * "_count" value is in 16-bit code units. Does not advance rPC.
-    * For example, given instruction of format: AA|op BBBB, it
-    * fetches BBBB.
-    */
-
-    .macro      FETCH _count _reg
-    movzwl      (\_count*2)(rPC), \_reg
-    .endm
-
-   /*
-    * Fetch a half-word code unit from an offset past the current PC. The
-    * "_count" value is in 16-bit code units. Does not advance rPC.
-    * This variant treats the value as signed.
-    */
-
-    .macro      FETCHs _count _reg
-    movswl      (\_count*2)(rPC), \_reg
-    .endm
-
-   /*
-    * Fetch the first byte from an offset past the current PC. The
-    * "_count" value is in 16-bit code units. Does not advance rPC.
-    * For example, given instruction of format: AA|op CC|BB, it
-    * fetches BB.
-    */
-
-    .macro      FETCH_BB _count _reg
-    movzbl      (\_count*2)(rPC), \_reg
-    .endm
-
-    /*
-    * Fetch the second byte from an offset past the current PC. The
-    * "_count" value is in 16-bit code units. Does not advance rPC.
-    * For example, given instruction of format: AA|op CC|BB, it
-    * fetches CC.
-    */
-
-    .macro      FETCH_CC _count _reg
-    movzbl      (\_count*2 + 1)(rPC), \_reg
-    .endm
-
-   /*
-    * Fetch the second byte from an offset past the current PC. The
-    * "_count" value is in 16-bit code units. Does not advance rPC.
-    * This variant treats the value as signed.
-    */
-
-    .macro      FETCH_CCs _count _reg
-    movsbl      (\_count*2 + 1)(rPC), \_reg
-    .endm
-
-
-   /*
-    * Fetch one byte from an offset past the current PC.  Pass in the same
-    * "_count" as you would for FETCH, and an additional 0/1 indicating which
-    * byte of the halfword you want (lo/hi).
-    */
-
-    .macro      FETCH_B _reg  _count  _byte
-    movzbl      (\_count*2+\_byte)(rPC), \_reg
-    .endm
-
-   /*
-    * Put the instruction's opcode field into the specified register.
-    */
-
-    .macro      GET_INST_OPCODE _reg
-    movzbl      rINSTbl, \_reg
-    .endm
-
-   /*
-    * Begin executing the opcode in _reg.
-    */
-
-    .macro      GOTO_OPCODE _reg
-    shl         $6, \_reg
-    addl        $dvmAsmInstructionStart,\_reg
-    jmp         *\_reg
-    .endm
-
-
-
-   /*
-    * Macros pair attempts to speed up FETCH_INST, GET_INST_OPCODE and GOTO_OPCODE
-    * by using a jump table. _rFinish should must be the same register for
-    * both macros.
-    */
-
-    .macro      FFETCH _rFinish
-    movzbl      (rPC), \_rFinish
-    .endm
-
-    .macro      FGETOP_JMPa _rFinish
-    movzbl      1(rPC), rINST
-    jmp         *dvmAsmInstructionJmpTable(,\_rFinish, 4)
-    .endm
-
-   /*
-    * Macro pair attempts to speed up FETCH_INST, GET_INST_OPCODE and GOTO_OPCODE
-    * by using a jump table. _rFinish and _count should must be the same register for
-    * both macros.
-    */
-
-    .macro      FFETCH_ADV _count _rFinish
-    movzbl      (\_count*2)(rPC), \_rFinish
-    .endm
-
-    .macro      FGETOP_JMP _count _rFinish
-    movzbl      (\_count*2 + 1)(rPC), rINST
-    addl        $(\_count*2), rPC
-    jmp         *dvmAsmInstructionJmpTable(,\_rFinish, 4)
-    .endm
-
-    .macro      FGETOP_JMP2 _rFinish
-    movzbl      1(rPC), rINST
-    jmp         *dvmAsmInstructionJmpTable(,\_rFinish, 4)
-    .endm
-
-    .macro      OLD_JMP_1 _count _rFinish
-    movzbl      (\_count*2)(rPC), \_rFinish
-    shl         $6, \_rFinish
-    .endm
-
-    .macro      OLD_JMP_2 _rFinish
-    addl        $dvmAsmInstructionStart,\_rFinish
-    .endm
-
-    .macro      OLD_JMP_3 _count
-    addl        $(\_count*2), rPC
-    .endm
-
-    .macro      OLD_JMP_4 _rFinish
-    movzbl      1(rPC), rINST
-    jmp         *\_rFinish
-    .endm
-
-    .macro      OLD_JMP_A_1 _reg _rFinish
-    movzbl      (rPC, \_reg), \_rFinish
-    shl         $6, \_rFinish
-    .endm
-
-    .macro      OLD_JMP_A_2 _rFinish
-    addl        $dvmAsmInstructionStart,\_rFinish
-    .endm
-
-    .macro      OLD_JMP_A_3 _reg _rFinish
-    addl        \_reg, rPC
-    movzbl      1(rPC, \_reg), rINST
-    jmp         *\_rFinish
-    .endm
-
-   /*
-    * Macro pair attempts to speed up FETCH_INST, GET_INST_OPCODE and GOTO_OPCODE
-    * by using a jump table. _rFinish and _reg should must be the same register for
-    * both macros.
-    */
-
-    .macro      FFETCH_ADV_RB _reg _rFinish
-    movzbl      (\_reg, rPC), \_rFinish
-    .endm
-
-    .macro      FGETOP_RB_JMP _reg _rFinish
-    movzbl      1(\_reg, rPC), rINST
-    addl        \_reg, rPC
-    jmp         *dvmAsmInstructionJmpTable(,\_rFinish, 4)
-    .endm
-
-   /*
-    * Attempts to speed up FETCH_INST, GET_INST_OPCODE using
-    * a jump table. This macro should be called before FINISH_JMP where
-    * rFinish should be the same register containing the opcode value.
-    * This is an attempt to split up FINISH in order to reduce or remove
-    * potential stalls due to the wait for rFINISH.
-    */
-
-    .macro      FINISH_FETCH _rFinish
-    movzbl      (rPC), \_rFinish
-    movzbl      1(rPC), rINST
-    .endm
-
-
-   /*
-    * Attempts to speed up FETCH_ADVANCE_INST, GET_INST_OPCODE using
-    * a jump table. This macro should be called before FINISH_JMP where
-    * rFinish should be the same register containing the opcode value.
-    * This is an attempt to split up FINISH in order to reduce or remove
-    * potential stalls due to the wait for rFINISH.
-    */
-
-    .macro      FINISH_FETCH_ADVANCE _count _rFinish
-    movzbl      (\_count*2)(rPC), \_rFinish
-    movzbl      (\_count*2 + 1)(rPC), rINST
-    addl        $(\_count*2), rPC
-    .endm
-
-   /*
-    * Attempts to speed up FETCH_ADVANCE_INST_RB, GET_INST_OPCODE using
-    * a jump table. This macro should be called before FINISH_JMP where
-    * rFinish should be the same register containing the opcode value.
-    * This is an attempt to split up FINISH in order to reduce or remove
-    * potential stalls due to the wait for rFINISH.
-    */
-
-    .macro      FINISH_FETCH_ADVANCE_RB _reg _rFinish
-    movzbl      (\_reg, rPC), \_rFinish
-    movzbl      1(\_reg, rPC), rINST
-    addl        \_reg, rPC
-    .endm
-
-   /*
-    * Attempts to speed up GOTO_OPCODE using a jump table. This macro should
-    * be called after a FINISH_FETCH* instruction where rFinish should be the
-    * same register containing the opcode value. This is an attempt to split up
-    * FINISH in order to reduce or remove potential stalls due to the wait for rFINISH.
-    */
-
-    .macro      FINISH_JMP _rFinish
-    jmp         *dvmAsmInstructionJmpTable(,\_rFinish, 4)
-    .endm
-
-   /*
-    * Attempts to speed up FETCH_INST, GET_INST_OPCODE, GOTO_OPCODE by using
-    * a jump table. Uses a single macro - but it should be faster if we
-    * split up the fetch for rFinish and the jump using rFinish.
-    */
-
-    .macro      FINISH_A
-    movzbl      (rPC), rFinish
-    movzbl      1(rPC), rINST
-    jmp         *dvmAsmInstructionJmpTable(,rFinish, 4)
-    .endm
-
-   /*
-    * Attempts to speed up FETCH_ADVANCE_INST, GET_INST_OPCODE,
-    * GOTO_OPCODE by using a jump table. Uses a single macro -
-    * but it should be faster if we split up the fetch for rFinish
-    * and the jump using rFinish.
-    */
-
-    .macro      FINISH _count
-    movzbl      (\_count*2)(rPC), rFinish
-    movzbl      (\_count*2 + 1)(rPC), rINST
-    addl        $(\_count*2), rPC
-    jmp         *dvmAsmInstructionJmpTable(,rFinish, 4)
-    .endm
-
-   /*
-    * Attempts to speed up FETCH_ADVANCE_INST_RB, GET_INST_OPCODE,
-    * GOTO_OPCODE by using a jump table. Uses a single macro -
-    * but it should be faster if we split up the fetch for rFinish
-    * and the jump using rFinish.
-    */
-
-    .macro      FINISH_RB _reg _rFinish
-    movzbl      (\_reg, rPC), \_rFinish
-    movzbl      1(\_reg, rPC), rINST
-    addl        \_reg, rPC
-    jmp         *dvmAsmInstructionJmpTable(,\_rFinish, 4)
-    .endm
-
 #define sReg0 LOCAL0_OFFSET(%ebp)
 #define sReg1 LOCAL1_OFFSET(%ebp)
 #define sReg2 LOCAL2_OFFSET(%ebp)
-#define sReg3 LOCAL3_OFFSET(%ebp)
 
    /*
     * Hard coded helper values.
@@ -584,7 +302,6 @@
 
 .LintMax:
 .long   0x7FFFFFFF
-#endif
 
 
 /*
@@ -593,22 +310,24 @@
  */
 #include "../common/asm-constants.h"
 
+#if defined(WITH_JIT)
+#include "../common/jit-config.h"
+#endif
 
-    .global dvmAsmInstructionStart
-    .type   dvmAsmInstructionStart, %function
-dvmAsmInstructionStart = .L_OP_NOP
+
+    .global dvmAsmInstructionStartCode
+    .type   dvmAsmInstructionStartCode, %function
+dvmAsmInstructionStartCode = .L_OP_NOP
     .text
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_NOP: /* 0x00 */
 /* File: x86/OP_NOP.S */
-    FETCH_INST_OPCODE 1 %edx
+    FETCH_INST_OPCODE 1 %ecx
     ADVANCE_PC 1
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_MOVE: /* 0x01 */
 /* File: x86/OP_MOVE.S */
     /* for move, move-object, long-to-int */
@@ -616,42 +335,39 @@
     movzbl rINSTbl,%eax          # eax<- BA
     andb   $0xf,%al             # eax<- A
     shrl   $4,rINST            # rINST<- B
-    GET_VREG_R %ecx rINST
-    FETCH_INST_OPCODE 1 %edx
+    GET_VREG_R rINST rINST
+    FETCH_INST_OPCODE 1 %ecx
     ADVANCE_PC 1
-    SET_VREG %ecx %eax           # fp[A]<-fp[B]
-    GOTO_NEXT_R %edx
+    SET_VREG rINST %eax           # fp[A]<-fp[B]
+    GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_MOVE_FROM16: /* 0x02 */
 /* File: x86/OP_MOVE_FROM16.S */
     /* for: move/from16, move-object/from16 */
     /* op vAA, vBBBB */
     movzx    rINSTbl,%eax              # eax <= AA
     movw     2(rPC),rINSTw             # rINSTw <= BBBB
-    GET_VREG_R %ecx rINST              # ecx<- fp[BBBB]
-    FETCH_INST_OPCODE 2 %edx
+    GET_VREG_R rINST rINST             # rINST<- fp[BBBB]
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    SET_VREG %ecx %eax                # fp[AA]<- ecx]
-    GOTO_NEXT_R %edx
+    SET_VREG rINST %eax                # fp[AA]<- rINST
+    GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_MOVE_16: /* 0x03 */
 /* File: x86/OP_MOVE_16.S */
     /* for: move/16, move-object/16 */
     /* op vAAAA, vBBBB */
     movzwl    4(rPC),%ecx              # ecx<- BBBB
     movzwl    2(rPC),%eax              # eax<- AAAA
-    GET_VREG_R  %ecx %ecx
-    FETCH_INST_OPCODE 3 %edx
+    GET_VREG_R  rINST %ecx
+    FETCH_INST_OPCODE 3 %ecx
     ADVANCE_PC 3
-    SET_VREG  %ecx %eax
-    GOTO_NEXT_R %edx
+    SET_VREG  rINST %eax
+    GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_MOVE_WIDE: /* 0x04 */
 /* File: x86/OP_MOVE_WIDE.S */
     /* move-wide vA, vB */
@@ -661,14 +377,13 @@
     GET_VREG_WORD %eax rINST 0            # eax<- v[B+0]
     GET_VREG_WORD rINST rINST 1           # rINST<- v[B+1]
     andb      $0xf,%cl                   # ecx <- A
-    FETCH_INST_OPCODE 1 %edx
     SET_VREG_WORD rINST %ecx 1            # v[A+1]<- rINST
-    ADVANCE_PC 1
     SET_VREG_WORD %eax %ecx 0             # v[A+0]<- eax
-    GOTO_NEXT_R %edx
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_MOVE_WIDE_FROM16: /* 0x05 */
 /* File: x86/OP_MOVE_WIDE_FROM16.S */
     /* move-wide/from16 vAA, vBBBB */
@@ -677,14 +392,13 @@
     movzbl    rINSTbl,%eax             # eax<- AAAA
     GET_VREG_WORD rINST %ecx 0         # rINST<- v[BBBB+0]
     GET_VREG_WORD %ecx %ecx 1          # ecx<- v[BBBB+1]
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
     SET_VREG_WORD rINST %eax 0         # v[AAAA+0]<- rINST
     SET_VREG_WORD %ecx %eax 1          # v[AAAA+1]<- eax
-    GOTO_NEXT_R %edx
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_MOVE_WIDE_16: /* 0x06 */
 /* File: x86/OP_MOVE_WIDE_16.S */
     /* move-wide/16 vAAAA, vBBBB */
@@ -693,14 +407,13 @@
     movzwl    2(rPC),%eax            # eax<- AAAA
     GET_VREG_WORD rINST %ecx 0       # rINSTw_WORD<- v[BBBB+0]
     GET_VREG_WORD %ecx %ecx 1        # ecx<- v[BBBB+1]
-    FETCH_INST_OPCODE 3 %edx
     SET_VREG_WORD rINST %eax 0       # v[AAAA+0]<- rINST
-    ADVANCE_PC 3
     SET_VREG_WORD %ecx %eax 1        # v[AAAA+1]<- ecx
-    GOTO_NEXT_R %edx
+    FETCH_INST_OPCODE 3 %ecx
+    ADVANCE_PC 3
+    GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_MOVE_OBJECT: /* 0x07 */
 /* File: x86/OP_MOVE_OBJECT.S */
 /* File: x86/OP_MOVE.S */
@@ -709,15 +422,14 @@
     movzbl rINSTbl,%eax          # eax<- BA
     andb   $0xf,%al             # eax<- A
     shrl   $4,rINST            # rINST<- B
-    GET_VREG_R %ecx rINST
-    FETCH_INST_OPCODE 1 %edx
+    GET_VREG_R rINST rINST
+    FETCH_INST_OPCODE 1 %ecx
     ADVANCE_PC 1
-    SET_VREG %ecx %eax           # fp[A]<-fp[B]
-    GOTO_NEXT_R %edx
+    SET_VREG rINST %eax           # fp[A]<-fp[B]
+    GOTO_NEXT_R %ecx
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_MOVE_OBJECT_FROM16: /* 0x08 */
 /* File: x86/OP_MOVE_OBJECT_FROM16.S */
 /* File: x86/OP_MOVE_FROM16.S */
@@ -725,15 +437,14 @@
     /* op vAA, vBBBB */
     movzx    rINSTbl,%eax              # eax <= AA
     movw     2(rPC),rINSTw             # rINSTw <= BBBB
-    GET_VREG_R %ecx rINST              # ecx<- fp[BBBB]
-    FETCH_INST_OPCODE 2 %edx
+    GET_VREG_R rINST rINST             # rINST<- fp[BBBB]
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    SET_VREG %ecx %eax                # fp[AA]<- ecx]
-    GOTO_NEXT_R %edx
+    SET_VREG rINST %eax                # fp[AA]<- rINST
+    GOTO_NEXT_R %ecx
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_MOVE_OBJECT_16: /* 0x09 */
 /* File: x86/OP_MOVE_OBJECT_16.S */
 /* File: x86/OP_MOVE_16.S */
@@ -741,206 +452,190 @@
     /* op vAAAA, vBBBB */
     movzwl    4(rPC),%ecx              # ecx<- BBBB
     movzwl    2(rPC),%eax              # eax<- AAAA
-    GET_VREG_R  %ecx %ecx
-    FETCH_INST_OPCODE 3 %edx
+    GET_VREG_R  rINST %ecx
+    FETCH_INST_OPCODE 3 %ecx
     ADVANCE_PC 3
-    SET_VREG  %ecx %eax
-    GOTO_NEXT_R %edx
+    SET_VREG  rINST %eax
+    GOTO_NEXT_R %ecx
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_MOVE_RESULT: /* 0x0a */
 /* File: x86/OP_MOVE_RESULT.S */
     /* for: move-result, move-result-object */
     /* op vAA */
-    movl     rGLUE,%eax                    # eax<- rGLUE
-    movzx    rINSTbl,%ecx                  # ecx<- AA
-    movl     offGlue_retval(%eax),%eax     # eax<- glue->retval.l
-    FETCH_INST_OPCODE 1 %edx
+    movl     rSELF,%eax                    # eax<- rSELF
+    movl     offThread_retval(%eax),%eax   # eax<- self->retval.l
+    FETCH_INST_OPCODE 1 %ecx
     ADVANCE_PC 1
-    SET_VREG  %eax %ecx                    # fp[AA]<- retval.l
-    GOTO_NEXT_R %edx
+    SET_VREG  %eax rINST                   # fp[AA]<- retval.l
+    GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_MOVE_RESULT_WIDE: /* 0x0b */
 /* File: x86/OP_MOVE_RESULT_WIDE.S */
     /* move-result-wide vAA */
-    movl    rGLUE,%ecx
-    movl    offGlue_retval(%ecx),%eax
-    movl    4+offGlue_retval(%ecx),%ecx
-    FETCH_INST_OPCODE 1 %edx
+    movl    rSELF,%ecx
+    movl    offThread_retval(%ecx),%eax
+    movl    4+offThread_retval(%ecx),%ecx
     SET_VREG_WORD %eax rINST 0     # v[AA+0] <- eax
     SET_VREG_WORD %ecx rINST 1     # v[AA+1] <- ecx
+    FETCH_INST_OPCODE 1 %ecx
     ADVANCE_PC 1
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_MOVE_RESULT_OBJECT: /* 0x0c */
 /* File: x86/OP_MOVE_RESULT_OBJECT.S */
 /* File: x86/OP_MOVE_RESULT.S */
     /* for: move-result, move-result-object */
     /* op vAA */
-    movl     rGLUE,%eax                    # eax<- rGLUE
-    movzx    rINSTbl,%ecx                  # ecx<- AA
-    movl     offGlue_retval(%eax),%eax     # eax<- glue->retval.l
-    FETCH_INST_OPCODE 1 %edx
+    movl     rSELF,%eax                    # eax<- rSELF
+    movl     offThread_retval(%eax),%eax   # eax<- self->retval.l
+    FETCH_INST_OPCODE 1 %ecx
     ADVANCE_PC 1
-    SET_VREG  %eax %ecx                    # fp[AA]<- retval.l
-    GOTO_NEXT_R %edx
+    SET_VREG  %eax rINST                   # fp[AA]<- retval.l
+    GOTO_NEXT_R %ecx
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_MOVE_EXCEPTION: /* 0x0d */
 /* File: x86/OP_MOVE_EXCEPTION.S */
     /* move-exception vAA */
-    movl    rGLUE,%ecx
-    movl    offGlue_self(%ecx),%ecx    # ecx<- glue->self
+    movl    rSELF,%ecx
     movl    offThread_exception(%ecx),%eax # eax<- dvmGetException bypass
     SET_VREG %eax rINST                # fp[AA]<- exception object
-    FETCH_INST_OPCODE 1 %edx
+    FETCH_INST_OPCODE 1 %eax
     ADVANCE_PC 1
     movl    $0,offThread_exception(%ecx) # dvmClearException bypass
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %eax
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_RETURN_VOID: /* 0x0e */
 /* File: x86/OP_RETURN_VOID.S */
     jmp       common_returnFromMethod
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_RETURN: /* 0x0f */
 /* File: x86/OP_RETURN.S */
     /*
-     * Return a 32-bit value.  Copies the return value into the "glue"
+     * Return a 32-bit value.  Copies the return value into the "self"
      * structure, then jumps to the return handler.
      *
      * for: return, return-object
      */
     /* op vAA */
-    movl    rGLUE,%ecx
+    movl    rSELF,%ecx
     GET_VREG_R %eax rINST               # eax<- vAA
-    movl    %eax,offGlue_retval(%ecx)   # retval.i <- AA
+    movl    %eax,offThread_retval(%ecx)   # retval.i <- AA
     jmp     common_returnFromMethod
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_RETURN_WIDE: /* 0x10 */
 /* File: x86/OP_RETURN_WIDE.S */
     /*
-     * Return a 64-bit value.  Copies the return value into the "glue"
+     * Return a 64-bit value.  Copies the return value into the "self"
      * structure, then jumps to the return handler.
      */
     /* return-wide vAA */
-    movl    rGLUE,%ecx
+    movl    rSELF,%ecx
     GET_VREG_WORD %eax rINST 0       # eax<- v[AA+0]
     GET_VREG_WORD rINST rINST 1      # rINST<- v[AA+1]
-    movl    %eax,offGlue_retval(%ecx)
-    movl    rINST,4+offGlue_retval(%ecx)
+    movl    %eax,offThread_retval(%ecx)
+    movl    rINST,4+offThread_retval(%ecx)
     jmp     common_returnFromMethod
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_RETURN_OBJECT: /* 0x11 */
 /* File: x86/OP_RETURN_OBJECT.S */
 /* File: x86/OP_RETURN.S */
     /*
-     * Return a 32-bit value.  Copies the return value into the "glue"
+     * Return a 32-bit value.  Copies the return value into the "self"
      * structure, then jumps to the return handler.
      *
      * for: return, return-object
      */
     /* op vAA */
-    movl    rGLUE,%ecx
+    movl    rSELF,%ecx
     GET_VREG_R %eax rINST               # eax<- vAA
-    movl    %eax,offGlue_retval(%ecx)   # retval.i <- AA
+    movl    %eax,offThread_retval(%ecx)   # retval.i <- AA
     jmp     common_returnFromMethod
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_CONST_4: /* 0x12 */
 /* File: x86/OP_CONST_4.S */
     /* const/4 vA, #+B */
     movsx   rINSTbl,%eax              # eax<-ssssssBx
-    movl    $0xf,%ecx
-    andl    %eax,%ecx                 # ecx<- A
-    FETCH_INST_OPCODE 1 %edx
+    movl    $0xf,rINST
+    andl    %eax,rINST                # rINST<- A
+    FETCH_INST_OPCODE 1 %ecx
     ADVANCE_PC 1
     sarl    $4,%eax
-    SET_VREG %eax %ecx
-    GOTO_NEXT_R %edx
+    SET_VREG %eax rINST
+    GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_CONST_16: /* 0x13 */
 /* File: x86/OP_CONST_16.S */
     /* const/16 vAA, #+BBBB */
     movswl  2(rPC),%ecx                # ecx<- ssssBBBB
-    movl    rINST,%eax                 # eax<- AA
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %eax
     ADVANCE_PC 2
-    SET_VREG %ecx %eax                 # vAA<- ssssBBBB
-    GOTO_NEXT_R %edx
+    SET_VREG %ecx rINST                # vAA<- ssssBBBB
+    GOTO_NEXT_R %eax
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_CONST: /* 0x14 */
 /* File: x86/OP_CONST.S */
     /* const vAA, #+BBBBbbbb */
     movl      2(rPC),%eax             # grab all 32 bits at once
-    movl      rINST,%ecx              # ecx<- AA
-    FETCH_INST_OPCODE 3 %edx
+    movl      rINST,rINST             # rINST<- AA
+    FETCH_INST_OPCODE 3 %ecx
     ADVANCE_PC 3
-    SET_VREG %eax %ecx                # vAA<- eax
-    GOTO_NEXT_R %edx
+    SET_VREG %eax rINST               # vAA<- eax
+    GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_CONST_HIGH16: /* 0x15 */
 /* File: x86/OP_CONST_HIGH16.S */
     /* const/high16 vAA, #+BBBB0000 */
     movzwl     2(rPC),%eax                # eax<- 0000BBBB
-    movl       rINST,%ecx                 # ecx<- AA
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
     sall       $16,%eax                  # eax<- BBBB0000
-    SET_VREG %eax %ecx                    # vAA<- eax
-    GOTO_NEXT_R %edx
+    SET_VREG %eax rINST                   # vAA<- eax
+    GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_CONST_WIDE_16: /* 0x16 */
 /* File: x86/OP_CONST_WIDE_16.S */
     /* const-wide/16 vAA, #+BBBB */
     movswl    2(rPC),%eax               # eax<- ssssBBBB
-    cltd                                # rPC:eax<- ssssssssssssBBBB
-    SET_VREG_WORD %edx rINST 1          # store msw
-    FETCH_INST_OPCODE 2 %edx
+    SPILL(rIBASE)                       # preserve rIBASE (cltd trashes it)
+    cltd                                # rIBASE:eax<- ssssssssssssBBBB
+    SET_VREG_WORD rIBASE rINST 1        # store msw
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)                     # restore rIBASE
     SET_VREG_WORD %eax rINST 0          # store lsw
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_CONST_WIDE_32: /* 0x17 */
 /* File: x86/OP_CONST_WIDE_32.S */
     /* const-wide/32 vAA, #+BBBBbbbb */
     movl     2(rPC),%eax                # eax<- BBBBbbbb
-    cltd                                # rPC:eax<- ssssssssssssBBBB
-    SET_VREG_WORD %edx rINST,1          # store msw
-    FETCH_INST_OPCODE 3 %edx
+    SPILL(rIBASE)                       # save rIBASE (cltd trashes it)
+    cltd                                # rIBASE:eax<- ssssssssBBBBbbbb
+    SET_VREG_WORD rIBASE rINST,1        # store msw
+    FETCH_INST_OPCODE 3 %ecx
+    UNSPILL(rIBASE)                     # restore rIBASE
     SET_VREG_WORD %eax rINST 0          # store lsw
     ADVANCE_PC 3
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_CONST_WIDE: /* 0x18 */
 /* File: x86/OP_CONST_WIDE.S */
     /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
@@ -949,101 +644,159 @@
     movl      6(rPC),rINST        # rINST<- msw
     leal      (rFP,%ecx,4),%ecx   # dst addr
     movl      rINST,4(%ecx)
-    FETCH_INST_OPCODE 5 %edx
     movl      %eax,(%ecx)
+    FETCH_INST_OPCODE 5 %ecx
     ADVANCE_PC 5
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_CONST_WIDE_HIGH16: /* 0x19 */
 /* File: x86/OP_CONST_WIDE_HIGH16.S */
     /* const-wide/high16 vAA, #+BBBB000000000000 */
     movzwl     2(rPC),%eax                # eax<- 0000BBBB
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
     sall       $16,%eax                  # eax<- BBBB0000
     SET_VREG_WORD %eax rINST 1            # v[AA+1]<- eax
     xorl       %eax,%eax
     SET_VREG_WORD %eax rINST 0            # v[AA+0]<- eax
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_CONST_STRING: /* 0x1a */
 /* File: x86/OP_CONST_STRING.S */
 
     /* const/string vAA, String@BBBB */
-    movl      rGLUE,%ecx
+    movl      rSELF,%ecx
     movzwl    2(rPC),%eax              # eax<- BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx# ecx<- glue->methodClassDex
+    movl      offThread_methodClassDex(%ecx),%ecx # ecx<- self->methodClassDex
     movl      offDvmDex_pResStrings(%ecx),%ecx # ecx<- dvmDex->pResStrings
     movl      (%ecx,%eax,4),%eax       # eax<- rResString[BBBB]
-    movl      rINST,%ecx
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
     testl     %eax,%eax                # resolved yet?
     je        .LOP_CONST_STRING_resolve
-    SET_VREG  %eax %ecx                # vAA<- rResString[BBBB]
+    SET_VREG  %eax rINST               # vAA<- rResString[BBBB]
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
+
+/* This is the less common path, so we'll redo some work
+   here rather than force spills on the common path */
+.LOP_CONST_STRING_resolve:
+    movl     rSELF,%eax
+    EXPORT_PC
+    movl     offThread_method(%eax),%eax # eax<- self->method
+    movzwl   2(rPC),%ecx               # ecx<- BBBB
+    movl     offMethod_clazz(%eax),%eax
+    movl     %ecx,OUT_ARG1(%esp)
+    movl     %eax,OUT_ARG0(%esp)
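+                                        # args set up for dvmResolveString(method->clazz, stringIdx)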
+    SPILL(rIBASE)
+    call     dvmResolveString          # go resolve
+    UNSPILL(rIBASE)
+    testl    %eax,%eax                 # failed?
+    je       common_exceptionThrown
+    FETCH_INST_OPCODE 2 %ecx
+    SET_VREG %eax rINST
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_CONST_STRING_JUMBO: /* 0x1b */
 /* File: x86/OP_CONST_STRING_JUMBO.S */
 
     /* const/string vAA, String@BBBBBBBB */
-    movl      rGLUE,%ecx
+    movl      rSELF,%ecx
     movl      2(rPC),%eax              # eax<- BBBBBBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx# ecx<- glue->methodClassDex
+    movl      offThread_methodClassDex(%ecx),%ecx # ecx<- self->methodClassDex
     movl      offDvmDex_pResStrings(%ecx),%ecx # ecx<- dvmDex->pResStrings
     movl      (%ecx,%eax,4),%eax       # eax<- rResString[BBBB]
-    movl      rINST,%ecx
-    FETCH_INST_OPCODE 3 %edx
+    FETCH_INST_OPCODE 3 %ecx
     testl     %eax,%eax                # resolved yet?
     je        .LOP_CONST_STRING_JUMBO_resolve
-    SET_VREG  %eax %ecx                # vAA<- rResString[BBBB]
+    SET_VREG  %eax rINST               # vAA<- rResString[BBBB]
     ADVANCE_PC 3
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
+
+/* This is the less common path, so we'll redo some work
+   here rather than force spills on the common path */
+.LOP_CONST_STRING_JUMBO_resolve:
+    movl     rSELF,%eax
+    EXPORT_PC
+    movl     offThread_method(%eax),%eax # eax<- self->method
+    movl     2(rPC),%ecx               # ecx<- BBBBBBBB
+    movl     offMethod_clazz(%eax),%eax
+    movl     %ecx,OUT_ARG1(%esp)
+    movl     %eax,OUT_ARG0(%esp)
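+                                        # args set up for dvmResolveString(method->clazz, BBBBBBBB)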
+    SPILL(rIBASE)
+    call     dvmResolveString          # go resolve
+    UNSPILL(rIBASE)
+    testl    %eax,%eax                 # failed?
+    je       common_exceptionThrown
+    FETCH_INST_OPCODE 3 %ecx
+    SET_VREG %eax rINST
+    ADVANCE_PC 3
+    GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_CONST_CLASS: /* 0x1c */
 /* File: x86/OP_CONST_CLASS.S */
 
     /* const/class vAA, Class@BBBB */
-    movl      rGLUE,%ecx
+    movl      rSELF,%ecx
     movzwl    2(rPC),%eax              # eax<- BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx# ecx<- glue->methodClassDex
+    movl      offThread_methodClassDex(%ecx),%ecx # ecx<- self->methodClassDex
     movl      offDvmDex_pResClasses(%ecx),%ecx # ecx<- dvmDex->pResClasses
     movl      (%ecx,%eax,4),%eax       # eax<- rResClasses[BBBB]
-    movl      rINST,%ecx
-    FETCH_INST_OPCODE 2 %edx
     testl     %eax,%eax                # resolved yet?
     je        .LOP_CONST_CLASS_resolve
-    SET_VREG  %eax %ecx                # vAA<- rResClasses[BBBB]
+    FETCH_INST_OPCODE 2 %ecx
+    SET_VREG  %eax rINST               # vAA<- rResClasses[BBBB]
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
+
+/* This is the less common path, so we'll redo some work
+   here rather than force spills on the common path */
+.LOP_CONST_CLASS_resolve:
+    movl     rSELF,%eax
+    EXPORT_PC
+    movl     offThread_method(%eax),%eax # eax<- self->method
+    movl     $1,OUT_ARG2(%esp)        # true
+    movzwl   2(rPC),%ecx               # ecx<- BBBB
+    movl     offMethod_clazz(%eax),%eax
+    movl     %ecx,OUT_ARG1(%esp)
+    movl     %eax,OUT_ARG0(%esp)
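+                                        # args set up for dvmResolveClass(method->clazz, BBBB, true)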
+    SPILL(rIBASE)
+    call     dvmResolveClass           # go resolve
+    UNSPILL(rIBASE)
+    testl    %eax,%eax                 # failed?
+    je       common_exceptionThrown
+    FETCH_INST_OPCODE 2 %ecx
+    SET_VREG %eax rINST
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_MONITOR_ENTER: /* 0x1d */
 /* File: x86/OP_MONITOR_ENTER.S */
     /*
      * Synchronize on an object.
      */
     /* monitor-enter vAA */
-    movl    rGLUE,%ecx
+    movl    rSELF,%ecx
     GET_VREG_R %eax rINST               # eax<- vAA
-    movl    offGlue_self(%ecx),%ecx     # ecx<- glue->self
     FETCH_INST_WORD 1
     testl   %eax,%eax                   # null object?
-    EXPORT_PC                           # need for precise GC, MONITOR_TRACKING
-    jne     .LOP_MONITOR_ENTER_continue
-    jmp     common_errNullObject
+    EXPORT_PC                           # need for precise GC
+    je     common_errNullObject
+    movl    %ecx,OUT_ARG0(%esp)
+    movl    %eax,OUT_ARG1(%esp)
+    SPILL(rIBASE)
+    call    dvmLockObject               # dvmLockObject(self,object)
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_MONITOR_EXIT: /* 0x1e */
 /* File: x86/OP_MONITOR_EXIT.S */
     /*
@@ -1055,27 +808,35 @@
      */
     /* monitor-exit vAA */
     GET_VREG_R %eax rINST
-    movl    rGLUE,%ecx
+    movl    rSELF,%ecx
     EXPORT_PC
     testl   %eax,%eax                   # null object?
     je      .LOP_MONITOR_EXIT_errNullObject   # go if so
-    movl    offGlue_self(%ecx),%ecx     # ecx<- glue->self
     movl    %eax,OUT_ARG1(%esp)
     movl    %ecx,OUT_ARG0(%esp)
-    jmp     .LOP_MONITOR_EXIT_continue
+    SPILL(rIBASE)
+    call    dvmUnlockObject             # unlock(self,obj)
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 1 %ecx
+    testl   %eax,%eax                   # success?
+    ADVANCE_PC 1
+    je      common_exceptionThrown      # no, exception pending
+    GOTO_NEXT_R %ecx
+.LOP_MONITOR_EXIT_errNullObject:
+    ADVANCE_PC 1                        # advance before throw
+    jmp     common_errNullObject
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_CHECK_CAST: /* 0x1f */
 /* File: x86/OP_CHECK_CAST.S */
     /*
      * Check to see if a cast from one class to another is allowed.
      */
     /* check-cast vAA, class@BBBB */
-    movl      rGLUE,%ecx
+    movl      rSELF,%ecx
     GET_VREG_R  rINST,rINST             # rINST<- vAA (object)
     movzwl    2(rPC),%eax               # eax<- BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
+    movl      offThread_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
     testl     rINST,rINST               # is object null?
     movl      offDvmDex_pResClasses(%ecx),%ecx # ecx<- pDvmDex->pResClasses
     je        .LOP_CHECK_CAST_okay          # null obj, cast always succeeds
@@ -1087,12 +848,59 @@
     cmpl      %eax,%ecx                 # same class (trivial success)?
     jne       .LOP_CHECK_CAST_fullcheck     # no, do full check
 .LOP_CHECK_CAST_okay:
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  ecx holds obj->clazz
+     *  eax holds class resolved from BBBB
+     *  rINST holds object
+     */
+.LOP_CHECK_CAST_fullcheck:
+    movl    %eax,sReg0                 # we'll need the desired class on failure
+    movl    %eax,OUT_ARG1(%esp)
+    movl    %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
+    call    dvmInstanceofNonTrivial    # eax<- boolean result
+    UNSPILL(rIBASE)
+    testl   %eax,%eax                  # failed?
+    jne     .LOP_CHECK_CAST_okay           # no, success
+
+    # A cast has failed.  We need to throw a ClassCastException.
+    EXPORT_PC
+    movl    offObject_clazz(rINST),%eax
+    movl    %eax,OUT_ARG0(%esp)                 # arg0<- obj->clazz
+    movl    sReg0,%ecx
+    movl    %ecx,OUT_ARG1(%esp)                 # arg1<- desired class
+    call    dvmThrowClassCastException
+    jmp     common_exceptionThrown
+
+    /*
+     * Resolution required.  This is the least-likely path, and we're
+     * going to have to recreate some data.
+     *
+     *  rINST holds object
+     */
+.LOP_CHECK_CAST_resolve:
+    movl    rSELF,%ecx
+    EXPORT_PC
+    movzwl  2(rPC),%eax                # eax<- BBBB
+    movl    offThread_method(%ecx),%ecx  # ecx<- self->method
+    movl    %eax,OUT_ARG1(%esp)        # arg1<- BBBB
+    movl    offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+    movl    $0,OUT_ARG2(%esp)         # arg2<- false
+    movl    %ecx,OUT_ARG0(%esp)        # arg0<- method->clazz
+    SPILL(rIBASE)
+    call    dvmResolveClass            # eax<- resolved ClassObject ptr
+    UNSPILL(rIBASE)
+    testl   %eax,%eax                  # got null?
+    je      common_exceptionThrown     # yes, handle exception
+    movl    offObject_clazz(rINST),%ecx  # ecx<- obj->clazz
+    jmp     .LOP_CHECK_CAST_resolved       # pick up where we left off
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_INSTANCE_OF: /* 0x20 */
 /* File: x86/OP_INSTANCE_OF.S */
     /*
@@ -1102,26 +910,87 @@
      * an already-resolved class.
      */
     /* instance-of vA, vB, class@CCCC */
-    movl    rINST,%eax                # eax<- BA
+    movl    rINST,%eax                  # eax<- BA
     sarl    $4,%eax                    # eax<- B
     GET_VREG_R %eax %eax                # eax<- vB (obj)
-    movl    rGLUE,%ecx
+    movl    rSELF,%ecx
     testl   %eax,%eax                   # object null?
-    movl    offGlue_methodClassDex(%ecx),%ecx  # ecx<- pDvmDex
+    movl    offThread_methodClassDex(%ecx),%ecx  # ecx<- pDvmDex
+    SPILL(rIBASE)                       # preserve rIBASE
     je      .LOP_INSTANCE_OF_store           # null obj, not instance, store it
-    movzwl  2(rPC),%edx                 # edx<- CCCC
+    movzwl  2(rPC),rIBASE               # rIBASE<- CCCC
     movl    offDvmDex_pResClasses(%ecx),%ecx # ecx<- pDvmDex->pResClasses
-    movl    (%ecx,%edx,4),%ecx          # ecx<- resolved class
+    movl    (%ecx,rIBASE,4),%ecx        # ecx<- resolved class
     movl    offObject_clazz(%eax),%eax  # eax<- obj->clazz
     testl   %ecx,%ecx                   # have we resolved this before?
     je      .LOP_INSTANCE_OF_resolve         # not resolved, do it now
 .LOP_INSTANCE_OF_resolved:  # eax<- obj->clazz, ecx<- resolved class
     cmpl    %eax,%ecx                   # same class (trivial success)?
     je      .LOP_INSTANCE_OF_trivial         # yes, trivial finish
-    jmp     .LOP_INSTANCE_OF_fullcheck       # no, do full check
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  eax holds obj->clazz
+     *  ecx holds class resolved from BBBB
+     *  rINST has BA
+     */
+    movl    %eax,OUT_ARG0(%esp)
+    movl    %ecx,OUT_ARG1(%esp)
+    call    dvmInstanceofNonTrivial     # eax<- boolean result
+    # fall through to OP_INSTANCE_OF_store
+
+    /*
+     * eax holds boolean result
+     * rINST holds BA
+     */
+.LOP_INSTANCE_OF_store:
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
+    andb    $0xf,rINSTbl               # <- A
+    ADVANCE_PC 2
+    SET_VREG %eax rINST                 # vA<- eax
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Trivial test succeeded, save and bail.
+     *  rINST holds BA
+     */
+.LOP_INSTANCE_OF_trivial:
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
+    andb    $0xf,rINSTbl               # <- A
+    ADVANCE_PC 2
+    movl    $1,%eax
+    SET_VREG %eax rINST                 # vA<- true
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  rIBASE holds BBBB
+     *  rINST holds BA
+     */
+.LOP_INSTANCE_OF_resolve:
+    movl    rIBASE,OUT_ARG1(%esp)         # arg1<- BBBB
+    movl    rSELF,%ecx
+    movl    offThread_method(%ecx),%ecx
+    movl    $1,OUT_ARG2(%esp)          # arg2<- true
+    movl    offMethod_clazz(%ecx),%ecx  # ecx<- method->clazz
+    EXPORT_PC
+    movl    %ecx,OUT_ARG0(%esp)         # arg0<- method->clazz
+    call    dvmResolveClass             # eax<- resolved ClassObject ptr
+    testl   %eax,%eax                   # success?
+    je      common_exceptionThrown      # no, handle exception
+/* Now, we need to sync up with fast path.  We need eax to
+ * hold the obj->clazz, and ecx to hold the resolved class
+ */
+    movl    %eax,%ecx                   # ecx<- resolved class
+    movl    rINST,%eax                  # eax<- BA
+    sarl    $4,%eax                    # eax<- B
+    GET_VREG_R %eax %eax                # eax<- vB (obj)
+    movl    offObject_clazz(%eax),%eax  # eax<- obj->clazz
+    jmp     .LOP_INSTANCE_OF_resolved
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_ARRAY_LENGTH: /* 0x21 */
 /* File: x86/OP_ARRAY_LENGTH.S */
     /*
@@ -1133,23 +1002,23 @@
    andb     $0xf,%al                 # eax<- A
    testl    %ecx,%ecx                 # is null?
    je       common_errNullObject
-   FETCH_INST_OPCODE 1 %edx
-   movl     offArrayObject_length(%ecx),%ecx
+   movl     offArrayObject_length(%ecx),rINST
+   FETCH_INST_OPCODE 1 %ecx
    ADVANCE_PC 1
-   SET_VREG %ecx %eax
-   GOTO_NEXT_R %edx
+   SET_VREG rINST %eax
+   GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_NEW_INSTANCE: /* 0x22 */
 /* File: x86/OP_NEW_INSTANCE.S */
     /*
      * Create a new instance of a class.
      */
     /* new-instance vAA, class@BBBB */
-    movl      rGLUE,%ecx
+    movl      rSELF,%ecx
     movzwl    2(rPC),%eax               # eax<- BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx  # ecx<- pDvmDex
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- pDvmDex
+    SPILL(rIBASE)
     movl      offDvmDex_pResClasses(%ecx),%ecx # ecx<- pDvmDex->pResClasses
     EXPORT_PC
     movl      (%ecx,%eax,4),%ecx        # ecx<- resolved class
@@ -1157,11 +1026,52 @@
     je        .LOP_NEW_INSTANCE_resolve       # no, go do it
 .LOP_NEW_INSTANCE_resolved:  # on entry, ecx<- class
     cmpb      $CLASS_INITIALIZED,offClassObject_status(%ecx)
-    je        .LOP_NEW_INSTANCE_initialized
-    jmp       .LOP_NEW_INSTANCE_needinit
+    jne       .LOP_NEW_INSTANCE_needinit
+.LOP_NEW_INSTANCE_initialized:  # on entry, ecx<- class
+    movl      $ALLOC_DONT_TRACK,OUT_ARG1(%esp)
+    movl     %ecx,OUT_ARG0(%esp)
+    call     dvmAllocObject             # eax<- new object
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
+    testl    %eax,%eax                  # success?
+    je       common_exceptionThrown     # no, bail out
+    SET_VREG %eax rINST
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Class initialization required.
+     *
+     *  ecx holds class object
+     */
+.LOP_NEW_INSTANCE_needinit:
+    SPILL_TMP1(%ecx)                    # save object
+    movl    %ecx,OUT_ARG0(%esp)
+    call    dvmInitClass                # initialize class
+    UNSPILL_TMP1(%ecx)                  # restore object
+    testl   %eax,%eax                   # success?
+    jne     .LOP_NEW_INSTANCE_initialized     # success, continue
+    jmp     common_exceptionThrown      # go deal with init exception
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     */
+.LOP_NEW_INSTANCE_resolve:
+    movl    rSELF,%ecx
+    movzwl  2(rPC),%eax
+    movl    offThread_method(%ecx),%ecx   # ecx<- self->method
+    movl    %eax,OUT_ARG1(%esp)
+    movl    offMethod_clazz(%ecx),%ecx  # ecx<- method->clazz
+    movl    $0,OUT_ARG2(%esp)
+    movl    %ecx,OUT_ARG0(%esp)
+    call    dvmResolveClass             # call(clazz,off,flags)
+    movl    %eax,%ecx                   # ecx<- resolved ClassObject ptr
+    testl   %ecx,%ecx                   # success?
+    jne     .LOP_NEW_INSTANCE_resolved        # good to go
+    jmp     common_exceptionThrown      # no, handle exception
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_NEW_ARRAY: /* 0x23 */
 /* File: x86/OP_NEW_ARRAY.S */
     /*
@@ -1172,24 +1082,61 @@
      * check for it here.
      */
     /* new-array vA, vB, class@CCCC */
-    movl    rGLUE,%ecx
+    movl    rSELF,%ecx
     EXPORT_PC
-    movl    offGlue_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
+    movl    offThread_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
     movzwl  2(rPC),%eax                       # eax<- CCCC
     movl    offDvmDex_pResClasses(%ecx),%ecx  # ecx<- pDvmDex->pResClasses
+    SPILL(rIBASE)
     movl    (%ecx,%eax,4),%ecx                # ecx<- resolved class
     movzbl  rINSTbl,%eax
     sarl    $4,%eax                          # eax<- B
     GET_VREG_R %eax %eax                      # eax<- vB (array length)
     andb    $0xf,rINSTbl                     # rINST<- A
     testl   %eax,%eax
-    js      common_errNegativeArraySize       # bail
+    js      common_errNegativeArraySize       # bail, passing len in eax
     testl   %ecx,%ecx                         # already resolved?
     jne     .LOP_NEW_ARRAY_finish                # yes, fast path
-    jmp     .LOP_NEW_ARRAY_resolve               # resolve now
+    /*
+     * Resolve class.  (This is an uncommon case.)
+     *  ecx holds class (null here)
+     *  eax holds array length (vB)
+     */
+    movl    rSELF,%ecx
+    SPILL_TMP1(%eax)                   # save array length
+    movl    offThread_method(%ecx),%ecx  # ecx<- self->method
+    movzwl  2(rPC),%eax                # eax<- CCCC
+    movl    offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+    movl    %eax,OUT_ARG1(%esp)
+    movl    $0,OUT_ARG2(%esp)
+    movl    %ecx,OUT_ARG0(%esp)
+    call    dvmResolveClass            # eax<- call(clazz,ref,flag)
+    movl    %eax,%ecx
+    UNSPILL_TMP1(%eax)
+    testl   %ecx,%ecx                  # successful resolution?
+    je      common_exceptionThrown     # no, bail.
+# fall through to OP_NEW_ARRAY_finish
+
+    /*
+     * Finish allocation
+     *
+     * ecx holds class
+     * eax holds array length (vB)
+     */
+.LOP_NEW_ARRAY_finish:
+    movl    %ecx,OUT_ARG0(%esp)
+    movl    %eax,OUT_ARG1(%esp)
+    movl    $ALLOC_DONT_TRACK,OUT_ARG2(%esp)
+    call    dvmAllocArrayByClass    # eax<- call(clazz,length,flags)
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
+    testl   %eax,%eax               # failed?
+    je      common_exceptionThrown  # yup - go handle
+    SET_VREG %eax rINST
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_FILLED_NEW_ARRAY: /* 0x24 */
 /* File: x86/OP_FILLED_NEW_ARRAY.S */
     /*
@@ -1199,23 +1146,127 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
-    movl    rGLUE,%eax
-    movl    offGlue_methodClassDex(%eax),%eax # eax<- pDvmDex
+    movl    rSELF,%eax
+    movl    offThread_methodClassDex(%eax),%eax # eax<- pDvmDex
     movzwl  2(rPC),%ecx                       # ecx<- BBBB
     movl    offDvmDex_pResClasses(%eax),%eax  # eax<- pDvmDex->pResClasses
+    SPILL(rIBASE)                             # preserve rIBASE
     movl    (%eax,%ecx,4),%eax                # eax<- resolved class
     EXPORT_PC
     testl   %eax,%eax                         # already resolved?
     jne     .LOP_FILLED_NEW_ARRAY_continue              # yes, continue
     # less frequent path, so we'll redo some work
-    movl    rGLUE,%eax
+    movl    rSELF,%eax
     movl    $0,OUT_ARG2(%esp)                # arg2<- false
     movl    %ecx,OUT_ARG1(%esp)               # arg1<- BBBB
-    movl    offGlue_method(%eax),%eax         # eax<- glue->method
-    jmp     .LOP_FILLED_NEW_ARRAY_more
+    movl    offThread_method(%eax),%eax         # eax<- self->method
+    movl    offMethod_clazz(%eax),%eax        # eax<- method->clazz
+    movl    %eax,OUT_ARG0(%esp)               # arg0<- clazz
+    call    dvmResolveClass                   # eax<- call(clazz,ref,flag)
+    testl   %eax,%eax                         # null?
+    je      common_exceptionThrown            # yes, handle it
+
+       # note: fall through to .LOP_FILLED_NEW_ARRAY_continue
+
+    /*
+     * On entry:
+     *    eax holds array class [r0]
+     *    rINST holds AA or BA [r10]
+     *    ecx is scratch
+     */
+.LOP_FILLED_NEW_ARRAY_continue:
+    movl    offClassObject_descriptor(%eax),%ecx  # ecx<- arrayClass->descriptor
+    movl    $ALLOC_DONT_TRACK,OUT_ARG2(%esp)     # arg2<- flags
+    movzbl  1(%ecx),%ecx                          # ecx<- descriptor[1]
+    movl    %eax,OUT_ARG0(%esp)                   # arg0<- arrayClass
+    movl    rSELF,%eax
+    cmpb    $'I',%cl                             # supported?
+    je      1f
+    cmpb    $'L',%cl
+    je      1f
+    cmpb    $'[',%cl
+    jne      .LOP_FILLED_NEW_ARRAY_notimpl                  # no, not handled yet
+1:
+    movl    %ecx,offThread_retval+4(%eax)           # save type
+    .if      (!0)
+    SPILL_TMP1(rINST)                              # save copy, need "B" later
+    sarl    $4,rINST
+    .endif
+    movl    rINST,OUT_ARG1(%esp)                  # arg1<- A or AA (length)
+    call    dvmAllocArrayByClass     # eax<- call(arrayClass, length, flags)
+    movl    rSELF,%ecx
+    testl   %eax,%eax                             # alloc successful?
+    je      common_exceptionThrown                # no, handle exception
+    movl    %eax,offThread_retval(%ecx)             # retval.l<- new array
+    movzwl  4(rPC),%ecx                           # ecx<- FEDC or CCCC
+    leal    offArrayObject_contents(%eax),%eax    # eax<- newArray->contents
+
+/* at this point:
+ *     eax is pointer to tgt
+ *     rINST is length
+ *     ecx is FEDC or CCCC
+ *     TMP_SPILL1 is BA
+ *  We now need to copy values from registers into the array
+ */
+
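+    # .if 0 here selects the non-range form: the rep-movsd block below is
+    # skipped and the nibble-walk copy loop in the .else branch is assembled.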
+    .if 0
+    # set up src pointer
+    SPILL_TMP2(%esi)
+    SPILL_TMP3(%edi)
+    leal    (rFP,%ecx,4),%esi # set up src ptr
+    movl    %eax,%edi         # set up dst ptr
+    movl    rINST,%ecx        # load count register
+    rep
+    movsd
+    UNSPILL_TMP2(%esi)
+    UNSPILL_TMP3(%edi)
+    movl    rSELF,%ecx
+    movl    offThread_retval+4(%ecx),%eax      # eax<- type
+    .else
+    testl  rINST,rINST
+    je     4f
+    UNSPILL_TMP1(rIBASE)      # restore "BA"
+    andl   $0x0f,rIBASE      # rIBASE<- 0000000A
+    sall   $16,rIBASE        # rIBASE<- 000A0000
+    orl    %ecx,rIBASE        # rIBASE<- 000AFEDC
+3:
+    movl   $0xf,%ecx
+    andl   rIBASE,%ecx        # ecx<- next reg to load
+    GET_VREG_R %ecx %ecx
+    shrl   $4,rIBASE
+    leal   4(%eax),%eax
+    movl   %ecx,-4(%eax)
+    sub    $1,rINST
+    jne    3b
+4:
+    movl   rSELF,%ecx
+    movl    offThread_retval+4(%ecx),%eax      # eax<- type
+    .endif
+
+    cmpb    $'I',%al                        # Int array?
+    je      5f                               # skip card mark if so
+    movl    offThread_retval(%ecx),%eax        # eax<- object head
+    movl    offThread_cardTable(%ecx),%ecx     # card table base
+    shrl    $GC_CARD_SHIFT,%eax             # convert to card num
+    movb    %cl,(%ecx,%eax)                  # mark card based on object head
+5:
+    UNSPILL(rIBASE)                          # restore rIBASE
+    FETCH_INST_OPCODE 3 %ecx
+    ADVANCE_PC 3
+    GOTO_NEXT_R %ecx
+
+
+    /*
+     * Throw an exception indicating that we have not implemented this
+     * mode of filled-new-array.
+     */
+.LOP_FILLED_NEW_ARRAY_notimpl:
+    movl    $.LstrFilledNewArrayNotImplA,%eax
+    movl    %eax,OUT_ARG0(%esp)
+    call    dvmThrowInternalError
+    jmp     common_exceptionThrown
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
 /* File: x86/OP_FILLED_NEW_ARRAY_RANGE.S */
 /* File: x86/OP_FILLED_NEW_ARRAY.S */
@@ -1226,24 +1277,128 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
-    movl    rGLUE,%eax
-    movl    offGlue_methodClassDex(%eax),%eax # eax<- pDvmDex
+    movl    rSELF,%eax
+    movl    offThread_methodClassDex(%eax),%eax # eax<- pDvmDex
     movzwl  2(rPC),%ecx                       # ecx<- BBBB
     movl    offDvmDex_pResClasses(%eax),%eax  # eax<- pDvmDex->pResClasses
+    SPILL(rIBASE)                             # preserve rIBASE
     movl    (%eax,%ecx,4),%eax                # eax<- resolved class
     EXPORT_PC
     testl   %eax,%eax                         # already resolved?
     jne     .LOP_FILLED_NEW_ARRAY_RANGE_continue              # yes, continue
     # less frequent path, so we'll redo some work
-    movl    rGLUE,%eax
+    movl    rSELF,%eax
     movl    $0,OUT_ARG2(%esp)                # arg2<- false
     movl    %ecx,OUT_ARG1(%esp)               # arg1<- BBBB
-    movl    offGlue_method(%eax),%eax         # eax<- glue->method
-    jmp     .LOP_FILLED_NEW_ARRAY_RANGE_more
+    movl    offThread_method(%eax),%eax         # eax<- self->method
+    movl    offMethod_clazz(%eax),%eax        # eax<- method->clazz
+    movl    %eax,OUT_ARG0(%esp)               # arg0<- clazz
+    call    dvmResolveClass                   # eax<- call(clazz,ref,flag)
+    testl   %eax,%eax                         # null?
+    je      common_exceptionThrown            # yes, handle it
+
+       # note: fall through to .LOP_FILLED_NEW_ARRAY_RANGE_continue
+
+    /*
+     * On entry:
+     *    eax holds array class [r0]
+     *    rINST holds AA or BA [r10]
+     *    ecx is scratch
+     */
+.LOP_FILLED_NEW_ARRAY_RANGE_continue:
+    movl    offClassObject_descriptor(%eax),%ecx  # ecx<- arrayClass->descriptor
+    movl    $ALLOC_DONT_TRACK,OUT_ARG2(%esp)     # arg2<- flags
+    movzbl  1(%ecx),%ecx                          # ecx<- descriptor[1]
+    movl    %eax,OUT_ARG0(%esp)                   # arg0<- arrayClass
+    movl    rSELF,%eax
+    cmpb    $'I',%cl                             # supported?
+    je      1f
+    cmpb    $'L',%cl
+    je      1f
+    cmpb    $'[',%cl
+    jne      .LOP_FILLED_NEW_ARRAY_RANGE_notimpl                  # no, not handled yet
+1:
+    movl    %ecx,offThread_retval+4(%eax)           # save type
+    .if      (!1)
+    SPILL_TMP1(rINST)                              # save copy, need "B" later
+    sarl    $4,rINST
+    .endif
+    movl    rINST,OUT_ARG1(%esp)                  # arg1<- A or AA (length)
+    call    dvmAllocArrayByClass     # eax<- call(arrayClass, length, flags)
+    movl    rSELF,%ecx
+    testl   %eax,%eax                             # alloc successful?
+    je      common_exceptionThrown                # no, handle exception
+    movl    %eax,offThread_retval(%ecx)             # retval.l<- new array
+    movzwl  4(rPC),%ecx                           # ecx<- FEDC or CCCC
+    leal    offArrayObject_contents(%eax),%eax    # eax<- newArray->contents
+
+/* at this point:
+ *     eax is pointer to tgt
+ *     rINST is length
+ *     ecx is FEDC or CCCC
+ *     TMP_SPILL1 is BA
+ *  We now need to copy values from registers into the array
+ */
+
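+    # .if 1 here selects the range form: registers vCCCC.. are contiguous in the
+    # frame, so the rep-movsd copy is assembled and the nibble loop is skipped.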
+    .if 1
+    # set up src pointer
+    SPILL_TMP2(%esi)
+    SPILL_TMP3(%edi)
+    leal    (rFP,%ecx,4),%esi # set up src ptr
+    movl    %eax,%edi         # set up dst ptr
+    movl    rINST,%ecx        # load count register
+    rep
+    movsd
+    UNSPILL_TMP2(%esi)
+    UNSPILL_TMP3(%edi)
+    movl    rSELF,%ecx
+    movl    offThread_retval+4(%ecx),%eax      # eax<- type
+    .else
+    testl  rINST,rINST
+    je     4f
+    UNSPILL_TMP1(rIBASE)      # restore "BA"
+    andl   $0x0f,rIBASE      # rIBASE<- 0000000A
+    sall   $16,rIBASE        # rIBASE<- 000A0000
+    orl    %ecx,rIBASE        # rIBASE<- 000AFEDC
+3:
+    movl   $0xf,%ecx
+    andl   rIBASE,%ecx        # ecx<- next reg to load
+    GET_VREG_R %ecx %ecx
+    shrl   $4,rIBASE
+    leal   4(%eax),%eax
+    movl   %ecx,-4(%eax)
+    sub    $1,rINST
+    jne    3b
+4:
+    movl   rSELF,%ecx
+    movl    offThread_retval+4(%ecx),%eax      # eax<- type
+    .endif
+
+    cmpb    $'I',%al                        # Int array?
+    je      5f                               # skip card mark if so
+    movl    offThread_retval(%ecx),%eax        # eax<- object head
+    movl    offThread_cardTable(%ecx),%ecx     # card table base
+    shrl    $GC_CARD_SHIFT,%eax             # convert to card num
+    movb    %cl,(%ecx,%eax)                  # mark card based on object head
+5:
+    UNSPILL(rIBASE)                          # restore rIBASE
+    FETCH_INST_OPCODE 3 %ecx
+    ADVANCE_PC 3
+    GOTO_NEXT_R %ecx
+
+
+    /*
+     * Throw an exception indicating that we have not implemented this
+     * mode of filled-new-array.
+     */
+.LOP_FILLED_NEW_ARRAY_RANGE_notimpl:
+    movl    $.LstrFilledNewArrayNotImplA,%eax
+    movl    %eax,OUT_ARG0(%esp)
+    call    dvmThrowInternalError
+    jmp     common_exceptionThrown
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_FILL_ARRAY_DATA: /* 0x26 */
 /* File: x86/OP_FILL_ARRAY_DATA.S */
     /* fill-array-data vAA, +BBBBBBBB */
@@ -1253,32 +1408,31 @@
     EXPORT_PC
     movl    %eax,OUT_ARG0(%esp)
     movl    %ecx,OUT_ARG1(%esp)
+    SPILL(rIBASE)
     call    dvmInterpHandleFillArrayData
-    FETCH_INST_OPCODE 3 %edx
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 3 %ecx
     testl   %eax,%eax                   # exception thrown?
     je      common_exceptionThrown
     ADVANCE_PC 3
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_THROW: /* 0x27 */
 /* File: x86/OP_THROW.S */
     /*
      * Throw an exception object in the current thread.
      */
     /* throw vAA */
-    movl     rGLUE,%ecx
     EXPORT_PC
     GET_VREG_R %eax rINST              # eax<- exception object
-    movl     offGlue_self(%ecx),%ecx   # ecx<- glue->self
+    movl     rSELF,%ecx                # ecx<- self
     testl    %eax,%eax                 # null object?
     je       common_errNullObject
     movl     %eax,offThread_exception(%ecx) # thread->exception<- obj
     jmp      common_exceptionThrown
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_GOTO: /* 0x28 */
 /* File: x86/OP_GOTO.S */
     /*
@@ -1297,7 +1451,6 @@
     GOTO_NEXT
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_GOTO_16: /* 0x29 */
 /* File: x86/OP_GOTO_16.S */
     /*
@@ -1315,7 +1468,6 @@
     GOTO_NEXT
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_GOTO_32: /* 0x2a */
 /* File: x86/OP_GOTO_32.S */
     /*
@@ -1336,7 +1488,6 @@
     GOTO_NEXT
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_PACKED_SWITCH: /* 0x2b */
 /* File: x86/OP_PACKED_SWITCH.S */
     /*
@@ -1354,7 +1505,9 @@
     leal    (rPC,%ecx,2),%ecx     # ecx<- PC + BBBBbbbb*2
     movl    %eax,OUT_ARG1(%esp)   # ARG1<- vAA
     movl    %ecx,OUT_ARG0(%esp)   # ARG0<- switchData
+    SPILL(rIBASE)
     call    dvmInterpHandlePackedSwitch
+    UNSPILL(rIBASE)
     testl   %eax,%eax
     movl    %eax,rINST            # set up word offset
     jle     common_backwardBranch # check on special actions
@@ -1363,7 +1516,6 @@
     GOTO_NEXT
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_SPARSE_SWITCH: /* 0x2c */
 /* File: x86/OP_SPARSE_SWITCH.S */
 /* File: x86/OP_PACKED_SWITCH.S */
@@ -1382,7 +1534,9 @@
     leal    (rPC,%ecx,2),%ecx     # ecx<- PC + BBBBbbbb*2
     movl    %eax,OUT_ARG1(%esp)   # ARG1<- vAA
     movl    %ecx,OUT_ARG0(%esp)   # ARG0<- switchData
+    SPILL(rIBASE)
     call    dvmInterpHandleSparseSwitch
+    UNSPILL(rIBASE)
     testl   %eax,%eax
     movl    %eax,rINST            # set up word offset
     jle     common_backwardBranch # check on special actions
@@ -1392,7 +1546,6 @@
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_CMPL_FLOAT: /* 0x2d */
 /* File: x86/OP_CMPL_FLOAT.S */
 /* File: x86/OP_CMPG_DOUBLE.S */
@@ -1410,21 +1563,23 @@
     fucompp     # z if equal, p set if NaN, c set if st0 < st1
     fnstsw   %ax
     sahf
-    movl      rINST,%eax
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %eax
     jp       .LOP_CMPL_FLOAT_isNaN
     je       .LOP_CMPL_FLOAT_finish
     sbbl     %ecx,%ecx
     jb       .LOP_CMPL_FLOAT_finish
     incl     %ecx
 .LOP_CMPL_FLOAT_finish:
-    SET_VREG %ecx %eax
+    SET_VREG %ecx rINST
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %eax
+
+.LOP_CMPL_FLOAT_isNaN:
+    movl      $-1,%ecx
+    jmp       .LOP_CMPL_FLOAT_finish
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_CMPG_FLOAT: /* 0x2e */
 /* File: x86/OP_CMPG_FLOAT.S */
 /* File: x86/OP_CMPG_DOUBLE.S */
@@ -1442,21 +1597,23 @@
     fucompp     # z if equal, p set if NaN, c set if st0 < st1
     fnstsw   %ax
     sahf
-    movl      rINST,%eax
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %eax
     jp       .LOP_CMPG_FLOAT_isNaN
     je       .LOP_CMPG_FLOAT_finish
     sbbl     %ecx,%ecx
     jb       .LOP_CMPG_FLOAT_finish
     incl     %ecx
 .LOP_CMPG_FLOAT_finish:
-    SET_VREG %ecx %eax
+    SET_VREG %ecx rINST
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %eax
+
+.LOP_CMPG_FLOAT_isNaN:
+    movl      $1,%ecx
+    jmp       .LOP_CMPG_FLOAT_finish
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_CMPL_DOUBLE: /* 0x2f */
 /* File: x86/OP_CMPL_DOUBLE.S */
 /* File: x86/OP_CMPG_DOUBLE.S */
@@ -1474,21 +1631,23 @@
     fucompp     # z if equal, p set if NaN, c set if st0 < st1
     fnstsw   %ax
     sahf
-    movl      rINST,%eax
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %eax
     jp       .LOP_CMPL_DOUBLE_isNaN
     je       .LOP_CMPL_DOUBLE_finish
     sbbl     %ecx,%ecx
     jb       .LOP_CMPL_DOUBLE_finish
     incl     %ecx
 .LOP_CMPL_DOUBLE_finish:
-    SET_VREG %ecx %eax
+    SET_VREG %ecx rINST
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %eax
+
+.LOP_CMPL_DOUBLE_isNaN:
+    movl      $-1,%ecx
+    jmp       .LOP_CMPL_DOUBLE_finish
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_CMPG_DOUBLE: /* 0x30 */
 /* File: x86/OP_CMPG_DOUBLE.S */
     /* float/double_cmp[gl] vAA, vBB, vCC */
@@ -1505,41 +1664,64 @@
     fucompp     # z if equal, p set if NaN, c set if st0 < st1
     fnstsw   %ax
     sahf
-    movl      rINST,%eax
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %eax
     jp       .LOP_CMPG_DOUBLE_isNaN
     je       .LOP_CMPG_DOUBLE_finish
     sbbl     %ecx,%ecx
     jb       .LOP_CMPG_DOUBLE_finish
     incl     %ecx
 .LOP_CMPG_DOUBLE_finish:
-    SET_VREG %ecx %eax
+    SET_VREG %ecx rINST
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %eax
+
+.LOP_CMPG_DOUBLE_isNaN:
+    movl      $1,%ecx
+    jmp       .LOP_CMPG_DOUBLE_finish
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_CMP_LONG: /* 0x31 */
 /* File: x86/OP_CMP_LONG.S */
     /*
      * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
      * register based on the results of the comparison.
      */
+    // TUNING: rework to avoid rIBASE spill
     /* cmp-long vAA, vBB, vCC */
     movzbl    2(rPC),%ecx              # ecx<- BB
-    movzbl    3(rPC),%edx              # edx<- CC
+    SPILL(rIBASE)
+    movzbl    3(rPC),rIBASE            # rIBASE<- CC
     GET_VREG_WORD %eax %ecx,1          # eax<- v[BB+1]
     GET_VREG_WORD %ecx %ecx 0          # ecx<- v[BB+0]
-    cmpl      4(rFP,%edx,4),%eax
+    cmpl      4(rFP,rIBASE,4),%eax
     jl        .LOP_CMP_LONG_smaller
     jg        .LOP_CMP_LONG_bigger
-    sub       (rFP,%edx,4),%ecx
+    sub       (rFP,rIBASE,4),%ecx
     ja        .LOP_CMP_LONG_bigger
     jb        .LOP_CMP_LONG_smaller
-    jmp       .LOP_CMP_LONG_finish
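+                                        # fall through: values equal, ecx is 0 from the sub above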
+    SET_VREG %ecx rINST
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+.LOP_CMP_LONG_bigger:
+    movl      $1,%ecx
+    SET_VREG %ecx rINST
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+.LOP_CMP_LONG_smaller:
+    movl      $-1,%ecx
+    SET_VREG %ecx rINST
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_IF_EQ: /* 0x32 */
 /* File: x86/OP_IF_EQ.S */
 /* File: x86/bincmp.S */
@@ -1569,7 +1751,6 @@
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_IF_NE: /* 0x33 */
 /* File: x86/OP_IF_NE.S */
 /* File: x86/bincmp.S */
@@ -1599,7 +1780,6 @@
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_IF_LT: /* 0x34 */
 /* File: x86/OP_IF_LT.S */
 /* File: x86/bincmp.S */
@@ -1629,7 +1809,6 @@
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_IF_GE: /* 0x35 */
 /* File: x86/OP_IF_GE.S */
 /* File: x86/bincmp.S */
@@ -1659,7 +1838,6 @@
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_IF_GT: /* 0x36 */
 /* File: x86/OP_IF_GT.S */
 /* File: x86/bincmp.S */
@@ -1689,7 +1867,6 @@
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_IF_LE: /* 0x37 */
 /* File: x86/OP_IF_LE.S */
 /* File: x86/bincmp.S */
@@ -1719,7 +1896,6 @@
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_IF_EQZ: /* 0x38 */
 /* File: x86/OP_IF_EQZ.S */
 /* File: x86/zcmp.S */
@@ -1745,7 +1921,6 @@
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_IF_NEZ: /* 0x39 */
 /* File: x86/OP_IF_NEZ.S */
 /* File: x86/zcmp.S */
@@ -1771,7 +1946,6 @@
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_IF_LTZ: /* 0x3a */
 /* File: x86/OP_IF_LTZ.S */
 /* File: x86/zcmp.S */
@@ -1797,7 +1971,6 @@
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_IF_GEZ: /* 0x3b */
 /* File: x86/OP_IF_GEZ.S */
 /* File: x86/zcmp.S */
@@ -1823,7 +1996,6 @@
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_IF_GTZ: /* 0x3c */
 /* File: x86/OP_IF_GTZ.S */
 /* File: x86/zcmp.S */
@@ -1849,7 +2021,6 @@
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_IF_LEZ: /* 0x3d */
 /* File: x86/OP_IF_LEZ.S */
 /* File: x86/zcmp.S */
@@ -1875,7 +2046,6 @@
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_UNUSED_3E: /* 0x3e */
 /* File: x86/OP_UNUSED_3E.S */
 /* File: x86/unused.S */
@@ -1883,7 +2053,6 @@
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_UNUSED_3F: /* 0x3f */
 /* File: x86/OP_UNUSED_3F.S */
 /* File: x86/unused.S */
@@ -1891,7 +2060,6 @@
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_UNUSED_40: /* 0x40 */
 /* File: x86/OP_UNUSED_40.S */
 /* File: x86/unused.S */
@@ -1899,7 +2067,6 @@
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_UNUSED_41: /* 0x41 */
 /* File: x86/OP_UNUSED_41.S */
 /* File: x86/unused.S */
@@ -1907,7 +2074,6 @@
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_UNUSED_42: /* 0x42 */
 /* File: x86/OP_UNUSED_42.S */
 /* File: x86/unused.S */
@@ -1915,7 +2081,6 @@
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_UNUSED_43: /* 0x43 */
 /* File: x86/OP_UNUSED_43.S */
 /* File: x86/unused.S */
@@ -1923,7 +2088,6 @@
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_AGET: /* 0x44 */
 /* File: x86/OP_AGET.S */
     /*
@@ -1944,13 +2108,12 @@
                                         #    index in ecx
     movl     offArrayObject_contents(%eax,%ecx,4),%eax
 .LOP_AGET_finish:
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
     SET_VREG  %eax rINST
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_AGET_WIDE: /* 0x45 */
 /* File: x86/OP_AGET_WIDE.S */
     /*
@@ -1965,13 +2128,19 @@
     testl     %eax,%eax                 # null array object?
     je        common_errNullObject      # bail if so
     cmpl      offArrayObject_length(%eax),%ecx
-    jb        .LOP_AGET_WIDE_finish        # index < length, OK
-    jmp       common_errArrayIndex      # index >= length, bail.  Expects
+    jae       common_errArrayIndex      # index >= length, bail.  Expects
                                         #    arrayObj in eax
                                         #    index in ecx
+    leal      offArrayObject_contents(%eax,%ecx,8),%eax
+    movl      (%eax),%ecx
+    movl      4(%eax),%eax
+    SET_VREG_WORD %ecx rINST 0
+    SET_VREG_WORD %eax rINST 1
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_AGET_OBJECT: /* 0x46 */
 /* File: x86/OP_AGET_OBJECT.S */
 /* File: x86/OP_AGET.S */
@@ -1993,14 +2162,13 @@
                                         #    index in ecx
     movl     offArrayObject_contents(%eax,%ecx,4),%eax
 .LOP_AGET_OBJECT_finish:
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
     SET_VREG  %eax rINST
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_AGET_BOOLEAN: /* 0x47 */
 /* File: x86/OP_AGET_BOOLEAN.S */
 /* File: x86/OP_AGET.S */
@@ -2022,14 +2190,13 @@
                                         #    index in ecx
     movzbl     offArrayObject_contents(%eax,%ecx,1),%eax
 .LOP_AGET_BOOLEAN_finish:
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
     SET_VREG  %eax rINST
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_AGET_BYTE: /* 0x48 */
 /* File: x86/OP_AGET_BYTE.S */
 /* File: x86/OP_AGET.S */
@@ -2051,14 +2218,13 @@
                                         #    index in ecx
     movsbl     offArrayObject_contents(%eax,%ecx,1),%eax
 .LOP_AGET_BYTE_finish:
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
     SET_VREG  %eax rINST
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_AGET_CHAR: /* 0x49 */
 /* File: x86/OP_AGET_CHAR.S */
 /* File: x86/OP_AGET.S */
@@ -2080,14 +2246,13 @@
                                         #    index in ecx
     movzwl     offArrayObject_contents(%eax,%ecx,2),%eax
 .LOP_AGET_CHAR_finish:
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
     SET_VREG  %eax rINST
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_AGET_SHORT: /* 0x4a */
 /* File: x86/OP_AGET_SHORT.S */
 /* File: x86/OP_AGET.S */
@@ -2109,14 +2274,13 @@
                                         #    index in ecx
     movswl     offArrayObject_contents(%eax,%ecx,2),%eax
 .LOP_AGET_SHORT_finish:
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
     SET_VREG  %eax rINST
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_APUT: /* 0x4b */
 /* File: x86/OP_APUT.S */
     /*
@@ -2137,14 +2301,13 @@
                                         #   index in ecx
     leal      offArrayObject_contents(%eax,%ecx,4),%eax
 .LOP_APUT_finish:
-    GET_VREG_R  %ecx rINST
-    FETCH_INST_OPCODE 2 %edx
-    movl     %ecx,(%eax)
+    GET_VREG_R  rINST rINST
+    FETCH_INST_OPCODE 2 %ecx
+    movl     rINST,(%eax)
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_APUT_WIDE: /* 0x4c */
 /* File: x86/OP_APUT_WIDE.S */
     /*
@@ -2159,13 +2322,19 @@
     testl     %eax,%eax                 # null array object?
     je        common_errNullObject      # bail if so
     cmpl      offArrayObject_length(%eax),%ecx
-    jb        .LOP_APUT_WIDE_finish        # index < length, OK
-    jmp       common_errArrayIndex      # index >= length, bail.  Expects:
+    jae       common_errArrayIndex      # index >= length, bail.  Expects:
                                         #   arrayObj in eax
                                         #   index in ecx
+    leal      offArrayObject_contents(%eax,%ecx,8),%eax
+    GET_VREG_WORD %ecx rINST 0
+    GET_VREG_WORD rINST rINST 1
+    movl      %ecx,(%eax)
+    FETCH_INST_OPCODE 2 %ecx
+    movl      rINST,4(%eax)
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_APUT_OBJECT: /* 0x4d */
 /* File: x86/OP_APUT_OBJECT.S */
     /*
@@ -2182,13 +2351,59 @@
     testl     %eax,%eax                 # null array object?
     je        common_errNullObject      # bail if so
     cmpl      offArrayObject_length(%eax),%ecx
-    jb        .LOP_APUT_OBJECT_continue
-    jmp       common_errArrayIndex      # index >= length, bail.  Expects
+    jae       common_errArrayIndex      # index >= length, bail.  Expects
                                         #    arrayObj in eax
                                         #    index in ecx
+    /* On entry:
+     *   eax<- array object
+     *   ecx<- index
+     *   rINST<- vAA
+     */
+    leal      offArrayObject_contents(%eax,%ecx,4),%ecx
+    testl     rINST,rINST                    # storing null reference?
+    je        .LOP_APUT_OBJECT_skip_check
+    SPILL_TMP1(%ecx)                         # save target address
+    SPILL_TMP2(%eax)                         # save object head
+    movl      offObject_clazz(%eax),%eax     # eax<- arrayObj->clazz
+    movl      offObject_clazz(rINST),%ecx    # ecx<- obj->clazz
+    movl      %eax,OUT_ARG1(%esp)
+    movl      %ecx,OUT_ARG0(%esp)
+    movl      %ecx,sReg0                     # store the two classes for later
+    movl      %eax,sReg1
+    SPILL(rIBASE)
+    call      dvmCanPutArrayElement          # test object type vs. array type
+    UNSPILL(rIBASE)
+    UNSPILL_TMP1(%ecx)                       # recover target address
+    testl     %eax,%eax
+    movl      rSELF,%eax
+    jne       .LOP_APUT_OBJECT_types_okay
+
+    # The types don't match.  We need to throw an ArrayStoreException.
+    EXPORT_PC
+    movl      sReg0,%eax                     # restore the two classes...
+    movl      %eax,OUT_ARG0(%esp)
+    movl      sReg1,%ecx
+    movl      %ecx,OUT_ARG1(%esp)
+    call      dvmThrowArrayStoreException    # ...and throw
+    jmp       common_exceptionThrown
+
+.LOP_APUT_OBJECT_types_okay:
+    movl      offThread_cardTable(%eax),%eax   # get card table base
+    movl      rINST,(%ecx)                   # store into array
+    UNSPILL_TMP2(rINST)                      # recover object head
+    FETCH_INST_OPCODE 2 %ecx
+    shrl      $GC_CARD_SHIFT,rINST          # object head to card number
+    movb      %al,(%eax,rINST)               # mark card using object head
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+.LOP_APUT_OBJECT_skip_check:
+    movl      rINST,(%ecx)
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_APUT_BOOLEAN: /* 0x4e */
 /* File: x86/OP_APUT_BOOLEAN.S */
 /* File: x86/OP_APUT.S */
@@ -2210,15 +2425,14 @@
                                         #   index in ecx
     leal      offArrayObject_contents(%eax,%ecx,1),%eax
 .LOP_APUT_BOOLEAN_finish:
-    GET_VREG_R  %ecx rINST
-    FETCH_INST_OPCODE 2 %edx
-    movb     %cl,(%eax)
+    GET_VREG_R  rINST rINST
+    FETCH_INST_OPCODE 2 %ecx
+    movb     rINSTbl,(%eax)
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_APUT_BYTE: /* 0x4f */
 /* File: x86/OP_APUT_BYTE.S */
 /* File: x86/OP_APUT.S */
@@ -2240,15 +2454,14 @@
                                         #   index in ecx
     leal      offArrayObject_contents(%eax,%ecx,1),%eax
 .LOP_APUT_BYTE_finish:
-    GET_VREG_R  %ecx rINST
-    FETCH_INST_OPCODE 2 %edx
-    movb     %cl,(%eax)
+    GET_VREG_R  rINST rINST
+    FETCH_INST_OPCODE 2 %ecx
+    movb     rINSTbl,(%eax)
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_APUT_CHAR: /* 0x50 */
 /* File: x86/OP_APUT_CHAR.S */
 /* File: x86/OP_APUT.S */
@@ -2270,15 +2483,14 @@
                                         #   index in ecx
     leal      offArrayObject_contents(%eax,%ecx,2),%eax
 .LOP_APUT_CHAR_finish:
-    GET_VREG_R  %ecx rINST
-    FETCH_INST_OPCODE 2 %edx
-    movw     %cx,(%eax)
+    GET_VREG_R  rINST rINST
+    FETCH_INST_OPCODE 2 %ecx
+    movw     rINSTw,(%eax)
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_APUT_SHORT: /* 0x51 */
 /* File: x86/OP_APUT_SHORT.S */
 /* File: x86/OP_APUT.S */
@@ -2300,15 +2512,14 @@
                                         #   index in ecx
     leal      offArrayObject_contents(%eax,%ecx,2),%eax
 .LOP_APUT_SHORT_finish:
-    GET_VREG_R  %ecx rINST
-    FETCH_INST_OPCODE 2 %edx
-    movw     %cx,(%eax)
+    GET_VREG_R  rINST rINST
+    FETCH_INST_OPCODE 2 %ecx
+    movw     rINSTw,(%eax)
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 
 /* ------------------------------ */
-    .balign 64
 .L_OP_IGET: /* 0x52 */
 /* File: x86/OP_IGET.S */
     /*
@@ -2317,4792 +2528,25 @@
      * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
      */
     /* op vA, vB, field@CCCC */
-    movl    rGLUE,%ecx
-    movzwl  2(rPC),%edx                         # edx<- 0000CCCC
-    movl    offGlue_methodClassDex(%ecx),%eax   # eax<- DvmDex
+    movl    rSELF,%ecx
+    SPILL(rIBASE)                               # preserve rIBASE
+    movzwl  2(rPC),rIBASE                       # rIBASE<- 0000CCCC
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
     movzbl  rINSTbl,%ecx                        # ecx<- BA
     sarl    $4,%ecx                            # ecx<- B
     movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
     andb    $0xf,rINSTbl                       # rINST<- A
     GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
-    movl    (%eax,%edx,4),%eax                  # resolved entry
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
     testl   %eax,%eax                           # is resolved entry null?
     jne     .LOP_IGET_finish                  # no, already resolved
-    movl    %edx,OUT_ARG1(%esp)                 # needed by dvmResolveInstField
-    movl    rGLUE,%edx
-    jmp     .LOP_IGET_resolve
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_IGET_WIDE: /* 0x53 */
-/* File: x86/OP_IGET_WIDE.S */
-    /*
-     * 64-bit instance field get.
-     *
-     */
-    /* op vA, vB, field@CCCC */
-    movl    rGLUE,%ecx
-    movzwl  2(rPC),%edx                         # edx<- 0000CCCC
-    movl    offGlue_methodClassDex(%ecx),%eax   # eax<- DvmDex
-    movzbl  rINSTbl,%ecx                        # ecx<- BA
-    sarl    $4,%ecx                            # ecx<- B
-    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
-    andb    $0xf,rINSTbl                       # rINST<- A
-    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
-    movl    (%eax,%edx,4),%eax                  # resolved entry
-    testl   %eax,%eax                           # is resolved entry null?
-    jne     .LOP_IGET_WIDE_finish                  # no, already resolved
-    movl    %edx,OUT_ARG1(%esp)                 # for dvmResolveInstField
-    movl    rGLUE,%edx
-    jmp     .LOP_IGET_WIDE_resolve
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_IGET_OBJECT: /* 0x54 */
-/* File: x86/OP_IGET_OBJECT.S */
-/* File: x86/OP_IGET.S */
-    /*
-     * General 32-bit instance field get.
-     *
-     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
-     */
-    /* op vA, vB, field@CCCC */
-    movl    rGLUE,%ecx
-    movzwl  2(rPC),%edx                         # edx<- 0000CCCC
-    movl    offGlue_methodClassDex(%ecx),%eax   # eax<- DvmDex
-    movzbl  rINSTbl,%ecx                        # ecx<- BA
-    sarl    $4,%ecx                            # ecx<- B
-    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
-    andb    $0xf,rINSTbl                       # rINST<- A
-    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
-    movl    (%eax,%edx,4),%eax                  # resolved entry
-    testl   %eax,%eax                           # is resolved entry null?
-    jne     .LOP_IGET_OBJECT_finish                  # no, already resolved
-    movl    %edx,OUT_ARG1(%esp)                 # needed by dvmResolveInstField
-    movl    rGLUE,%edx
-    jmp     .LOP_IGET_OBJECT_resolve
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_IGET_BOOLEAN: /* 0x55 */
-/* File: x86/OP_IGET_BOOLEAN.S */
-/* File: x86/OP_IGET.S */
-    /*
-     * General 32-bit instance field get.
-     *
-     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
-     */
-    /* op vA, vB, field@CCCC */
-    movl    rGLUE,%ecx
-    movzwl  2(rPC),%edx                         # edx<- 0000CCCC
-    movl    offGlue_methodClassDex(%ecx),%eax   # eax<- DvmDex
-    movzbl  rINSTbl,%ecx                        # ecx<- BA
-    sarl    $4,%ecx                            # ecx<- B
-    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
-    andb    $0xf,rINSTbl                       # rINST<- A
-    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
-    movl    (%eax,%edx,4),%eax                  # resolved entry
-    testl   %eax,%eax                           # is resolved entry null?
-    jne     .LOP_IGET_BOOLEAN_finish                  # no, already resolved
-    movl    %edx,OUT_ARG1(%esp)                 # needed by dvmResolveInstField
-    movl    rGLUE,%edx
-    jmp     .LOP_IGET_BOOLEAN_resolve
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_IGET_BYTE: /* 0x56 */
-/* File: x86/OP_IGET_BYTE.S */
-/* File: x86/OP_IGET.S */
-    /*
-     * General 32-bit instance field get.
-     *
-     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
-     */
-    /* op vA, vB, field@CCCC */
-    movl    rGLUE,%ecx
-    movzwl  2(rPC),%edx                         # edx<- 0000CCCC
-    movl    offGlue_methodClassDex(%ecx),%eax   # eax<- DvmDex
-    movzbl  rINSTbl,%ecx                        # ecx<- BA
-    sarl    $4,%ecx                            # ecx<- B
-    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
-    andb    $0xf,rINSTbl                       # rINST<- A
-    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
-    movl    (%eax,%edx,4),%eax                  # resolved entry
-    testl   %eax,%eax                           # is resolved entry null?
-    jne     .LOP_IGET_BYTE_finish                  # no, already resolved
-    movl    %edx,OUT_ARG1(%esp)                 # needed by dvmResolveInstField
-    movl    rGLUE,%edx
-    jmp     .LOP_IGET_BYTE_resolve
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_IGET_CHAR: /* 0x57 */
-/* File: x86/OP_IGET_CHAR.S */
-/* File: x86/OP_IGET.S */
-    /*
-     * General 32-bit instance field get.
-     *
-     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
-     */
-    /* op vA, vB, field@CCCC */
-    movl    rGLUE,%ecx
-    movzwl  2(rPC),%edx                         # edx<- 0000CCCC
-    movl    offGlue_methodClassDex(%ecx),%eax   # eax<- DvmDex
-    movzbl  rINSTbl,%ecx                        # ecx<- BA
-    sarl    $4,%ecx                            # ecx<- B
-    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
-    andb    $0xf,rINSTbl                       # rINST<- A
-    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
-    movl    (%eax,%edx,4),%eax                  # resolved entry
-    testl   %eax,%eax                           # is resolved entry null?
-    jne     .LOP_IGET_CHAR_finish                  # no, already resolved
-    movl    %edx,OUT_ARG1(%esp)                 # needed by dvmResolveInstField
-    movl    rGLUE,%edx
-    jmp     .LOP_IGET_CHAR_resolve
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_IGET_SHORT: /* 0x58 */
-/* File: x86/OP_IGET_SHORT.S */
-/* File: x86/OP_IGET.S */
-    /*
-     * General 32-bit instance field get.
-     *
-     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
-     */
-    /* op vA, vB, field@CCCC */
-    movl    rGLUE,%ecx
-    movzwl  2(rPC),%edx                         # edx<- 0000CCCC
-    movl    offGlue_methodClassDex(%ecx),%eax   # eax<- DvmDex
-    movzbl  rINSTbl,%ecx                        # ecx<- BA
-    sarl    $4,%ecx                            # ecx<- B
-    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
-    andb    $0xf,rINSTbl                       # rINST<- A
-    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
-    movl    (%eax,%edx,4),%eax                  # resolved entry
-    testl   %eax,%eax                           # is resolved entry null?
-    jne     .LOP_IGET_SHORT_finish                  # no, already resolved
-    movl    %edx,OUT_ARG1(%esp)                 # needed by dvmResolveInstField
-    movl    rGLUE,%edx
-    jmp     .LOP_IGET_SHORT_resolve
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_IPUT: /* 0x59 */
-/* File: x86/OP_IPUT.S */
-
-    /*
-     * General 32-bit instance field put.
-     *
-     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
-     */
-    /* op vA, vB, field@CCCC */
-    movl    rGLUE,%ecx
-    movzwl  2(rPC),%edx                         # %edx<- 0000CCCC
-    movl    offGlue_methodClassDex(%ecx),%eax   # eax<- DvmDex
-    movzbl  rINSTbl,%ecx                        # ecx<- BA
-    sarl    $4,%ecx                            # ecx<- B
-    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
-    andb    $0xf,rINSTbl                       # rINST<- A
-    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
-    movl    (%eax,%edx,4),%eax                  # resolved entry
-    testl   %eax,%eax                           # is resolved entry null?
-    jne     .LOP_IPUT_finish                  # no, already resolved
-    movl    %edx,OUT_ARG1(%esp)
-    movl    rGLUE,%edx
-    jmp     .LOP_IPUT_resolve
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_IPUT_WIDE: /* 0x5a */
-/* File: x86/OP_IPUT_WIDE.S */
-    /*
-     * 64-bit instance field put.
-     *
-     */
-    /* op vA, vB, field@CCCC */
-    movl    rGLUE,%ecx
-    movzwl  2(rPC),%edx                         # edx<- 0000CCCC
-    movl    offGlue_methodClassDex(%ecx),%eax   # eax<- DvmDex
-    movzbl  rINSTbl,%ecx                        # ecx<- BA
-    sarl    $4,%ecx                            # ecx<- B
-    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
-    andb    $0xf,rINSTbl                       # rINST<- A
-    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
-    movl    (%eax,%edx,4),%eax                  # resolved entry
-    testl   %eax,%eax                           # is resolved entry null?
-    jne     .LOP_IPUT_WIDE_finish                  # no, already resolved
-    movl    %edx,OUT_ARG1(%esp)
-    movl    rGLUE,%edx
-    jmp     .LOP_IPUT_WIDE_resolve
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_IPUT_OBJECT: /* 0x5b */
-/* File: x86/OP_IPUT_OBJECT.S */
-    /*
-     * Object field put.
-     *
-     * for: iput-object
-     */
-    /* op vA, vB, field@CCCC */
-    movl    rGLUE,%ecx
-    movzwl  2(rPC),%edx                         # edx<- 0000CCCC
-    movl    offGlue_methodClassDex(%ecx),%eax   # eax<- DvmDex
-    movzbl  rINSTbl,%ecx                        # ecx<- BA
-    sarl    $4,%ecx                            # ecx<- B
-    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
-    andb    $0xf,rINSTbl                       # rINST<- A
-    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
-    movl    (%eax,%edx,4),%eax                  # resolved entry
-    testl   %eax,%eax                           # is resolved entry null?
-    jne     .LOP_IPUT_OBJECT_finish                  # no, already resolved
-    movl    %edx,OUT_ARG1(%esp)
-    movl    rGLUE,%edx
-    jmp     .LOP_IPUT_OBJECT_resolve
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_IPUT_BOOLEAN: /* 0x5c */
-/* File: x86/OP_IPUT_BOOLEAN.S */
-/* File: x86/OP_IPUT.S */
-
-    /*
-     * General 32-bit instance field put.
-     *
-     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
-     */
-    /* op vA, vB, field@CCCC */
-    movl    rGLUE,%ecx
-    movzwl  2(rPC),%edx                         # %edx<- 0000CCCC
-    movl    offGlue_methodClassDex(%ecx),%eax   # eax<- DvmDex
-    movzbl  rINSTbl,%ecx                        # ecx<- BA
-    sarl    $4,%ecx                            # ecx<- B
-    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
-    andb    $0xf,rINSTbl                       # rINST<- A
-    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
-    movl    (%eax,%edx,4),%eax                  # resolved entry
-    testl   %eax,%eax                           # is resolved entry null?
-    jne     .LOP_IPUT_BOOLEAN_finish                  # no, already resolved
-    movl    %edx,OUT_ARG1(%esp)
-    movl    rGLUE,%edx
-    jmp     .LOP_IPUT_BOOLEAN_resolve
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_IPUT_BYTE: /* 0x5d */
-/* File: x86/OP_IPUT_BYTE.S */
-/* File: x86/OP_IPUT.S */
-
-    /*
-     * General 32-bit instance field put.
-     *
-     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
-     */
-    /* op vA, vB, field@CCCC */
-    movl    rGLUE,%ecx
-    movzwl  2(rPC),%edx                         # %edx<- 0000CCCC
-    movl    offGlue_methodClassDex(%ecx),%eax   # eax<- DvmDex
-    movzbl  rINSTbl,%ecx                        # ecx<- BA
-    sarl    $4,%ecx                            # ecx<- B
-    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
-    andb    $0xf,rINSTbl                       # rINST<- A
-    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
-    movl    (%eax,%edx,4),%eax                  # resolved entry
-    testl   %eax,%eax                           # is resolved entry null?
-    jne     .LOP_IPUT_BYTE_finish                  # no, already resolved
-    movl    %edx,OUT_ARG1(%esp)
-    movl    rGLUE,%edx
-    jmp     .LOP_IPUT_BYTE_resolve
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_IPUT_CHAR: /* 0x5e */
-/* File: x86/OP_IPUT_CHAR.S */
-/* File: x86/OP_IPUT.S */
-
-    /*
-     * General 32-bit instance field put.
-     *
-     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
-     */
-    /* op vA, vB, field@CCCC */
-    movl    rGLUE,%ecx
-    movzwl  2(rPC),%edx                         # %edx<- 0000CCCC
-    movl    offGlue_methodClassDex(%ecx),%eax   # eax<- DvmDex
-    movzbl  rINSTbl,%ecx                        # ecx<- BA
-    sarl    $4,%ecx                            # ecx<- B
-    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
-    andb    $0xf,rINSTbl                       # rINST<- A
-    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
-    movl    (%eax,%edx,4),%eax                  # resolved entry
-    testl   %eax,%eax                           # is resolved entry null?
-    jne     .LOP_IPUT_CHAR_finish                  # no, already resolved
-    movl    %edx,OUT_ARG1(%esp)
-    movl    rGLUE,%edx
-    jmp     .LOP_IPUT_CHAR_resolve
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_IPUT_SHORT: /* 0x5f */
-/* File: x86/OP_IPUT_SHORT.S */
-/* File: x86/OP_IPUT.S */
-
-    /*
-     * General 32-bit instance field put.
-     *
-     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
-     */
-    /* op vA, vB, field@CCCC */
-    movl    rGLUE,%ecx
-    movzwl  2(rPC),%edx                         # %edx<- 0000CCCC
-    movl    offGlue_methodClassDex(%ecx),%eax   # eax<- DvmDex
-    movzbl  rINSTbl,%ecx                        # ecx<- BA
-    sarl    $4,%ecx                            # ecx<- B
-    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
-    andb    $0xf,rINSTbl                       # rINST<- A
-    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
-    movl    (%eax,%edx,4),%eax                  # resolved entry
-    testl   %eax,%eax                           # is resolved entry null?
-    jne     .LOP_IPUT_SHORT_finish                  # no, already resolved
-    movl    %edx,OUT_ARG1(%esp)
-    movl    rGLUE,%edx
-    jmp     .LOP_IPUT_SHORT_resolve
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SGET: /* 0x60 */
-/* File: x86/OP_SGET.S */
-    /*
-     * General 32-bit SGET handler.
-     *
-     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
-     */
-    /* op vAA, field@BBBB */
-    movl      rGLUE,%ecx
-    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
-    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
-    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
-    testl     %eax,%eax                          # resolved entry null?
-    je        .LOP_SGET_resolve                # if not, make it so
-.LOP_SGET_finish:     # field ptr in eax
-    movl      offStaticField_value(%eax),%eax
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    SET_VREG %eax rINST
-    GOTO_NEXT_R %edx
-
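The sget family has the same cache-then-resolve shape, but once the StaticField pointer is cached the value is read straight out of the resolved structure (offStaticField_value). Roughly, as a C sketch (simplified field access, not the literal VM code):

    /* sget fast path: StaticField already resolved and cached for this ref. */
    static uint32_t sgetFast(const StaticField* field)
    {
        return field->value.i;   /* 32-bit view of the static value union */
    }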
-/* ------------------------------ */
-    .balign 64
-.L_OP_SGET_WIDE: /* 0x61 */
-/* File: x86/OP_SGET_WIDE.S */
-    /*
-     * 64-bit SGET handler.
-     *
-     */
-    /* sget-wide vAA, field@BBBB */
-    movl      rGLUE,%ecx
-    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
-    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
-    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
-    testl     %eax,%eax                          # resolved entry null?
-    je        .LOP_SGET_WIDE_resolve                # if not, make it so
-.LOP_SGET_WIDE_finish:     # field ptr in eax
-    movl      offStaticField_value(%eax),%ecx    # ecx<- lsw
-    movl      4+offStaticField_value(%eax),%eax  # eax<- msw
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    SET_VREG_WORD %ecx rINST 0
-    SET_VREG_WORD %eax rINST 1
-    GOTO_NEXT_R %edx
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SGET_OBJECT: /* 0x62 */
-/* File: x86/OP_SGET_OBJECT.S */
-/* File: x86/OP_SGET.S */
-    /*
-     * General 32-bit SGET handler.
-     *
-     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
-     */
-    /* op vAA, field@BBBB */
-    movl      rGLUE,%ecx
-    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
-    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
-    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
-    testl     %eax,%eax                          # resolved entry null?
-    je        .LOP_SGET_OBJECT_resolve                # if not, make it so
-.LOP_SGET_OBJECT_finish:     # field ptr in eax
-    movl      offStaticField_value(%eax),%eax
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    SET_VREG %eax rINST
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SGET_BOOLEAN: /* 0x63 */
-/* File: x86/OP_SGET_BOOLEAN.S */
-/* File: x86/OP_SGET.S */
-    /*
-     * General 32-bit SGET handler.
-     *
-     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
-     */
-    /* op vAA, field@BBBB */
-    movl      rGLUE,%ecx
-    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
-    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
-    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
-    testl     %eax,%eax                          # resolved entry null?
-    je        .LOP_SGET_BOOLEAN_resolve                # if not, make it so
-.LOP_SGET_BOOLEAN_finish:     # field ptr in eax
-    movl      offStaticField_value(%eax),%eax
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    SET_VREG %eax rINST
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SGET_BYTE: /* 0x64 */
-/* File: x86/OP_SGET_BYTE.S */
-/* File: x86/OP_SGET.S */
-    /*
-     * General 32-bit SGET handler.
-     *
-     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
-     */
-    /* op vAA, field@BBBB */
-    movl      rGLUE,%ecx
-    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
-    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
-    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
-    testl     %eax,%eax                          # resolved entry null?
-    je        .LOP_SGET_BYTE_resolve                # if not, make it so
-.LOP_SGET_BYTE_finish:     # field ptr in eax
-    movl      offStaticField_value(%eax),%eax
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    SET_VREG %eax rINST
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SGET_CHAR: /* 0x65 */
-/* File: x86/OP_SGET_CHAR.S */
-/* File: x86/OP_SGET.S */
-    /*
-     * General 32-bit SGET handler.
-     *
-     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
-     */
-    /* op vAA, field@BBBB */
-    movl      rGLUE,%ecx
-    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
-    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
-    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
-    testl     %eax,%eax                          # resolved entry null?
-    je        .LOP_SGET_CHAR_resolve                # if not, make it so
-.LOP_SGET_CHAR_finish:     # field ptr in eax
-    movl      offStaticField_value(%eax),%eax
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    SET_VREG %eax rINST
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SGET_SHORT: /* 0x66 */
-/* File: x86/OP_SGET_SHORT.S */
-/* File: x86/OP_SGET.S */
-    /*
-     * General 32-bit SGET handler.
-     *
-     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
-     */
-    /* op vAA, field@BBBB */
-    movl      rGLUE,%ecx
-    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
-    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
-    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
-    testl     %eax,%eax                          # resolved entry null?
-    je        .LOP_SGET_SHORT_resolve                # if not, make it so
-.LOP_SGET_SHORT_finish:     # field ptr in eax
-    movl      offStaticField_value(%eax),%eax
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    SET_VREG %eax rINST
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SPUT: /* 0x67 */
-/* File: x86/OP_SPUT.S */
-    /*
-     * General 32-bit SPUT handler.
-     *
-     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
-     */
-    /* op vAA, field@BBBB */
-    movl      rGLUE,%ecx
-    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
-    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
-    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
-    testl     %eax,%eax                          # resolved entry null?
-    je        .LOP_SPUT_resolve                # if not, make it so
-.LOP_SPUT_finish:     # field ptr in eax
-    GET_VREG_R  %ecx rINST
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    movl      %ecx,offStaticField_value(%eax)
-    GOTO_NEXT_R %edx
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SPUT_WIDE: /* 0x68 */
-/* File: x86/OP_SPUT_WIDE.S */
-    /*
-     * 64-bit SPUT handler.
-     *
-     */
-    /* op vAA, field@BBBB */
-    movl      rGLUE,%ecx
-    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
-    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
-    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
-    testl     %eax,%eax                          # resolved entry null?
-    je        .LOP_SPUT_WIDE_resolve                # if not, make it so
-.LOP_SPUT_WIDE_finish:     # field ptr in eax
-    GET_VREG_WORD %ecx rINST 0                  # ecx<- lsw
-    GET_VREG_WORD rINST rINST 1                 # rINST<- msw
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    movl      %ecx,offStaticField_value(%eax)
-    movl      rINST,4+offStaticField_value(%eax)
-    GOTO_NEXT_R %edx
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SPUT_OBJECT: /* 0x69 */
-/* File: x86/OP_SPUT_OBJECT.S */
-    /*
-     * SPUT object handler.
-     */
-    /* op vAA, field@BBBB */
-    movl      rGLUE,%ecx
-    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
-    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
-    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField
-    testl     %eax,%eax                          # resolved entry null?
-    je        .LOP_SPUT_OBJECT_resolve                # if not, make it so
-.LOP_SPUT_OBJECT_finish:                              # field ptr in eax
-    movzbl    rINSTbl,%ecx                       # ecx<- AA
-    GET_VREG_R  %ecx %ecx
-    jmp       .LOP_SPUT_OBJECT_continue
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SPUT_BOOLEAN: /* 0x6a */
-/* File: x86/OP_SPUT_BOOLEAN.S */
-/* File: x86/OP_SPUT.S */
-    /*
-     * General 32-bit SPUT handler.
-     *
-     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
-     */
-    /* op vAA, field@BBBB */
-    movl      rGLUE,%ecx
-    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
-    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
-    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
-    testl     %eax,%eax                          # resolved entry null?
-    je        .LOP_SPUT_BOOLEAN_resolve                # if not, make it so
-.LOP_SPUT_BOOLEAN_finish:     # field ptr in eax
-    GET_VREG_R  %ecx rINST
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    movl      %ecx,offStaticField_value(%eax)
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SPUT_BYTE: /* 0x6b */
-/* File: x86/OP_SPUT_BYTE.S */
-/* File: x86/OP_SPUT.S */
-    /*
-     * General 32-bit SPUT handler.
-     *
-     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
-     */
-    /* op vAA, field@BBBB */
-    movl      rGLUE,%ecx
-    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
-    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
-    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
-    testl     %eax,%eax                          # resolved entry null?
-    je        .LOP_SPUT_BYTE_resolve                # if not, make it so
-.LOP_SPUT_BYTE_finish:     # field ptr in eax
-    GET_VREG_R  %ecx rINST
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    movl      %ecx,offStaticField_value(%eax)
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SPUT_CHAR: /* 0x6c */
-/* File: x86/OP_SPUT_CHAR.S */
-/* File: x86/OP_SPUT.S */
-    /*
-     * General 32-bit SPUT handler.
-     *
-     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
-     */
-    /* op vAA, field@BBBB */
-    movl      rGLUE,%ecx
-    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
-    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
-    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
-    testl     %eax,%eax                          # resolved entry null?
-    je        .LOP_SPUT_CHAR_resolve                # if not, make it so
-.LOP_SPUT_CHAR_finish:     # field ptr in eax
-    GET_VREG_R  %ecx rINST
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    movl      %ecx,offStaticField_value(%eax)
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SPUT_SHORT: /* 0x6d */
-/* File: x86/OP_SPUT_SHORT.S */
-/* File: x86/OP_SPUT.S */
-    /*
-     * General 32-bit SPUT handler.
-     *
-     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
-     */
-    /* op vAA, field@BBBB */
-    movl      rGLUE,%ecx
-    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
-    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
-    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
-    testl     %eax,%eax                          # resolved entry null?
-    je        .LOP_SPUT_SHORT_resolve                # if not, make it so
-.LOP_SPUT_SHORT_finish:     # field ptr in eax
-    GET_VREG_R  %ecx rINST
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    movl      %ecx,offStaticField_value(%eax)
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_INVOKE_VIRTUAL: /* 0x6e */
-/* File: x86/OP_INVOKE_VIRTUAL.S */
-
-    /*
-     * Handle a virtual method call.
-     *
-     * for: invoke-virtual, invoke-virtual/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    movl      rGLUE,%eax
-    movzwl    2(rPC),%ecx                 # ecx<- BBBB
-    movl      offGlue_methodClassDex(%eax),%eax  # eax<- pDvmDex
+    movl    rIBASE,OUT_ARG1(%esp)               # needed by dvmResolveInstField
+    movl    rSELF,rIBASE
     EXPORT_PC
-    movl      offDvmDex_pResMethods(%eax),%eax   # eax<- pDvmDex->pResMethods
-    movl      (%eax,%ecx,4),%eax          # eax<- resolved baseMethod
-    testl     %eax,%eax                   # already resolved?
-    jne       .LOP_INVOKE_VIRTUAL_continue        # yes, continue
-    movl      rGLUE,%eax
-    movl      %ecx,OUT_ARG1(%esp)         # arg1<- ref
-    movl      offGlue_method(%eax),%eax   # eax<- glue->method
-    jmp       .LOP_INVOKE_VIRTUAL_more
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_INVOKE_SUPER: /* 0x6f */
-/* File: x86/OP_INVOKE_SUPER.S */
-    /*
-     * Handle a "super" method call.
-     *
-     * for: invoke-super, invoke-super/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    movl      rGLUE,rINST
-    movzwl    2(rPC),%eax               # eax<- BBBB
-    movl      offGlue_methodClassDex(rINST),%ecx # ecx<- pDvmDex
-    EXPORT_PC
-    movl      offDvmDex_pResMethods(%ecx),%ecx # ecx<- pDvmDex->pResMethods
-    movl      (%ecx,%eax,4),%ecx        # ecx<- resolved baseMethod
-    movl      offGlue_method(rINST),%eax # eax<- method
-    movzwl    4(rPC),rINST              # rINST<- GFED or CCCC
-    .if       (!0)
-    andl      $0xf,rINST               # rINST<- D (or stays CCCC)
-    .endif
-    GET_VREG_R  rINST rINST             # rINST<- "this" ptr
-    testl     rINST,rINST               # null "this"?
-    je        common_errNullObject      # yes, throw
-    movl      offMethod_clazz(%eax),%eax # eax<- method->clazz
-    testl     %ecx,%ecx                 # already resolved?
-    jne       .LOP_INVOKE_SUPER_continue      # yes - go on
-    jmp       .LOP_INVOKE_SUPER_resolve
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_INVOKE_DIRECT: /* 0x70 */
-/* File: x86/OP_INVOKE_DIRECT.S */
-    /*
-     * Handle a direct method call.
-     *
-     * (We could defer the "is 'this' pointer null" test to the common
-     * method invocation code, and use a flag to indicate that static
-     * calls don't count.  If we do this as part of copying the arguments
-     * out we could avoid loading the first arg twice.)
-     *
-     * for: invoke-direct, invoke-direct/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    movl      rGLUE,%ecx
-    movzwl    2(rPC),%eax              # eax<- BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
-    EXPORT_PC
-    movl      offDvmDex_pResMethods(%ecx),%ecx  # ecx<- pDvmDex->pResMethods
-    movzwl    4(rPC),%edx              # edx<- GFED or CCCC
-    movl      (%ecx,%eax,4),%eax       # eax<- resolved methodToCall
-    .if       (!0)
-    andl      $0xf,%edx               # edx<- D (or stays CCCC)
-    .endif
-    testl     %eax,%eax                # already resolved?
-    GET_VREG_R  %ecx %edx              # ecx<- "this" ptr
-    je        .LOP_INVOKE_DIRECT_resolve      # not resolved, do it now
-.LOP_INVOKE_DIRECT_finish:
-    testl     %ecx,%ecx                # null "this"?
-    jne       common_invokeMethodNoRange  # no, continue on
-    jmp       common_errNullObject
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_INVOKE_STATIC: /* 0x71 */
-/* File: x86/OP_INVOKE_STATIC.S */
-    /*
-     * Handle a static method call.
-     *
-     * for: invoke-static, invoke-static/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    movl      rGLUE,%ecx
-    movzwl    2(rPC),%eax               # eax<- BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
-    EXPORT_PC
-    movl      offDvmDex_pResMethods(%ecx),%ecx  # ecx<- pDvmDex->pResMethods
-    movl      (%ecx,%eax,4),%eax        # eax<- resolved methodToCall
-    testl     %eax,%eax
-    jne       common_invokeMethodNoRange
-    movl      rGLUE,%ecx
-    movl      offGlue_method(%ecx),%ecx # ecx<- glue->method
-    movzwl    2(rPC),%eax
-    movl      offMethod_clazz(%ecx),%ecx# ecx<- method->clazz
-    movl      %eax,OUT_ARG1(%esp)       # arg1<- BBBB
-    movl      %ecx,OUT_ARG0(%esp)       # arg0<- clazz
-    jmp       .LOP_INVOKE_STATIC_continue
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_INVOKE_INTERFACE: /* 0x72 */
-/* File: x86/OP_INVOKE_INTERFACE.S */
-    /*
-     * Handle an interface method call.
-     *
-     * for: invoke-interface, invoke-interface/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    movzwl     4(rPC),%eax              # eax<- FEDC or CCCC
-    movl       rGLUE,%ecx
-    .if        (!0)
-    andl       $0xf,%eax               # eax<- C (or stays CCCC)
-    .endif
-    GET_VREG_R   %eax %eax              # eax<- "this"
-    EXPORT_PC
-    testl      %eax,%eax                # null this?
-    je         common_errNullObject     # yes, fail
-    movl       offObject_clazz(%eax),%eax# eax<- thisPtr->clazz
-    movl       %eax,OUT_ARG0(%esp)                 # arg0<- class
-    movl       offGlue_methodClassDex(%ecx),%eax   # eax<- methodClassDex
-    movl       offGlue_method(%ecx),%ecx           # ecx<- method
-    movl       %eax,OUT_ARG3(%esp)                 # arg3<- dex
-    movzwl     2(rPC),%eax                         # eax<- BBBB
-    movl       %ecx,OUT_ARG2(%esp)                 # arg2<- method
-    movl       %eax,OUT_ARG1(%esp)                 # arg1<- BBBB
-    jmp        .LOP_INVOKE_INTERFACE_continue
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_UNUSED_73: /* 0x73 */
-/* File: x86/OP_UNUSED_73.S */
-/* File: x86/unused.S */
-    jmp     common_abort
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */
-/* File: x86/OP_INVOKE_VIRTUAL_RANGE.S */
-/* File: x86/OP_INVOKE_VIRTUAL.S */
-
-    /*
-     * Handle a virtual method call.
-     *
-     * for: invoke-virtual, invoke-virtual/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    movl      rGLUE,%eax
-    movzwl    2(rPC),%ecx                 # ecx<- BBBB
-    movl      offGlue_methodClassDex(%eax),%eax  # eax<- pDvmDex
-    EXPORT_PC
-    movl      offDvmDex_pResMethods(%eax),%eax   # eax<- pDvmDex->pResMethods
-    movl      (%eax,%ecx,4),%eax          # eax<- resolved baseMethod
-    testl     %eax,%eax                   # already resolved?
-    jne       .LOP_INVOKE_VIRTUAL_RANGE_continue        # yes, continue
-    movl      rGLUE,%eax
-    movl      %ecx,OUT_ARG1(%esp)         # arg1<- ref
-    movl      offGlue_method(%eax),%eax   # eax<- glue->method
-    jmp       .LOP_INVOKE_VIRTUAL_RANGE_more
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_INVOKE_SUPER_RANGE: /* 0x75 */
-/* File: x86/OP_INVOKE_SUPER_RANGE.S */
-/* File: x86/OP_INVOKE_SUPER.S */
-    /*
-     * Handle a "super" method call.
-     *
-     * for: invoke-super, invoke-super/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    movl      rGLUE,rINST
-    movzwl    2(rPC),%eax               # eax<- BBBB
-    movl      offGlue_methodClassDex(rINST),%ecx # ecx<- pDvmDex
-    EXPORT_PC
-    movl      offDvmDex_pResMethods(%ecx),%ecx # ecx<- pDvmDex->pResMethods
-    movl      (%ecx,%eax,4),%ecx        # ecx<- resolved baseMethod
-    movl      offGlue_method(rINST),%eax # eax<- method
-    movzwl    4(rPC),rINST              # rINST<- GFED or CCCC
-    .if       (!1)
-    andl      $0xf,rINST               # rINST<- D (or stays CCCC)
-    .endif
-    GET_VREG_R  rINST rINST             # rINST<- "this" ptr
-    testl     rINST,rINST               # null "this"?
-    je        common_errNullObject      # yes, throw
-    movl      offMethod_clazz(%eax),%eax # eax<- method->clazz
-    testl     %ecx,%ecx                 # already resolved?
-    jne       .LOP_INVOKE_SUPER_RANGE_continue      # yes - go on
-    jmp       .LOP_INVOKE_SUPER_RANGE_resolve
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_INVOKE_DIRECT_RANGE: /* 0x76 */
-/* File: x86/OP_INVOKE_DIRECT_RANGE.S */
-/* File: x86/OP_INVOKE_DIRECT.S */
-    /*
-     * Handle a direct method call.
-     *
-     * (We could defer the "is 'this' pointer null" test to the common
-     * method invocation code, and use a flag to indicate that static
-     * calls don't count.  If we do this as part of copying the arguments
-     * out we could avoid loading the first arg twice.)
-     *
-     * for: invoke-direct, invoke-direct/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    movl      rGLUE,%ecx
-    movzwl    2(rPC),%eax              # eax<- BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
-    EXPORT_PC
-    movl      offDvmDex_pResMethods(%ecx),%ecx  # ecx<- pDvmDex->pResMethods
-    movzwl    4(rPC),%edx              # edx<- GFED or CCCC
-    movl      (%ecx,%eax,4),%eax       # eax<- resolved methodToCall
-    .if       (!1)
-    andl      $0xf,%edx               # edx<- D (or stays CCCC)
-    .endif
-    testl     %eax,%eax                # already resolved?
-    GET_VREG_R  %ecx %edx              # ecx<- "this" ptr
-    je        .LOP_INVOKE_DIRECT_RANGE_resolve      # not resolved, do it now
-.LOP_INVOKE_DIRECT_RANGE_finish:
-    testl     %ecx,%ecx                # null "this"?
-    jne       common_invokeMethodRange  # no, continue on
-    jmp       common_errNullObject
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_INVOKE_STATIC_RANGE: /* 0x77 */
-/* File: x86/OP_INVOKE_STATIC_RANGE.S */
-/* File: x86/OP_INVOKE_STATIC.S */
-    /*
-     * Handle a static method call.
-     *
-     * for: invoke-static, invoke-static/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    movl      rGLUE,%ecx
-    movzwl    2(rPC),%eax               # eax<- BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
-    EXPORT_PC
-    movl      offDvmDex_pResMethods(%ecx),%ecx  # ecx<- pDvmDex->pResMethods
-    movl      (%ecx,%eax,4),%eax        # eax<- resolved methodToCall
-    testl     %eax,%eax
-    jne       common_invokeMethodRange
-    movl      rGLUE,%ecx
-    movl      offGlue_method(%ecx),%ecx # ecx<- glue->method
-    movzwl    2(rPC),%eax
-    movl      offMethod_clazz(%ecx),%ecx# ecx<- method->clazz
-    movl      %eax,OUT_ARG1(%esp)       # arg1<- BBBB
-    movl      %ecx,OUT_ARG0(%esp)       # arg0<- clazz
-    jmp       .LOP_INVOKE_STATIC_RANGE_continue
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */
-/* File: x86/OP_INVOKE_INTERFACE_RANGE.S */
-/* File: x86/OP_INVOKE_INTERFACE.S */
-    /*
-     * Handle an interface method call.
-     *
-     * for: invoke-interface, invoke-interface/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    movzwl     4(rPC),%eax              # eax<- FEDC or CCCC
-    movl       rGLUE,%ecx
-    .if        (!1)
-    andl       $0xf,%eax               # eax<- C (or stays CCCC)
-    .endif
-    GET_VREG_R   %eax %eax              # eax<- "this"
-    EXPORT_PC
-    testl      %eax,%eax                # null this?
-    je         common_errNullObject     # yes, fail
-    movl       offObject_clazz(%eax),%eax# eax<- thisPtr->clazz
-    movl       %eax,OUT_ARG0(%esp)                 # arg0<- class
-    movl       offGlue_methodClassDex(%ecx),%eax   # eax<- methodClassDex
-    movl       offGlue_method(%ecx),%ecx           # ecx<- method
-    movl       %eax,OUT_ARG3(%esp)                 # arg3<- dex
-    movzwl     2(rPC),%eax                         # eax<- BBBB
-    movl       %ecx,OUT_ARG2(%esp)                 # arg2<- method
-    movl       %eax,OUT_ARG1(%esp)                 # arg1<- BBBB
-    jmp        .LOP_INVOKE_INTERFACE_RANGE_continue
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_UNUSED_79: /* 0x79 */
-/* File: x86/OP_UNUSED_79.S */
-/* File: x86/unused.S */
-    jmp     common_abort
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_UNUSED_7A: /* 0x7a */
-/* File: x86/OP_UNUSED_7A.S */
-/* File: x86/unused.S */
-    jmp     common_abort
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_NEG_INT: /* 0x7b */
-/* File: x86/OP_NEG_INT.S */
-/* File: x86/unop.S */
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op eax".
-     */
-    /* unop vA, vB */
-    movzbl   rINSTbl,%ecx           # ecx<- A+
-    sarl     $4,rINST             # rINST<- B
-    GET_VREG_R %eax rINST           # eax<- vB
-    andb     $0xf,%cl              # ecx<- A
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
-    
-    
-    negl %eax
-    SET_VREG %eax %ecx
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_NOT_INT: /* 0x7c */
-/* File: x86/OP_NOT_INT.S */
-/* File: x86/unop.S */
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op eax".
-     */
-    /* unop vA, vB */
-    movzbl   rINSTbl,%ecx           # ecx<- A+
-    sarl     $4,rINST             # rINST<- B
-    GET_VREG_R %eax rINST           # eax<- vB
-    andb     $0xf,%cl              # ecx<- A
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
-    
-    
-    notl %eax
-    SET_VREG %eax %ecx
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_NEG_LONG: /* 0x7d */
-/* File: x86/OP_NEG_LONG.S */
-    /* unop vA, vB */
-    movzbl    rINSTbl,%ecx        # ecx<- BA
-    sarl      $4,%ecx            # ecx<- B
-    andb      $0xf,rINSTbl       # rINST<- A
-    GET_VREG_WORD %eax %ecx 0     # eax<- v[B+0]
-    GET_VREG_WORD %ecx %ecx 1     # ecx<- v[B+1]
-    negl      %eax
-    adcl      $0,%ecx
-    negl      %ecx
-    FETCH_INST_OPCODE 1 %edx
-    SET_VREG_WORD %eax rINST 0    # v[A+0]<- eax
-    SET_VREG_WORD %ecx rINST 1    # v[A+1]<- ecx
-    ADVANCE_PC 1
-    GOTO_NEXT_R %edx
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_NOT_LONG: /* 0x7e */
-/* File: x86/OP_NOT_LONG.S */
-    /* unop vA, vB */
-    movzbl    rINSTbl,%ecx       # ecx<- BA
-    sarl      $4,%ecx           # ecx<- B
-    andb      $0xf,rINSTbl      # rINST<- A
-    GET_VREG_WORD %eax %ecx 0    # eax<- v[B+0]
-    GET_VREG_WORD %ecx %ecx 1    # ecx<- v[B+1]
-    FETCH_INST_OPCODE 1 %edx
-    notl      %eax
-    notl      %ecx
-    SET_VREG_WORD %eax rINST 0   # v[A+0]<- eax
-    SET_VREG_WORD %ecx rINST 1   # v[A+1]<- ecx
-    ADVANCE_PC 1
-    GOTO_NEXT_R %edx
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_NEG_FLOAT: /* 0x7f */
-/* File: x86/OP_NEG_FLOAT.S */
-/* File: x86/fpcvt.S */
-    /*
-     * Generic 32-bit FP conversion operation.
-     */
-    /* unop vA, vB */
-    movzbl   rINSTbl,%ecx       # ecx<- A+
-    sarl     $4,rINST         # rINST<- B
-    flds    (rFP,rINST,4)      # %st0<- vB
-    andb     $0xf,%cl          # ecx<- A
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
-    fchs
-    fstps  (rFP,%ecx,4)        # vA<- %st0
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_NEG_DOUBLE: /* 0x80 */
-/* File: x86/OP_NEG_DOUBLE.S */
-/* File: x86/fpcvt.S */
-    /*
-     * Generic 32-bit FP conversion operation.
-     */
-    /* unop vA, vB */
-    movzbl   rINSTbl,%ecx       # ecx<- A+
-    sarl     $4,rINST         # rINST<- B
-    fldl    (rFP,rINST,4)      # %st0<- vB
-    andb     $0xf,%cl          # ecx<- A
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
-    fchs
-    fstpl  (rFP,%ecx,4)        # vA<- %st0
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_INT_TO_LONG: /* 0x81 */
-/* File: x86/OP_INT_TO_LONG.S */
-    /* int to long vA, vB */
-    movzbl  rINSTbl,%eax                # eax<- +A
-    sarl    $4,%eax                    # eax<- B
-    GET_VREG_R %eax %eax                # eax<- vB
-    andb    $0xf,rINSTbl               # rINST<- A
-    cltd                                # edx:eax<- sssssssBBBBBBBB
-    SET_VREG_WORD %edx rINST 1          # v[A+1]<- edx/rPC
-    FETCH_INST_OPCODE 1 %edx
-    SET_VREG_WORD %eax rINST 0          # v[A+0]<- %eax
-    ADVANCE_PC 1
-    GOTO_NEXT_R %edx
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_INT_TO_FLOAT: /* 0x82 */
-/* File: x86/OP_INT_TO_FLOAT.S */
-/* File: x86/fpcvt.S */
-    /*
-     * Generic 32-bit FP conversion operation.
-     */
-    /* unop vA, vB */
-    movzbl   rINSTbl,%ecx       # ecx<- A+
-    sarl     $4,rINST         # rINST<- B
-    fildl    (rFP,rINST,4)      # %st0<- vB
-    andb     $0xf,%cl          # ecx<- A
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
-    
-    fstps  (rFP,%ecx,4)        # vA<- %st0
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_INT_TO_DOUBLE: /* 0x83 */
-/* File: x86/OP_INT_TO_DOUBLE.S */
-/* File: x86/fpcvt.S */
-    /*
-     * Generic 32-bit FP conversion operation.
-     */
-    /* unop vA, vB */
-    movzbl   rINSTbl,%ecx       # ecx<- A+
-    sarl     $4,rINST         # rINST<- B
-    fildl    (rFP,rINST,4)      # %st0<- vB
-    andb     $0xf,%cl          # ecx<- A
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
-    
-    fstpl  (rFP,%ecx,4)        # vA<- %st0
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_LONG_TO_INT: /* 0x84 */
-/* File: x86/OP_LONG_TO_INT.S */
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-/* File: x86/OP_MOVE.S */
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    movzbl rINSTbl,%eax          # eax<- BA
-    andb   $0xf,%al             # eax<- A
-    shrl   $4,rINST            # rINST<- B
-    GET_VREG_R %ecx rINST
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
-    SET_VREG %ecx %eax           # fp[A]<-fp[B]
-    GOTO_NEXT_R %edx
-
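As the OP_LONG_TO_INT comment says, dropping the high word is all the conversion requires, which is why it reuses the plain register-move template. In C terms (a sketch; two's-complement truncation is what the supported targets actually do):

    #include <stdint.h>

    static int32_t longToInt(int64_t v)
    {
        return (int32_t) v;   /* keeps only the low 32 bits */
    }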
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_LONG_TO_FLOAT: /* 0x85 */
-/* File: x86/OP_LONG_TO_FLOAT.S */
-/* File: x86/fpcvt.S */
-    /*
-     * Generic 32-bit FP conversion operation.
-     */
-    /* unop vA, vB */
-    movzbl   rINSTbl,%ecx       # ecx<- A+
-    sarl     $4,rINST         # rINST<- B
-    fildll    (rFP,rINST,4)      # %st0<- vB
-    andb     $0xf,%cl          # ecx<- A
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
-    
-    fstps  (rFP,%ecx,4)        # vA<- %st0
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_LONG_TO_DOUBLE: /* 0x86 */
-/* File: x86/OP_LONG_TO_DOUBLE.S */
-/* File: x86/fpcvt.S */
-    /*
-     * Generic 32-bit FP conversion operation.
-     */
-    /* unop vA, vB */
-    movzbl   rINSTbl,%ecx       # ecx<- A+
-    sarl     $4,rINST         # rINST<- B
-    fildll    (rFP,rINST,4)      # %st0<- vB
-    andb     $0xf,%cl          # ecx<- A
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
-    
-    fstpl  (rFP,%ecx,4)        # vA<- %st0
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_FLOAT_TO_INT: /* 0x87 */
-/* File: x86/OP_FLOAT_TO_INT.S */
-/* File: x86/cvtfp_int.S */
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint.  If it is less
- * than minint, it should be clamped to minint.  If it is a nan, the result
- * should be zero.  Further, the rounding mode is to truncate.  This model
- * differs from what is delivered normally via the x86 fpu, so we have
- * to play some games.
- */
-    /* float/double to int/long vA, vB */
-    movzbl    rINSTbl,%ecx       # ecx<- A+
-    sarl      $4,rINST         # rINST<- B
-    .if 0
-    fldl     (rFP,rINST,4)       # %st0<- vB
-    .else
-    flds     (rFP,rINST,4)       # %st0<- vB
-    .endif
-    ftst
-    fnstcw   LOCAL0_OFFSET(%ebp)      # remember original rounding mode
-    movzwl   LOCAL0_OFFSET(%ebp),%eax
-    movb     $0xc,%ah
-    movw     %ax,LOCAL0_OFFSET+2(%ebp)
-    fldcw    LOCAL0_OFFSET+2(%ebp)    # set "to zero" rounding mode
-    FETCH_INST_OPCODE 1 %edx
-    andb     $0xf,%cl                # ecx<- A
-    .if 0
-    fistpll  (rFP,%ecx,4)             # convert and store
-    .else
-    fistpl   (rFP,%ecx,4)             # convert and store
-    .endif
-    fldcw    LOCAL0_OFFSET(%ebp)      # restore previous rounding mode
-    jmp      .LOP_FLOAT_TO_INT_continue
-
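The cvtfp_int.S block above swaps in a round-toward-zero FPU control word and jumps to a clamp/NaN fixup because those are the conversion semantics the bytecode requires. The same semantics written out as a plain C sketch for reference (helper name is illustrative, not from the VM sources):

    #include <math.h>
    #include <stdint.h>

    /* Java-style float -> int: truncate toward zero, clamp out-of-range
     * values, and map NaN to zero. */
    static int32_t javaFloatToInt(float f)
    {
        if (isnan(f))
            return 0;
        if (f >= 2147483648.0f)      /* >= 2^31: clamp to maxint */
            return INT32_MAX;
        if (f < -2147483648.0f)      /* < -2^31: clamp to minint */
            return INT32_MIN;
        return (int32_t) f;          /* cast truncates toward zero */
    }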
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_FLOAT_TO_LONG: /* 0x88 */
-/* File: x86/OP_FLOAT_TO_LONG.S */
-/* File: x86/cvtfp_int.S */
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint.  If it is less
- * than minint, it should be clamped to minint.  If it is a nan, the result
- * should be zero.  Further, the rounding mode is to truncate.  This model
- * differs from what is delivered normally via the x86 fpu, so we have
- * to play some games.
- */
-    /* float/double to int/long vA, vB */
-    movzbl    rINSTbl,%ecx       # ecx<- A+
-    sarl      $4,rINST         # rINST<- B
-    .if 0
-    fldl     (rFP,rINST,4)       # %st0<- vB
-    .else
-    flds     (rFP,rINST,4)       # %st0<- vB
-    .endif
-    ftst
-    fnstcw   LOCAL0_OFFSET(%ebp)      # remember original rounding mode
-    movzwl   LOCAL0_OFFSET(%ebp),%eax
-    movb     $0xc,%ah
-    movw     %ax,LOCAL0_OFFSET+2(%ebp)
-    fldcw    LOCAL0_OFFSET+2(%ebp)    # set "to zero" rounding mode
-    FETCH_INST_OPCODE 1 %edx
-    andb     $0xf,%cl                # ecx<- A
-    .if 1
-    fistpll  (rFP,%ecx,4)             # convert and store
-    .else
-    fistpl   (rFP,%ecx,4)             # convert and store
-    .endif
-    fldcw    LOCAL0_OFFSET(%ebp)      # restore previous rounding mode
-    jmp      .LOP_FLOAT_TO_LONG_continue
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_FLOAT_TO_DOUBLE: /* 0x89 */
-/* File: x86/OP_FLOAT_TO_DOUBLE.S */
-/* File: x86/fpcvt.S */
-    /*
-     * Generic 32-bit FP conversion operation.
-     */
-    /* unop vA, vB */
-    movzbl   rINSTbl,%ecx       # ecx<- A+
-    sarl     $4,rINST         # rINST<- B
-    flds    (rFP,rINST,4)      # %st0<- vB
-    andb     $0xf,%cl          # ecx<- A
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
-    
-    fstpl  (rFP,%ecx,4)        # vA<- %st0
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_DOUBLE_TO_INT: /* 0x8a */
-/* File: x86/OP_DOUBLE_TO_INT.S */
-/* File: x86/cvtfp_int.S */
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint.  If it is less
- * than minint, it should be clamped to minint.  If it is a nan, the result
- * should be zero.  Further, the rounding mode is to truncate.  This model
- * differs from what is delivered normally via the x86 fpu, so we have
- * to play some games.
- */
-    /* float/double to int/long vA, vB */
-    movzbl    rINSTbl,%ecx       # ecx<- A+
-    sarl      $4,rINST         # rINST<- B
-    .if 1
-    fldl     (rFP,rINST,4)       # %st0<- vB
-    .else
-    flds     (rFP,rINST,4)       # %st0<- vB
-    .endif
-    ftst
-    fnstcw   LOCAL0_OFFSET(%ebp)      # remember original rounding mode
-    movzwl   LOCAL0_OFFSET(%ebp),%eax
-    movb     $0xc,%ah
-    movw     %ax,LOCAL0_OFFSET+2(%ebp)
-    fldcw    LOCAL0_OFFSET+2(%ebp)    # set "to zero" rounding mode
-    FETCH_INST_OPCODE 1 %edx
-    andb     $0xf,%cl                # ecx<- A
-    .if 0
-    fistpll  (rFP,%ecx,4)             # convert and store
-    .else
-    fistpl   (rFP,%ecx,4)             # convert and store
-    .endif
-    fldcw    LOCAL0_OFFSET(%ebp)      # restore previous rounding mode
-    jmp      .LOP_DOUBLE_TO_INT_continue
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_DOUBLE_TO_LONG: /* 0x8b */
-/* File: x86/OP_DOUBLE_TO_LONG.S */
-/* File: x86/cvtfp_int.S */
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint.  If it is less
- * than minint, it should be clamped to minint.  If it is a nan, the result
- * should be zero.  Further, the rounding mode is to truncate.  This model
- * differs from what is delivered normally via the x86 fpu, so we have
- * to play some games.
- */
-    /* float/double to int/long vA, vB */
-    movzbl    rINSTbl,%ecx       # ecx<- A+
-    sarl      $4,rINST         # rINST<- B
-    .if 1
-    fldl     (rFP,rINST,4)       # %st0<- vB
-    .else
-    flds     (rFP,rINST,4)       # %st0<- vB
-    .endif
-    ftst
-    fnstcw   LOCAL0_OFFSET(%ebp)      # remember original rounding mode
-    movzwl   LOCAL0_OFFSET(%ebp),%eax
-    movb     $0xc,%ah
-    movw     %ax,LOCAL0_OFFSET+2(%ebp)
-    fldcw    LOCAL0_OFFSET+2(%ebp)    # set "to zero" rounding mode
-    FETCH_INST_OPCODE 1 %edx
-    andb     $0xf,%cl                # ecx<- A
-    .if 1
-    fistpll  (rFP,%ecx,4)             # convert and store
-    .else
-    fistpl   (rFP,%ecx,4)             # convert and store
-    .endif
-    fldcw    LOCAL0_OFFSET(%ebp)      # restore previous rounding mode
-    jmp      .LOP_DOUBLE_TO_LONG_continue
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_DOUBLE_TO_FLOAT: /* 0x8c */
-/* File: x86/OP_DOUBLE_TO_FLOAT.S */
-/* File: x86/fpcvt.S */
-    /*
-     * Generic 32-bit FP conversion operation.
-     */
-    /* unop vA, vB */
-    movzbl   rINSTbl,%ecx       # ecx<- A+
-    sarl     $4,rINST         # rINST<- B
-    fldl    (rFP,rINST,4)      # %st0<- vB
-    andb     $0xf,%cl          # ecx<- A
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
-    
-    fstps  (rFP,%ecx,4)        # vA<- %st0
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_INT_TO_BYTE: /* 0x8d */
-/* File: x86/OP_INT_TO_BYTE.S */
-/* File: x86/unop.S */
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op eax".
-     */
-    /* unop vA, vB */
-    movzbl   rINSTbl,%ecx           # ecx<- A+
-    sarl     $4,rINST             # rINST<- B
-    GET_VREG_R %eax rINST           # eax<- vB
-    andb     $0xf,%cl              # ecx<- A
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
-    
-    
-    movsbl %al,%eax
-    SET_VREG %eax %ecx
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_INT_TO_CHAR: /* 0x8e */
-/* File: x86/OP_INT_TO_CHAR.S */
-/* File: x86/unop.S */
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op eax".
-     */
-    /* unop vA, vB */
-    movzbl   rINSTbl,%ecx           # ecx<- A+
-    sarl     $4,rINST             # rINST<- B
-    GET_VREG_R %eax rINST           # eax<- vB
-    andb     $0xf,%cl              # ecx<- A
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
-    
-    
-    movzwl %ax,%eax
-    SET_VREG %eax %ecx
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_INT_TO_SHORT: /* 0x8f */
-/* File: x86/OP_INT_TO_SHORT.S */
-/* File: x86/unop.S */
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op eax".
-     */
-    /* unop vA, vB */
-    movzbl   rINSTbl,%ecx           # ecx<- A+
-    sarl     $4,rINST             # rINST<- B
-    GET_VREG_R %eax rINST           # eax<- vB
-    andb     $0xf,%cl              # ecx<- A
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
-    
-    
-    movswl %ax,%eax
-    SET_VREG %eax %ecx
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_ADD_INT: /* 0x90 */
-/* File: x86/OP_ADD_INT.S */
-/* File: x86/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
-     * This could be an x86 instruction or a function call.  (If the result
-     * comes back in a register other than eax, you can override "result".)
-     *
-     * For: add-int, sub-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    movzbl   2(rPC),%eax   # eax<- BB
-    movzbl   3(rPC),%ecx   # ecx<- CC
-    GET_VREG_R %eax %eax   # eax<- vBB
-    addl (rFP,%ecx,4),%eax                 # ex: addl    (rFP,%ecx,4),%eax
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    SET_VREG %eax rINST
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SUB_INT: /* 0x91 */
-/* File: x86/OP_SUB_INT.S */
-/* File: x86/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
-     * This could be an x86 instruction or a function call.  (If the result
-     * comes back in a register other than eax, you can override "result".)
-     *
-     * For: add-int, sub-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    movzbl   2(rPC),%eax   # eax<- BB
-    movzbl   3(rPC),%ecx   # ecx<- CC
-    GET_VREG_R %eax %eax   # eax<- vBB
-    subl   (rFP,%ecx,4),%eax                 # ex: addl    (rFP,%ecx,4),%eax
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    SET_VREG %eax rINST
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_MUL_INT: /* 0x92 */
-/* File: x86/OP_MUL_INT.S */
-    /*
-     * 32-bit binary multiplication.
-     */
-    /* mul vAA, vBB, vCC */
-    movzbl   2(rPC),%eax            # eax<- BB
-    movzbl   3(rPC),%ecx            # ecx<- CC
-    GET_VREG_R %eax %eax            # eax<- vBB
-    imull    (rFP,%ecx,4),%eax      # trashes edx
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    SET_VREG %eax rINST
-    GOTO_NEXT_R %edx
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_DIV_INT: /* 0x93 */
-/* File: x86/OP_DIV_INT.S */
-/* File: x86/bindiv.S */
-
-    /*
-     * 32-bit binary div/rem operation.  Handles special case of op0=minint and
-     * op1=-1.
-     */
-    /* binop vAA, vBB, vCC */
-    movzbl   2(rPC),%eax            # eax<- BB
-    movzbl   3(rPC),%ecx            # ecx<- CC
-    GET_VREG_R %eax %eax            # eax<- vBB
-    GET_VREG_R %ecx %ecx            # ecx<- vCC
-    cmpl     $0,%ecx
-    je       common_errDivideByZero
-    cmpl     $-1,%ecx
-    jne      .LOP_DIV_INT_continue_div
-    cmpl     $0x80000000,%eax
-    jne      .LOP_DIV_INT_continue_div
-    movl     $0x80000000,%eax
-    jmp      .LOP_DIV_INT_finish_div
-
-
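The minint/-1 special case called out in bindiv.S exists because a native idiv of 0x80000000 by -1 overflows and faults on x86, while the bytecode semantics require the wrapped quotient (and a remainder of 0). A small C sketch of the required behaviour (helper names are illustrative):

    #include <stdint.h>

    static int32_t dalvikDivInt(int32_t num, int32_t den)
    {
        /* den == 0 has already been routed to the divide-by-zero throw. */
        if (num == INT32_MIN && den == -1)
            return INT32_MIN;        /* wrap instead of trapping */
        return num / den;
    }

    static int32_t dalvikRemInt(int32_t num, int32_t den)
    {
        if (num == INT32_MIN && den == -1)
            return 0;                /* matches the movl $0,%edx path above */
        return num % den;
    }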
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_REM_INT: /* 0x94 */
-/* File: x86/OP_REM_INT.S */
-/* File: x86/bindiv.S */
-
-    /*
-     * 32-bit binary div/rem operation.  Handles special case of op0=minint and
-     * op1=-1.
-     */
-    /* binop vAA, vBB, vCC */
-    movzbl   2(rPC),%eax            # eax<- BB
-    movzbl   3(rPC),%ecx            # ecx<- CC
-    GET_VREG_R %eax %eax            # eax<- vBB
-    GET_VREG_R %ecx %ecx            # ecx<- vCC
-    cmpl     $0,%ecx
-    je       common_errDivideByZero
-    cmpl     $-1,%ecx
-    jne      .LOP_REM_INT_continue_div
-    cmpl     $0x80000000,%eax
-    jne      .LOP_REM_INT_continue_div
-    movl     $0,%edx
-    jmp      .LOP_REM_INT_finish_div
-
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_AND_INT: /* 0x95 */
-/* File: x86/OP_AND_INT.S */
-/* File: x86/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
-     * This could be an x86 instruction or a function call.  (If the result
-     * comes back in a register other than eax, you can override "result".)
-     *
-     * For: add-int, sub-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    movzbl   2(rPC),%eax   # eax<- BB
-    movzbl   3(rPC),%ecx   # ecx<- CC
-    GET_VREG_R %eax %eax   # eax<- vBB
-    andl   (rFP,%ecx,4),%eax                 # ex: addl    (rFP,%ecx,4),%eax
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    SET_VREG %eax rINST
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_OR_INT: /* 0x96 */
-/* File: x86/OP_OR_INT.S */
-/* File: x86/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
-     * This could be an x86 instruction or a function call.  (If the result
-     * comes back in a register other than eax, you can override "result".)
-     *
-     * For: add-int, sub-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    movzbl   2(rPC),%eax   # eax<- BB
-    movzbl   3(rPC),%ecx   # ecx<- CC
-    GET_VREG_R %eax %eax   # eax<- vBB
-    orl   (rFP,%ecx,4),%eax                 # ex: addl    (rFP,%ecx,4),%eax
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    SET_VREG %eax rINST
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_XOR_INT: /* 0x97 */
-/* File: x86/OP_XOR_INT.S */
-/* File: x86/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
-     * This could be an x86 instruction or a function call.  (If the result
-     * comes back in a register other than eax, you can override "result".)
-     *
-     * For: add-int, sub-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    movzbl   2(rPC),%eax   # eax<- BB
-    movzbl   3(rPC),%ecx   # ecx<- CC
-    GET_VREG_R %eax %eax   # eax<- vBB
-    xorl   (rFP,%ecx,4),%eax                 # ex: addl    (rFP,%ecx,4),%eax
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    SET_VREG %eax rINST
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SHL_INT: /* 0x98 */
-/* File: x86/OP_SHL_INT.S */
-/* File: x86/binop1.S */
-    /*
-     * Generic 32-bit binary operation in which both operands loaded to
-     * registers (op0 in eax, op1 in ecx).
-     */
-    /* binop vAA, vBB, vCC */
-    movzbl   2(rPC),%eax            # eax<- BB
-    movzbl   3(rPC),%ecx            # ecx<- CC
-    GET_VREG_R %eax %eax            # eax<- vBB
-    GET_VREG_R %ecx %ecx            # ecx<- vCC
-    sall    %cl,%eax                          # ex: addl    %ecx,%eax
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    SET_VREG %eax rINST
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SHR_INT: /* 0x99 */
-/* File: x86/OP_SHR_INT.S */
-/* File: x86/binop1.S */
-    /*
-     * Generic 32-bit binary operation in which both operands loaded to
-     * registers (op0 in eax, op1 in ecx).
-     */
-    /* binop vAA, vBB, vCC */
-    movzbl   2(rPC),%eax            # eax<- BB
-    movzbl   3(rPC),%ecx            # ecx<- CC
-    GET_VREG_R %eax %eax            # eax<- vBB
-    GET_VREG_R %ecx %ecx            # ecx<- vCC
-    sarl    %cl,%eax                          # ex: addl    %ecx,%eax
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    SET_VREG %eax rINST
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_USHR_INT: /* 0x9a */
-/* File: x86/OP_USHR_INT.S */
-/* File: x86/binop1.S */
-    /*
-     * Generic 32-bit binary operation in which both operands loaded to
-     * registers (op0 in eax, op1 in ecx).
-     */
-    /* binop vAA, vBB, vCC */
-    movzbl   2(rPC),%eax            # eax<- BB
-    movzbl   3(rPC),%ecx            # ecx<- CC
-    GET_VREG_R %eax %eax            # eax<- vBB
-    GET_VREG_R %ecx %ecx            # ecx<- vCC
-    shrl    %cl,%eax                          # ex: addl    %ecx,%eax
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    SET_VREG %eax rINST
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_ADD_LONG: /* 0x9b */
-/* File: x86/OP_ADD_LONG.S */
-/* File: x86/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.
-     */
-    /* binop vAA, vBB, vCC */
-
-    movzbl    2(rPC),%eax               # eax<- BB
-    movzbl    3(rPC),%ecx               # ecx<- CC
-    GET_VREG_WORD %edx %eax 0           # edx<- v[BB+0]
-    GET_VREG_WORD %eax %eax 1           # eax<- v[BB+1]
-    addl (rFP,%ecx,4),%edx         # ex: addl   (rFP,%ecx,4),%edx
-    adcl 4(rFP,%ecx,4),%eax         # ex: adcl   4(rFP,%ecx,4),%eax
-    SET_VREG_WORD %edx rINST 0          # v[AA+0] <- edx
-    FETCH_INST_OPCODE 2 %edx
-    SET_VREG_WORD %eax rINST 1          # v[AA+1] <- eax
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SUB_LONG: /* 0x9c */
-/* File: x86/OP_SUB_LONG.S */
-/* File: x86/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.
-     */
-    /* binop vAA, vBB, vCC */
-
-    movzbl    2(rPC),%eax               # eax<- BB
-    movzbl    3(rPC),%ecx               # ecx<- CC
-    GET_VREG_WORD %edx %eax 0           # edx<- v[BB+0]
-    GET_VREG_WORD %eax %eax 1           # eax<- v[BB+1]
-    subl (rFP,%ecx,4),%edx         # ex: addl   (rFP,%ecx,4),%edx
-    sbbl 4(rFP,%ecx,4),%eax         # ex: adcl   4(rFP,%ecx,4),%eax
-    SET_VREG_WORD %edx rINST 0          # v[AA+0] <- edx
-    FETCH_INST_OPCODE 2 %edx
-    SET_VREG_WORD %eax rINST 1          # v[AA+1] <- eax
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_MUL_LONG: /* 0x9d */
-/* File: x86/OP_MUL_LONG.S */
-    /*
-     * Signed 64-bit integer multiply.
-     *
-     * We could definitely use more free registers for
-     * this code.   We spill rINSTw (ebx),
-     * giving us eax, ebx, ecx and edx as computational
-     * temps.  On top of that, we'll spill edi (rFP)
-     * for use as the vB pointer and esi (rPC) for use
-     * as the vC pointer.  Yuck.
-     */
-    /* mul-long vAA, vBB, vCC */
-    movzbl    2(rPC),%eax              # eax<- B
-    movzbl    3(rPC),%ecx              # ecx<- C
-    SPILL_TMP2(%esi)                   # save Dalvik PC
-    SPILL(rFP)
-    SPILL(rINST)
-    leal      (rFP,%eax,4),%esi        # esi<- &v[B]
-    leal      (rFP,%ecx,4),rFP         # rFP<- &v[C]
-    movl      4(%esi),%ecx             # ecx<- Bmsw
-    imull     (rFP),%ecx               # ecx<- (Bmsw*Clsw)
-    movl      4(rFP),%eax              # eax<- Cmsw
-    imull     (%esi),%eax              # eax<- (Cmsw*Blsw)
-    addl      %eax,%ecx                # ecx<- (Bmsw*Clsw)+(Cmsw*Blsw)
-    movl      (rFP),%eax               # eax<- Clsw
-    mull      (%esi)                   # eax<- (Clsw*Blsw)
-    UNSPILL(rINST)
-    UNSPILL(rFP)
-    jmp       .LOP_MUL_LONG_continue
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_DIV_LONG: /* 0x9e */
-/* File: x86/OP_DIV_LONG.S */
-    /* div vAA, vBB, vCC */
-    movzbl    3(rPC),%eax              # eax<- CC
-    movzbl    2(rPC),%ecx              # ecx<- BB
-    GET_VREG_WORD %edx %eax 0
-    GET_VREG_WORD %eax %eax 1
-    movl     %edx,OUT_ARG2(%esp)
-    testl    %eax,%eax
-    je       .LOP_DIV_LONG_check_zero
-    cmpl     $-1,%eax
-    je       .LOP_DIV_LONG_check_neg1
-.LOP_DIV_LONG_notSpecial:
-    GET_VREG_WORD %edx %ecx 0
-    GET_VREG_WORD %ecx %ecx 1
-.LOP_DIV_LONG_notSpecial1:
-    movl     %eax,OUT_ARG3(%esp)
-    movl     %edx,OUT_ARG0(%esp)
-    movl     %ecx,OUT_ARG1(%esp)
-    jmp      .LOP_DIV_LONG_continue
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_REM_LONG: /* 0x9f */
-/* File: x86/OP_REM_LONG.S */
-/* File: x86/OP_DIV_LONG.S */
-    /* div vAA, vBB, vCC */
-    movzbl    3(rPC),%eax              # eax<- CC
-    movzbl    2(rPC),%ecx              # ecx<- BB
-    GET_VREG_WORD %edx %eax 0
-    GET_VREG_WORD %eax %eax 1
-    movl     %edx,OUT_ARG2(%esp)
-    testl    %eax,%eax
-    je       .LOP_REM_LONG_check_zero
-    cmpl     $-1,%eax
-    je       .LOP_REM_LONG_check_neg1
-.LOP_REM_LONG_notSpecial:
-    GET_VREG_WORD %edx %ecx 0
-    GET_VREG_WORD %ecx %ecx 1
-.LOP_REM_LONG_notSpecial1:
-    movl     %eax,OUT_ARG3(%esp)
-    movl     %edx,OUT_ARG0(%esp)
-    movl     %ecx,OUT_ARG1(%esp)
-    jmp      .LOP_REM_LONG_continue
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_AND_LONG: /* 0xa0 */
-/* File: x86/OP_AND_LONG.S */
-/* File: x86/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.
-     */
-    /* binop vAA, vBB, vCC */
-
-    movzbl    2(rPC),%eax               # eax<- BB
-    movzbl    3(rPC),%ecx               # ecx<- CC
-    GET_VREG_WORD %edx %eax 0           # edx<- v[BB+0]
-    GET_VREG_WORD %eax %eax 1           # eax<- v[BB+1]
-    andl (rFP,%ecx,4),%edx         # ex: addl   (rFP,%ecx,4),%edx
-    andl 4(rFP,%ecx,4),%eax         # ex: adcl   4(rFP,%ecx,4),%eax
-    SET_VREG_WORD %edx rINST 0          # v[AA+0] <- edx
-    FETCH_INST_OPCODE 2 %edx
-    SET_VREG_WORD %eax rINST 1          # v[AA+1] <- eax
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_OR_LONG: /* 0xa1 */
-/* File: x86/OP_OR_LONG.S */
-/* File: x86/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.
-     */
-    /* binop vAA, vBB, vCC */
-
-    movzbl    2(rPC),%eax               # eax<- BB
-    movzbl    3(rPC),%ecx               # ecx<- CC
-    GET_VREG_WORD %edx %eax 0           # edx<- v[BB+0]
-    GET_VREG_WORD %eax %eax 1           # eax<- v[BB+1]
-    orl (rFP,%ecx,4),%edx         # ex: addl   (rFP,%ecx,4),%edx
-    orl 4(rFP,%ecx,4),%eax         # ex: adcl   4(rFP,%ecx,4),%eax
-    SET_VREG_WORD %edx rINST 0          # v[AA+0] <- edx
-    FETCH_INST_OPCODE 2 %edx
-    SET_VREG_WORD %eax rINST 1          # v[AA+1] <- eax
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_XOR_LONG: /* 0xa2 */
-/* File: x86/OP_XOR_LONG.S */
-/* File: x86/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.
-     */
-    /* binop vAA, vBB, vCC */
-
-    movzbl    2(rPC),%eax               # eax<- BB
-    movzbl    3(rPC),%ecx               # ecx<- CC
-    GET_VREG_WORD %edx %eax 0           # edx<- v[BB+0]
-    GET_VREG_WORD %eax %eax 1           # eax<- v[BB+1]
-    xorl (rFP,%ecx,4),%edx         # ex: addl   (rFP,%ecx,4),%edx
-    xorl 4(rFP,%ecx,4),%eax         # ex: adcl   4(rFP,%ecx,4),%eax
-    SET_VREG_WORD %edx rINST 0          # v[AA+0] <- edx
-    FETCH_INST_OPCODE 2 %edx
-    SET_VREG_WORD %eax rINST 1          # v[AA+1] <- eax
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SHL_LONG: /* 0xa3 */
-/* File: x86/OP_SHL_LONG.S */
-    /*
-     * Long integer shift.  This is different from the generic 32/64-bit
-     * binary operations because vAA/vBB are 64-bit but vCC (the shift
-     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
-     * 6 bits of the shift distance.  x86 shifts automatically mask off
-     * the low 5 bits of %cl, so have to handle the 64 > shiftcount > 31
-     * case specially.
-     */
-    /* shl-long vAA, vBB, vCC */
-    /* ecx gets shift count */
-    /* Need to spill edx */
-    /* rINSTw gets AA */
-    movzbl    2(rPC),%eax               # eax<- BB
-    movzbl    3(rPC),%ecx               # ecx<- CC
-    GET_VREG_WORD %edx %eax 1           # edx<- v[BB+1]
-    GET_VREG_R   %ecx %ecx              # ecx<- vCC
-    GET_VREG_WORD %eax %eax 0           # eax<- v[BB+0]
-    shldl     %eax,%edx
-    sall      %cl,%eax
-    testb     $32,%cl
-    je        2f
-    movl      %eax,%edx
-    xorl      %eax,%eax
-2:
-    SET_VREG_WORD %edx rINST 1          # v[AA+1]<- %edx
-    FETCH_INST_OPCODE 2 %edx
-    jmp       .LOP_SHL_LONG_finish
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SHR_LONG: /* 0xa4 */
-/* File: x86/OP_SHR_LONG.S */
-    /*
-     * Long integer shift.  This is different from the generic 32/64-bit
-     * binary operations because vAA/vBB are 64-bit but vCC (the shift
-     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
-     * 6 bits of the shift distance.  x86 shifts automatically mask off
-     * the low 5 bits of %cl, so have to handle the 64 > shiftcount > 31
-     * case specially.
-     */
-    /* shr-long vAA, vBB, vCC */
-    /* ecx gets shift count */
-    /* Need to spill edx */
-    /* rINSTw gets AA */
-    movzbl    2(rPC),%eax               # eax<- BB
-    movzbl    3(rPC),%ecx               # ecx<- CC
-    GET_VREG_WORD %edx %eax 1           # edx<- v[BB+1]
-    GET_VREG_R   %ecx %ecx              # ecx<- vCC
-    GET_VREG_WORD %eax %eax 0           # eax<- v[BB+0]
-    shrdl     %edx,%eax
-    sarl      %cl,%edx
-    testb     $32,%cl
-    je        2f
-    movl      %edx,%eax
-    sarl      $31,%edx
-2:
-    SET_VREG_WORD %edx rINST 1          # v[AA+1]<- edx
-    FETCH_INST_OPCODE 2 %edx
-    jmp       .LOP_SHR_LONG_finish
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_USHR_LONG: /* 0xa5 */
-/* File: x86/OP_USHR_LONG.S */
-    /*
-     * Long integer shift.  This is different from the generic 32/64-bit
-     * binary operations because vAA/vBB are 64-bit but vCC (the shift
-     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
-     * 6 bits of the shift distance.  x86 shifts automatically mask off
-     * the low 5 bits of %cl, so have to handle the 64 > shiftcount > 31
-     * case specially.
-     */
-    /* ushr-long vAA, vBB, vCC */
-    /* ecx gets shift count */
-    /* Need to spill edx */
-    /* rINSTw gets AA */
-    movzbl    2(rPC),%eax               # eax<- BB
-    movzbl    3(rPC),%ecx               # ecx<- CC
-    GET_VREG_WORD %edx %eax 1           # edx<- v[BB+1]
-    GET_VREG_R  %ecx %ecx               # ecx<- vCC
-    GET_VREG_WORD %eax %eax 0           # eax<- v[BB+0]
-    shrdl     %edx,%eax
-    shrl      %cl,%edx
-    testb     $32,%cl
-    je        2f
-    movl      %edx,%eax
-    xorl      %edx,%edx
-2:
-    SET_VREG_WORD %edx rINST 1          # v[AA+1]<- edx
-    FETCH_INST_OPCODE 2 %edx
-    jmp       .LOP_USHR_LONG_finish
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_ADD_FLOAT: /* 0xa6 */
-/* File: x86/OP_ADD_FLOAT.S */
-/* File: x86/binflop.S */
-    /*
-     * Generic 32-bit binary float operation.
-     *
-     * For: add-fp, sub-fp, mul-fp, div-fp
-     */
-    /* binop vAA, vBB, vCC */
-    movzbl   2(rPC),%eax          # eax<- BB
-    movzbl   3(rPC),%ecx          # ecx<- CC
-    flds    (rFP,%eax,4)         # vBB to fp stack
-    fadds   (rFP,%ecx,4)         # ex: faddp
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    fstps   (rFP,rINST,4)         # %st to vAA
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SUB_FLOAT: /* 0xa7 */
-/* File: x86/OP_SUB_FLOAT.S */
-/* File: x86/binflop.S */
-    /*
-     * Generic 32-bit binary float operation.
-     *
-     * For: add-fp, sub-fp, mul-fp, div-fp
-     */
-    /* binop vAA, vBB, vCC */
-    movzbl   2(rPC),%eax          # eax<- BB
-    movzbl   3(rPC),%ecx          # ecx<- CC
-    flds    (rFP,%eax,4)         # vBB to fp stack
-    fsubs   (rFP,%ecx,4)         # ex: faddp
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    fstps   (rFP,rINST,4)         # %st to vAA
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_MUL_FLOAT: /* 0xa8 */
-/* File: x86/OP_MUL_FLOAT.S */
-/* File: x86/binflop.S */
-    /*
-     * Generic 32-bit binary float operation.
-     *
-     * For: add-fp, sub-fp, mul-fp, div-fp
-     */
-    /* binop vAA, vBB, vCC */
-    movzbl   2(rPC),%eax          # eax<- BB
-    movzbl   3(rPC),%ecx          # ecx<- CC
-    flds    (rFP,%eax,4)         # vBB to fp stack
-    fmuls   (rFP,%ecx,4)         # ex: faddp
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    fstps   (rFP,rINST,4)         # %st to vAA
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_DIV_FLOAT: /* 0xa9 */
-/* File: x86/OP_DIV_FLOAT.S */
-/* File: x86/binflop.S */
-    /*
-     * Generic 32-bit binary float operation.
-     *
-     * For: add-fp, sub-fp, mul-fp, div-fp
-     */
-    /* binop vAA, vBB, vCC */
-    movzbl   2(rPC),%eax          # eax<- BB
-    movzbl   3(rPC),%ecx          # ecx<- CC
-    flds    (rFP,%eax,4)         # vBB to fp stack
-    fdivs   (rFP,%ecx,4)         # ex: faddp
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    fstps   (rFP,rINST,4)         # %st to vAA
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_REM_FLOAT: /* 0xaa */
-/* File: x86/OP_REM_FLOAT.S */
-    /* rem_float vAA, vBB, vCC */
-    movzbl   3(rPC),%ecx            # ecx<- CC
-    movzbl   2(rPC),%eax            # eax<- BB
-    flds     (rFP,%ecx,4)           # vCC to fp stack
-    flds     (rFP,%eax,4)           # vBB to fp stack
-    movzbl   rINSTbl,%ecx           # ecx<- AA
-    FETCH_INST_OPCODE 2 %edx
-1:
-    fprem
-    fstsw     %ax
-    sahf
-    jp        1b
-    fstp      %st(1)
-    ADVANCE_PC 2
-    fstps    (rFP,%ecx,4)           # %st to vAA
-    GOTO_NEXT_R %edx
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_ADD_DOUBLE: /* 0xab */
-/* File: x86/OP_ADD_DOUBLE.S */
-/* File: x86/binflop.S */
-    /*
-     * Generic 64-bit binary floating point operation.
-     *
-     * For: add-fp, sub-fp, mul-fp, div-fp
-     */
-    /* binop vAA, vBB, vCC */
-    movzbl   2(rPC),%eax          # eax<- BB
-    movzbl   3(rPC),%ecx          # ecx<- CC
-    fldl    (rFP,%eax,4)         # vBB to fp stack
-    faddl   (rFP,%ecx,4)         # ex: faddp
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    fstpl   (rFP,rINST,4)         # %st to vAA
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SUB_DOUBLE: /* 0xac */
-/* File: x86/OP_SUB_DOUBLE.S */
-/* File: x86/binflop.S */
-    /*
-     * Generic 64-bit binary floating point operation.
-     *
-     * For: add-fp, sub-fp, mul-fp, div-fp
-     */
-    /* binop vAA, vBB, vCC */
-    movzbl   2(rPC),%eax          # eax<- BB
-    movzbl   3(rPC),%ecx          # ecx<- CC
-    fldl    (rFP,%eax,4)         # vBB to fp stack
-    fsubl   (rFP,%ecx,4)         # ex: faddp
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    fstpl   (rFP,rINST,4)         # %st to vAA
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_MUL_DOUBLE: /* 0xad */
-/* File: x86/OP_MUL_DOUBLE.S */
-/* File: x86/binflop.S */
-    /*
-     * Generic 64-bit binary floating point operation.
-     *
-     * For: add-fp, sub-fp, mul-fp, div-fp
-     */
-    /* binop vAA, vBB, vCC */
-    movzbl   2(rPC),%eax          # eax<- BB
-    movzbl   3(rPC),%ecx          # ecx<- CC
-    fldl    (rFP,%eax,4)         # vBB to fp stack
-    fmull   (rFP,%ecx,4)         # ex: faddp
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    fstpl   (rFP,rINST,4)         # %st to vAA
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_DIV_DOUBLE: /* 0xae */
-/* File: x86/OP_DIV_DOUBLE.S */
-/* File: x86/binflop.S */
-    /*
-     * Generic 64-bit binary floating point operation.
-     *
-     * For: add-fp, sub-fp, mul-fp, div-fp
-     */
-    /* binop vAA, vBB, vCC */
-    movzbl   2(rPC),%eax          # eax<- BB
-    movzbl   3(rPC),%ecx          # ecx<- CC
-    fldl    (rFP,%eax,4)         # vBB to fp stack
-    fdivl   (rFP,%ecx,4)         # ex: faddp
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    fstpl   (rFP,rINST,4)         # %st to vAA
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_REM_DOUBLE: /* 0xaf */
-/* File: x86/OP_REM_DOUBLE.S */
-    /* rem_double vAA, vBB, vCC */
-    movzbl   3(rPC),%ecx            # ecx<- CC
-    movzbl   2(rPC),%eax            # eax<- BB
-    fldl     (rFP,%ecx,4)           # vCC to fp stack
-    fldl     (rFP,%eax,4)           # vBB to fp stack
-    movzbl   rINSTbl,%ecx           # ecx<- AA
-    FETCH_INST_OPCODE 2 %edx
-1:
-    fprem
-    fstsw     %ax
-    sahf
-    jp        1b
-    fstp      %st(1)
-    ADVANCE_PC 2
-    fstpl    (rFP,%ecx,4)           # %st to vAA
-    GOTO_NEXT_R %edx
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_ADD_INT_2ADDR: /* 0xb0 */
-/* File: x86/OP_ADD_INT_2ADDR.S */
-/* File: x86/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = (rFP,%ecx,4) op eax".
-     * This could be an x86 instruction or a function call.  (If the result
-     * comes back in a register other than eax, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (eax).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
-     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    movzx   rINSTbl,%ecx               # ecx<- A+
-    sarl    $4,rINST                 # rINST<- B
-    GET_VREG_R %eax rINST              # eax<- vB
-    FETCH_INST_OPCODE 1 %edx
-    andb    $0xf,%cl                  # ecx<- A
-    addl     %eax,(rFP,%ecx,4)                             # for ex: addl   %eax,(rFP,%ecx,4)
-    ADVANCE_PC 1
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SUB_INT_2ADDR: /* 0xb1 */
-/* File: x86/OP_SUB_INT_2ADDR.S */
-/* File: x86/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = (rFP,%ecx,4) op eax".
-     * This could be an x86 instruction or a function call.  (If the result
-     * comes back in a register other than eax, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (eax).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
-     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    movzx   rINSTbl,%ecx               # ecx<- A+
-    sarl    $4,rINST                 # rINST<- B
-    GET_VREG_R %eax rINST              # eax<- vB
-    FETCH_INST_OPCODE 1 %edx
-    andb    $0xf,%cl                  # ecx<- A
-    subl     %eax,(rFP,%ecx,4)                             # for ex: addl   %eax,(rFP,%ecx,4)
-    ADVANCE_PC 1
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_MUL_INT_2ADDR: /* 0xb2 */
-/* File: x86/OP_MUL_INT_2ADDR.S */
-    /* mul vA, vB */
-    movzx   rINSTbl,%ecx               # ecx<- A+
-    sarl    $4,rINST                 # rINST<- B
-    GET_VREG_R %eax rINST              # eax<- vB
-    andb    $0xf,%cl                  # ecx<- A
-    imull   (rFP,%ecx,4),%eax
-    FETCH_INST_OPCODE 1 %edx
-    SET_VREG %eax %ecx
-    ADVANCE_PC 1
-    GOTO_NEXT_R %edx
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_DIV_INT_2ADDR: /* 0xb3 */
-/* File: x86/OP_DIV_INT_2ADDR.S */
-/* File: x86/bindiv2addr.S */
-    /*
-     * 32-bit binary div/rem operation.  Handles special case of op0=minint and
-     * op1=-1.
-     */
-    /* div/rem/2addr vA, vB */
-    movzx    rINSTbl,%ecx          # ecx<- BA
-    sarl     $4,%ecx              # ecx<- B
-    GET_VREG_R %ecx %ecx           # ecx<- vB
-    andb     $0xf,rINSTbl         # rINST<- A
-    GET_VREG_R %eax rINST          # eax<- vA
-    cmpl     $0,%ecx
-    je       common_errDivideByZero
-    cmpl     $-1,%ecx
-    jne      .LOP_DIV_INT_2ADDR_continue_div2addr
-    cmpl     $0x80000000,%eax
-    jne      .LOP_DIV_INT_2ADDR_continue_div2addr
-    movl     $0x80000000,%eax
-    jmp      .LOP_DIV_INT_2ADDR_finish_div2addr
-
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_REM_INT_2ADDR: /* 0xb4 */
-/* File: x86/OP_REM_INT_2ADDR.S */
-/* File: x86/bindiv2addr.S */
-    /*
-     * 32-bit binary div/rem operation.  Handles special case of op0=minint and
-     * op1=-1.
-     */
-    /* div/rem/2addr vA, vB */
-    movzx    rINSTbl,%ecx          # ecx<- BA
-    sarl     $4,%ecx              # ecx<- B
-    GET_VREG_R %ecx %ecx           # ecx<- vB
-    andb     $0xf,rINSTbl         # rINST<- A
-    GET_VREG_R %eax rINST          # eax<- vA
-    cmpl     $0,%ecx
-    je       common_errDivideByZero
-    cmpl     $-1,%ecx
-    jne      .LOP_REM_INT_2ADDR_continue_div2addr
-    cmpl     $0x80000000,%eax
-    jne      .LOP_REM_INT_2ADDR_continue_div2addr
-    movl     $0,%edx
-    jmp      .LOP_REM_INT_2ADDR_finish_div2addr
-
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_AND_INT_2ADDR: /* 0xb5 */
-/* File: x86/OP_AND_INT_2ADDR.S */
-/* File: x86/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = (rFP,%ecx,4) op eax".
-     * This could be an x86 instruction or a function call.  (If the result
-     * comes back in a register other than eax, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (eax).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
-     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    movzx   rINSTbl,%ecx               # ecx<- A+
-    sarl    $4,rINST                 # rINST<- B
-    GET_VREG_R %eax rINST              # eax<- vB
-    FETCH_INST_OPCODE 1 %edx
-    andb    $0xf,%cl                  # ecx<- A
-    andl     %eax,(rFP,%ecx,4)                             # for ex: addl   %eax,(rFP,%ecx,4)
-    ADVANCE_PC 1
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_OR_INT_2ADDR: /* 0xb6 */
-/* File: x86/OP_OR_INT_2ADDR.S */
-/* File: x86/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = (rFP,%ecx,4) op eax".
-     * This could be an x86 instruction or a function call.  (If the result
-     * comes back in a register other than eax, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (eax).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
-     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    movzx   rINSTbl,%ecx               # ecx<- A+
-    sarl    $4,rINST                 # rINST<- B
-    GET_VREG_R %eax rINST              # eax<- vB
-    FETCH_INST_OPCODE 1 %edx
-    andb    $0xf,%cl                  # ecx<- A
-    orl     %eax,(rFP,%ecx,4)                             # for ex: addl   %eax,(rFP,%ecx,4)
-    ADVANCE_PC 1
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_XOR_INT_2ADDR: /* 0xb7 */
-/* File: x86/OP_XOR_INT_2ADDR.S */
-/* File: x86/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = (rFP,%ecx,4) op eax".
-     * This could be an x86 instruction or a function call.  (If the result
-     * comes back in a register other than eax, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (eax).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
-     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    movzx   rINSTbl,%ecx               # ecx<- A+
-    sarl    $4,rINST                 # rINST<- B
-    GET_VREG_R %eax rINST              # eax<- vB
-    FETCH_INST_OPCODE 1 %edx
-    andb    $0xf,%cl                  # ecx<- A
-    xorl     %eax,(rFP,%ecx,4)                             # for ex: addl   %eax,(rFP,%ecx,4)
-    ADVANCE_PC 1
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SHL_INT_2ADDR: /* 0xb8 */
-/* File: x86/OP_SHL_INT_2ADDR.S */
-/* File: x86/shop2addr.S */
-    /*
-     * Generic 32-bit "shift/2addr" operation.
-     */
-    /* shift/2addr vA, vB */
-    movzx    rINSTbl,%ecx           # ecx<- BA
-    sarl     $4,%ecx               # ecx<- B
-    GET_VREG_R %ecx %ecx            # ecx<- vB
-    andb     $0xf,rINSTbl          # rINST<- A
-    GET_VREG_R %eax rINST           # eax<- vA
-    sall    %cl,%eax                          # ex: sarl %cl,%eax
-    FETCH_INST_OPCODE 1 %edx
-    SET_VREG %eax rINST
-    ADVANCE_PC 1
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SHR_INT_2ADDR: /* 0xb9 */
-/* File: x86/OP_SHR_INT_2ADDR.S */
-/* File: x86/shop2addr.S */
-    /*
-     * Generic 32-bit "shift/2addr" operation.
-     */
-    /* shift/2addr vA, vB */
-    movzx    rINSTbl,%ecx           # ecx<- BA
-    sarl     $4,%ecx               # ecx<- B
-    GET_VREG_R %ecx %ecx            # ecx<- vB
-    andb     $0xf,rINSTbl          # rINST<- A
-    GET_VREG_R %eax rINST           # eax<- vA
-    sarl    %cl,%eax                          # ex: sarl %cl,%eax
-    FETCH_INST_OPCODE 1 %edx
-    SET_VREG %eax rINST
-    ADVANCE_PC 1
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_USHR_INT_2ADDR: /* 0xba */
-/* File: x86/OP_USHR_INT_2ADDR.S */
-/* File: x86/shop2addr.S */
-    /*
-     * Generic 32-bit "shift/2addr" operation.
-     */
-    /* shift/2addr vA, vB */
-    movzx    rINSTbl,%ecx           # ecx<- BA
-    sarl     $4,%ecx               # ecx<- B
-    GET_VREG_R %ecx %ecx            # ecx<- vB
-    andb     $0xf,rINSTbl          # rINST<- A
-    GET_VREG_R %eax rINST           # eax<- vA
-    shrl    %cl,%eax                          # ex: sarl %cl,%eax
-    FETCH_INST_OPCODE 1 %edx
-    SET_VREG %eax rINST
-    ADVANCE_PC 1
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_ADD_LONG_2ADDR: /* 0xbb */
-/* File: x86/OP_ADD_LONG_2ADDR.S */
-/* File: x86/binopWide2addr.S */
-    /*
-     * Generic 64-bit binary operation.
-     */
-    /* binop/2addr vA, vB */
-    movzbl    rINSTbl,%ecx              # ecx<- BA
-    sarl      $4,%ecx                  # ecx<- B
-    GET_VREG_WORD %eax %ecx 0           # eax<- v[B+0]
-    GET_VREG_WORD %ecx %ecx 1           # ecx<- v[B+1]
-    andb      $0xF,rINSTbl             # rINST<- A
-    addl %eax,(rFP,rINST,4)         # example: addl   %eax,(rFP,rINST,4)
-    adcl %ecx,4(rFP,rINST,4)         # example: adcl   %ecx,4(rFP,rINST,4)
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SUB_LONG_2ADDR: /* 0xbc */
-/* File: x86/OP_SUB_LONG_2ADDR.S */
-/* File: x86/binopWide2addr.S */
-    /*
-     * Generic 64-bit binary operation.
-     */
-    /* binop/2addr vA, vB */
-    movzbl    rINSTbl,%ecx              # ecx<- BA
-    sarl      $4,%ecx                  # ecx<- B
-    GET_VREG_WORD %eax %ecx 0           # eax<- v[B+0]
-    GET_VREG_WORD %ecx %ecx 1           # ecx<- v[B+1]
-    andb      $0xF,rINSTbl             # rINST<- A
-    subl %eax,(rFP,rINST,4)         # example: addl   %eax,(rFP,rINST,4)
-    sbbl %ecx,4(rFP,rINST,4)         # example: adcl   %ecx,4(rFP,rINST,4)
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_MUL_LONG_2ADDR: /* 0xbd */
-/* File: x86/OP_MUL_LONG_2ADDR.S */
-    /*
-     * Signed 64-bit integer multiply, 2-addr version
-     *
-     * We could definitely use more free registers for
-     * this code.  We must spill %edx because it
-     * is used by imul.  We'll also spill rINST (ebx),
-     * giving us eax, ebx, ecx and edx as computational
-     * temps.  On top of that, we'll spill rPC (esi)
-     * for use as the vA pointer and rFP (edi) for use
-     * as the vB pointer.  Yuck.
-     */
-    /* mul-long/2addr vA, vB */
-    movzbl    rINSTbl,%eax             # eax<- BA
-    andb      $0xf,%al                # eax<- A
-    sarl      $4,rINST                # rINST<- B
-    SPILL_TMP2(%esi)
-    SPILL(rFP)
-    leal      (rFP,%eax,4),%esi        # %esi<- &v[A]
-    leal      (rFP,rINST,4),rFP        # rFP<- &v[B]
-    movl      4(%esi),%ecx             # ecx<- Amsw
-    imull     (rFP),%ecx               # ecx<- (Amsw*Blsw)
-    movl      4(rFP),%eax              # eax<- Bmsw
-    imull     (%esi),%eax              # eax<- (Bmsw*Alsw)
-    addl      %eax,%ecx                # ecx<- (Amsw*Blsw)+(Bmsw*Alsw)
-    movl      (rFP),%eax               # eax<- Blsw
-    mull      (%esi)                   # eax<- (Blsw*Alsw)
-    jmp       .LOP_MUL_LONG_2ADDR_continue
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_DIV_LONG_2ADDR: /* 0xbe */
-/* File: x86/OP_DIV_LONG_2ADDR.S */
-    /* div/2addr vA, vB */
-    movzbl    rINSTbl,%eax
-    shrl      $4,%eax                  # eax<- B
-    andb      $0xf,rINSTbl             # rINST<- A
-    GET_VREG_WORD %edx %eax 0
-    GET_VREG_WORD %eax %eax 1
-    movl     %edx,OUT_ARG2(%esp)
-    testl    %eax,%eax
-    je       .LOP_DIV_LONG_2ADDR_check_zero
-    cmpl     $-1,%eax
-    je       .LOP_DIV_LONG_2ADDR_check_neg1
-.LOP_DIV_LONG_2ADDR_notSpecial:
-    GET_VREG_WORD %edx rINST 0
-    GET_VREG_WORD %ecx rINST 1
-.LOP_DIV_LONG_2ADDR_notSpecial1:
-    jmp      .LOP_DIV_LONG_2ADDR_continue
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_REM_LONG_2ADDR: /* 0xbf */
-/* File: x86/OP_REM_LONG_2ADDR.S */
-/* File: x86/OP_DIV_LONG_2ADDR.S */
-    /* div/2addr vA, vB */
-    movzbl    rINSTbl,%eax
-    shrl      $4,%eax                  # eax<- B
-    andb      $0xf,rINSTbl             # rINST<- A
-    GET_VREG_WORD %edx %eax 0
-    GET_VREG_WORD %eax %eax 1
-    movl     %edx,OUT_ARG2(%esp)
-    testl    %eax,%eax
-    je       .LOP_REM_LONG_2ADDR_check_zero
-    cmpl     $-1,%eax
-    je       .LOP_REM_LONG_2ADDR_check_neg1
-.LOP_REM_LONG_2ADDR_notSpecial:
-    GET_VREG_WORD %edx rINST 0
-    GET_VREG_WORD %ecx rINST 1
-.LOP_REM_LONG_2ADDR_notSpecial1:
-    jmp      .LOP_REM_LONG_2ADDR_continue
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_AND_LONG_2ADDR: /* 0xc0 */
-/* File: x86/OP_AND_LONG_2ADDR.S */
-/* File: x86/binopWide2addr.S */
-    /*
-     * Generic 64-bit binary operation.
-     */
-    /* binop/2addr vA, vB */
-    movzbl    rINSTbl,%ecx              # ecx<- BA
-    sarl      $4,%ecx                  # ecx<- B
-    GET_VREG_WORD %eax %ecx 0           # eax<- v[B+0]
-    GET_VREG_WORD %ecx %ecx 1           # ecx<- v[B+1]
-    andb      $0xF,rINSTbl             # rINST<- A
-    andl %eax,(rFP,rINST,4)         # example: addl   %eax,(rFP,rINST,4)
-    andl %ecx,4(rFP,rINST,4)         # example: adcl   %ecx,4(rFP,rINST,4)
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_OR_LONG_2ADDR: /* 0xc1 */
-/* File: x86/OP_OR_LONG_2ADDR.S */
-/* File: x86/binopWide2addr.S */
-    /*
-     * Generic 64-bit binary operation.
-     */
-    /* binop/2addr vA, vB */
-    movzbl    rINSTbl,%ecx              # ecx<- BA
-    sarl      $4,%ecx                  # ecx<- B
-    GET_VREG_WORD %eax %ecx 0           # eax<- v[B+0]
-    GET_VREG_WORD %ecx %ecx 1           # ecx<- v[B+1]
-    andb      $0xF,rINSTbl             # rINST<- A
-    orl %eax,(rFP,rINST,4)         # example: addl   %eax,(rFP,rINST,4)
-    orl %ecx,4(rFP,rINST,4)         # example: adcl   %ecx,4(rFP,rINST,4)
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_XOR_LONG_2ADDR: /* 0xc2 */
-/* File: x86/OP_XOR_LONG_2ADDR.S */
-/* File: x86/binopWide2addr.S */
-    /*
-     * Generic 64-bit binary operation.
-     */
-    /* binop/2addr vA, vB */
-    movzbl    rINSTbl,%ecx              # ecx<- BA
-    sarl      $4,%ecx                  # ecx<- B
-    GET_VREG_WORD %eax %ecx 0           # eax<- v[B+0]
-    GET_VREG_WORD %ecx %ecx 1           # ecx<- v[B+1]
-    andb      $0xF,rINSTbl             # rINST<- A
-    xorl %eax,(rFP,rINST,4)         # example: addl   %eax,(rFP,rINST,4)
-    xorl %ecx,4(rFP,rINST,4)         # example: adcl   %ecx,4(rFP,rINST,4)
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SHL_LONG_2ADDR: /* 0xc3 */
-/* File: x86/OP_SHL_LONG_2ADDR.S */
-    /*
-     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
-     * 32-bit shift distance.
-     */
-    /* shl-long/2addr vA, vB */
-    /* ecx gets shift count */
-    /* Need to spill edx */
-    /* rINSTw gets AA */
-    movzbl    rINSTbl,%ecx             # ecx<- BA
-    andb      $0xf,rINSTbl            # rINST<- A
-    GET_VREG_WORD %eax rINST 0         # eax<- v[AA+0]
-    sarl      $4,%ecx                 # ecx<- B
-    GET_VREG_WORD %edx rINST 1         # edx<- v[AA+1]
-    GET_VREG_R  %ecx %ecx              # ecx<- vBB
-    shldl     %eax,%edx
-    sall      %cl,%eax
-    testb     $32,%cl
-    je        2f
-    movl      %eax,%edx
-    xorl      %eax,%eax
-2:
-    SET_VREG_WORD %edx rINST 1         # v[AA+1]<- edx
-    jmp       .LOP_SHL_LONG_2ADDR_finish
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SHR_LONG_2ADDR: /* 0xc4 */
-/* File: x86/OP_SHR_LONG_2ADDR.S */
-    /*
-     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
-     * 32-bit shift distance.
-     */
-    /* shr-long/2addr vA, vB */
-    /* ecx gets shift count */
-    /* Need to spill edx */
-    /* rINSTw gets AA */
-    movzbl    rINSTbl,%ecx         # ecx<- BA
-    andb      $0xf,rINSTbl        # rINST<- A
-    GET_VREG_WORD %eax rINST 0     # eax<- v[AA+0]
-    sarl      $4,%ecx             # ecx<- B
-    GET_VREG_WORD %edx rINST 1     # edx<- v[AA+1]
-    GET_VREG_R %ecx %ecx           # ecx<- vBB
-    shrdl     %edx,%eax
-    sarl      %cl,%edx
-    testb     $32,%cl
-    je        2f
-    movl      %edx,%eax
-    sarl      $31,%edx
-2:
-    SET_VREG_WORD %edx rINST 1     # v[AA+1]<- edx
-    jmp       .LOP_SHR_LONG_2ADDR_finish
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_USHR_LONG_2ADDR: /* 0xc5 */
-/* File: x86/OP_USHR_LONG_2ADDR.S */
-    /*
-     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
-     * 32-bit shift distance.
-     */
-    /* ushr-long/2addr vA, vB */
-    /* ecx gets shift count */
-    /* Need to spill edx */
-    /* rINSTw gets AA */
-    movzbl    rINSTbl,%ecx             # ecx<- BA
-    andb      $0xf,rINSTbl            # rINST<- A
-    GET_VREG_WORD %eax rINST 0         # eax<- v[AA+0]
-    sarl      $4,%ecx                 # ecx<- B
-    GET_VREG_WORD %edx rINST 1         # edx<- v[AA+1]
-    GET_VREG_R %ecx %ecx               # ecx<- vBB
-    shrdl     %edx,%eax
-    shrl      %cl,%edx
-    testb     $32,%cl
-    je        2f
-    movl      %edx,%eax
-    xorl      %edx,%edx
-2:
-    SET_VREG_WORD %edx rINST 1         # v[AA+1]<- edx
-    jmp       .LOP_USHR_LONG_2ADDR_finish
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_ADD_FLOAT_2ADDR: /* 0xc6 */
-/* File: x86/OP_ADD_FLOAT_2ADDR.S */
-/* File: x86/binflop2addr.S */
-    /*
-     * Generic 32-bit binary float operation.
-     *
-     * For: add-fp, sub-fp, mul-fp, div-fp
-     */
-
-    /* binop/2addr vA, vB */
-    movzx   rINSTbl,%ecx           # ecx<- A+
-    andb    $0xf,%cl              # ecx<- A
-    flds    (rFP,%ecx,4)          # vAA to fp stack
-    sarl    $4,rINST             # rINST<- B
-    fadds   (rFP,rINST,4)         # ex: faddp
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
-    fstps    (rFP,%ecx,4)         # %st to vA
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SUB_FLOAT_2ADDR: /* 0xc7 */
-/* File: x86/OP_SUB_FLOAT_2ADDR.S */
-/* File: x86/binflop2addr.S */
-    /*
-     * Generic 32-bit binary float operation.
-     *
-     * For: add-fp, sub-fp, mul-fp, div-fp
-     */
-
-    /* binop/2addr vA, vB */
-    movzx   rINSTbl,%ecx           # ecx<- A+
-    andb    $0xf,%cl              # ecx<- A
-    flds    (rFP,%ecx,4)          # vAA to fp stack
-    sarl    $4,rINST             # rINST<- B
-    fsubs   (rFP,rINST,4)         # ex: faddp
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
-    fstps    (rFP,%ecx,4)         # %st to vA
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_MUL_FLOAT_2ADDR: /* 0xc8 */
-/* File: x86/OP_MUL_FLOAT_2ADDR.S */
-/* File: x86/binflop2addr.S */
-    /*
-     * Generic 32-bit binary float operation.
-     *
-     * For: add-fp, sub-fp, mul-fp, div-fp
-     */
-
-    /* binop/2addr vA, vB */
-    movzx   rINSTbl,%ecx           # ecx<- A+
-    andb    $0xf,%cl              # ecx<- A
-    flds    (rFP,%ecx,4)          # vAA to fp stack
-    sarl    $4,rINST             # rINST<- B
-    fmuls   (rFP,rINST,4)         # ex: faddp
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
-    fstps    (rFP,%ecx,4)         # %st to vA
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_DIV_FLOAT_2ADDR: /* 0xc9 */
-/* File: x86/OP_DIV_FLOAT_2ADDR.S */
-/* File: x86/binflop2addr.S */
-    /*
-     * Generic 32-bit binary float operation.
-     *
-     * For: add-fp, sub-fp, mul-fp, div-fp
-     */
-
-    /* binop/2addr vA, vB */
-    movzx   rINSTbl,%ecx           # ecx<- A+
-    andb    $0xf,%cl              # ecx<- A
-    flds    (rFP,%ecx,4)          # vAA to fp stack
-    sarl    $4,rINST             # rINST<- B
-    fdivs   (rFP,rINST,4)         # ex: faddp
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
-    fstps    (rFP,%ecx,4)         # %st to vA
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_REM_FLOAT_2ADDR: /* 0xca */
-/* File: x86/OP_REM_FLOAT_2ADDR.S */
-    /* rem_float/2addr vA, vB */
-    movzx   rINSTbl,%ecx                # ecx<- A+
-    sarl    $4,rINST                  # rINST<- B
-    flds     (rFP,rINST,4)              # vBB to fp stack
-    andb    $0xf,%cl                   # ecx<- A
-    flds     (rFP,%ecx,4)               # vAA to fp stack
-    FETCH_INST_OPCODE 1 %edx
-1:
-    fprem
-    fstsw     %ax
-    sahf
-    jp        1b
-    fstp      %st(1)
-    ADVANCE_PC 1
-    fstps    (rFP,%ecx,4)               # %st to vA
-    GOTO_NEXT_R %edx
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_ADD_DOUBLE_2ADDR: /* 0xcb */
-/* File: x86/OP_ADD_DOUBLE_2ADDR.S */
-/* File: x86/binflop2addr.S */
-    /*
-     * Generic 64-bit binary floating point operation.
-     *
-     * For: add-fp, sub-fp, mul-fp, div-fp
-     */
-
-    /* binop/2addr vA, vB */
-    movzx   rINSTbl,%ecx           # ecx<- A+
-    andb    $0xf,%cl              # ecx<- A
-    fldl    (rFP,%ecx,4)          # vAA to fp stack
-    sarl    $4,rINST             # rINST<- B
-    faddl   (rFP,rINST,4)         # ex: faddp
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
-    fstpl    (rFP,%ecx,4)         # %st to vA
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SUB_DOUBLE_2ADDR: /* 0xcc */
-/* File: x86/OP_SUB_DOUBLE_2ADDR.S */
-/* File: x86/binflop2addr.S */
-    /*
-     * Generic 64-bit binary floating point operation.
-     *
-     * For: add-fp, sub-fp, mul-fp, div-fp
-     */
-
-    /* binop/2addr vA, vB */
-    movzx   rINSTbl,%ecx           # ecx<- A+
-    andb    $0xf,%cl              # ecx<- A
-    fldl    (rFP,%ecx,4)          # vAA to fp stack
-    sarl    $4,rINST             # rINST<- B
-    fsubl   (rFP,rINST,4)         # ex: faddp
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
-    fstpl    (rFP,%ecx,4)         # %st to vA
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_MUL_DOUBLE_2ADDR: /* 0xcd */
-/* File: x86/OP_MUL_DOUBLE_2ADDR.S */
-/* File: x86/binflop2addr.S */
-    /*
-     * Generic 64-bit binary floating point operation.
-     *
-     * For: add-fp, sub-fp, mul-fp, div-fp
-     */
-
-    /* binop/2addr vA, vB */
-    movzx   rINSTbl,%ecx           # ecx<- A+
-    andb    $0xf,%cl              # ecx<- A
-    fldl    (rFP,%ecx,4)          # vAA to fp stack
-    sarl    $4,rINST             # rINST<- B
-    fmull   (rFP,rINST,4)         # ex: faddp
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
-    fstpl    (rFP,%ecx,4)         # %st to vA
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_DIV_DOUBLE_2ADDR: /* 0xce */
-/* File: x86/OP_DIV_DOUBLE_2ADDR.S */
-/* File: x86/binflop2addr.S */
-    /*
-     * Generic 64-bit binary floating point operation.
-     *
-     * For: add-fp, sub-fp, mul-fp, div-fp
-     */
-
-    /* binop/2addr vA, vB */
-    movzx   rINSTbl,%ecx           # ecx<- A+
-    andb    $0xf,%cl              # ecx<- A
-    fldl    (rFP,%ecx,4)          # vAA to fp stack
-    sarl    $4,rINST             # rINST<- B
-    fdivl   (rFP,rINST,4)         # ex: faddp
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
-    fstpl    (rFP,%ecx,4)         # %st to vA
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_REM_DOUBLE_2ADDR: /* 0xcf */
-/* File: x86/OP_REM_DOUBLE_2ADDR.S */
-    /* rem_double/2addr vA, vB */
-    movzx   rINSTbl,%ecx                # ecx<- A+
-    sarl    $4,rINST                  # rINST<- B
-    fldl     (rFP,rINST,4)              # vBB to fp stack
-    andb    $0xf,%cl                   # ecx<- A
-    fldl     (rFP,%ecx,4)               # vAA to fp stack
-    FETCH_INST_OPCODE 1 %edx
-1:
-    fprem
-    fstsw     %ax
-    sahf
-    jp        1b
-    fstp      %st(1)
-    ADVANCE_PC 1
-    fstpl    (rFP,%ecx,4)               # %st to vA
-    GOTO_NEXT_R %edx
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_ADD_INT_LIT16: /* 0xd0 */
-/* File: x86/OP_ADD_INT_LIT16.S */
-/* File: x86/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = eax op ecx".
-     * This could be an x86 instruction or a function call.  (If the result
-     * comes back in a register other than eax, you can override "result".)
-     *
-     * For: add-int/lit16, rsub-int,
-     *      and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    movzbl   rINSTbl,%eax               # eax<- 000000BA
-    sarl     $4,%eax                   # eax<- B
-    GET_VREG_R %eax %eax                # eax<- vB
-    movswl   2(rPC),%ecx                # ecx<- ssssCCCC
-    andb     $0xf,rINSTbl              # rINST<- A
-    addl %ecx,%eax                              # for example: addl %ecx, %eax
-    SET_VREG %eax rINST
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_RSUB_INT: /* 0xd1 */
-/* File: x86/OP_RSUB_INT.S */
-/* File: x86/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = eax op ecx".
-     * This could be an x86 instruction or a function call.  (If the result
-     * comes back in a register other than eax, you can override "result".)
-     *
-     * For: add-int/lit16, rsub-int,
-     *      and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    movzbl   rINSTbl,%eax               # eax<- 000000BA
-    sarl     $4,%eax                   # eax<- B
-    GET_VREG_R %eax %eax                # eax<- vB
-    movswl   2(rPC),%ecx                # ecx<- ssssCCCC
-    andb     $0xf,rINSTbl              # rINST<- A
-    subl %eax,%ecx                              # for example: addl %ecx, %eax
-    SET_VREG %ecx rINST
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_MUL_INT_LIT16: /* 0xd2 */
-/* File: x86/OP_MUL_INT_LIT16.S */
-    /* mul/lit16 vA, vB, #+CCCC */
-    /* Need A in rINST, ssssCCCC in ecx, vB in eax */
-    movzbl   rINSTbl,%eax               # eax<- 000000BA
-    sarl     $4,%eax                   # eax<- B
-    GET_VREG_R %eax %eax                # eax<- vB
-    movswl   2(rPC),%ecx                # ecx<- ssssCCCC
-    andb     $0xf,rINSTbl              # rINST<- A
-    imull     %ecx,%eax                 # trashes edx
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    SET_VREG %eax rINST
-    GOTO_NEXT_R %edx
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_DIV_INT_LIT16: /* 0xd3 */
-/* File: x86/OP_DIV_INT_LIT16.S */
-/* File: x86/bindivLit16.S */
-    /*
-     * 32-bit binary div/rem operation.  Handles special case of op0=minint and
-     * op1=-1.
-     */
-    /* div/rem/lit16 vA, vB, #+CCCC */
-    /* Need A in rINST, ssssCCCC in ecx, vB in eax */
-    movzbl   rINSTbl,%eax         # eax<- 000000BA
-    sarl     $4,%eax             # eax<- B
-    GET_VREG_R %eax %eax          # eax<- vB
-    movswl   2(rPC),%ecx          # ecx<- ssssCCCC
-    andb     $0xf,rINSTbl        # rINST<- A
-    cmpl     $0,%ecx
-    je       common_errDivideByZero
-    cmpl     $-1,%ecx
-    jne      .LOP_DIV_INT_LIT16_continue_div
-    cmpl     $0x80000000,%eax
-    jne      .LOP_DIV_INT_LIT16_continue_div
-    movl     $0x80000000,%eax
-    jmp      .LOP_DIV_INT_LIT16_finish_div
-
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_REM_INT_LIT16: /* 0xd4 */
-/* File: x86/OP_REM_INT_LIT16.S */
-/* File: x86/bindivLit16.S */
-    /*
-     * 32-bit binary div/rem operation.  Handles special case of op0=minint and
-     * op1=-1.
-     */
-    /* div/rem/lit16 vA, vB, #+CCCC */
-    /* Need A in rINST, ssssCCCC in ecx, vB in eax */
-    movzbl   rINSTbl,%eax         # eax<- 000000BA
-    sarl     $4,%eax             # eax<- B
-    GET_VREG_R %eax %eax          # eax<- vB
-    movswl   2(rPC),%ecx          # ecx<- ssssCCCC
-    andb     $0xf,rINSTbl        # rINST<- A
-    cmpl     $0,%ecx
-    je       common_errDivideByZero
-    cmpl     $-1,%ecx
-    jne      .LOP_REM_INT_LIT16_continue_div
-    cmpl     $0x80000000,%eax
-    jne      .LOP_REM_INT_LIT16_continue_div
-    movl     $0,%edx
-    jmp      .LOP_REM_INT_LIT16_finish_div
-
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_AND_INT_LIT16: /* 0xd5 */
-/* File: x86/OP_AND_INT_LIT16.S */
-/* File: x86/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = eax op ecx".
-     * This could be an x86 instruction or a function call.  (If the result
-     * comes back in a register other than eax, you can override "result".)
-     *
-     * For: add-int/lit16, rsub-int,
-     *      and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    movzbl   rINSTbl,%eax               # eax<- 000000BA
-    sarl     $4,%eax                   # eax<- B
-    GET_VREG_R %eax %eax                # eax<- vB
-    movswl   2(rPC),%ecx                # ecx<- ssssCCCC
-    andb     $0xf,rINSTbl              # rINST<- A
-    andl %ecx,%eax                              # for example: addl %ecx, %eax
-    SET_VREG %eax rINST
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_OR_INT_LIT16: /* 0xd6 */
-/* File: x86/OP_OR_INT_LIT16.S */
-/* File: x86/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = eax op ecx".
-     * This could be an x86 instruction or a function call.  (If the result
-     * comes back in a register other than eax, you can override "result".)
-     *
-     * For: add-int/lit16, rsub-int,
-     *      and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    movzbl   rINSTbl,%eax               # eax<- 000000BA
-    sarl     $4,%eax                   # eax<- B
-    GET_VREG_R %eax %eax                # eax<- vB
-    movswl   2(rPC),%ecx                # ecx<- ssssCCCC
-    andb     $0xf,rINSTbl              # rINST<- A
-    orl     %ecx,%eax                              # for example: addl %ecx, %eax
-    SET_VREG %eax rINST
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_XOR_INT_LIT16: /* 0xd7 */
-/* File: x86/OP_XOR_INT_LIT16.S */
-/* File: x86/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = eax op ecx".
-     * This could be an x86 instruction or a function call.  (If the result
-     * comes back in a register other than eax, you can override "result".)
-     *
-     * For: add-int/lit16, rsub-int,
-     *      and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    movzbl   rINSTbl,%eax               # eax<- 000000BA
-    sarl     $4,%eax                   # eax<- B
-    GET_VREG_R %eax %eax                # eax<- vB
-    movswl   2(rPC),%ecx                # ecx<- ssssCCCC
-    andb     $0xf,rINSTbl              # rINST<- A
-    xor    %ecx,%eax                              # for example: addl %ecx, %eax
-    SET_VREG %eax rINST
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_ADD_INT_LIT8: /* 0xd8 */
-/* File: x86/OP_ADD_INT_LIT8.S */
-/* File: x86/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = eax op ecx".
-     * This could be an x86 instruction or a function call.  (If the result
-     * comes back in a register other than eax, you can override "result".)
-     *
-     * For: add-int/lit8, rsub-int/lit8
-     *      and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    movzbl    2(rPC),%eax              # eax<- BB
-    movsbl    3(rPC),%ecx              # ecx<- ssssssCC
-    GET_VREG_R   %eax %eax             # eax<- rBB
-    addl %ecx,%eax                             # ex: addl %ecx,%eax
-    FETCH_INST_OPCODE 2 %edx
-    SET_VREG   %eax rINST
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_RSUB_INT_LIT8: /* 0xd9 */
-/* File: x86/OP_RSUB_INT_LIT8.S */
-/* File: x86/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = eax op ecx".
-     * This could be an x86 instruction or a function call.  (If the result
-     * comes back in a register other than eax, you can override "result".)
-     *
-     * For: add-int/lit8, rsub-int/lit8
-     *      and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    movzbl    2(rPC),%eax              # eax<- BB
-    movsbl    3(rPC),%ecx              # ecx<- ssssssCC
-    GET_VREG_R   %eax %eax             # eax<- rBB
-    subl  %eax,%ecx                             # ex: addl %ecx,%eax
-    FETCH_INST_OPCODE 2 %edx
-    SET_VREG   %ecx rINST
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_MUL_INT_LIT8: /* 0xda */
-/* File: x86/OP_MUL_INT_LIT8.S */
-    /* mul/lit8 vAA, vBB, #+CC */
-    movzbl    2(rPC),%eax              # eax<- BB
-    movsbl    3(rPC),%ecx              # ecx<- ssssssCC
-    GET_VREG_R   %eax %eax             # eax<- rBB
-    imull     %ecx,%eax                # trashes edx
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    SET_VREG  %eax rINST
-    GOTO_NEXT_R %edx
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_DIV_INT_LIT8: /* 0xdb */
-/* File: x86/OP_DIV_INT_LIT8.S */
-/* File: x86/bindivLit8.S */
-    /*
-     * 32-bit div/rem "lit8" binary operation.  Handles special case of
-     * op0=minint & op1=-1
-     */
-    /* div/rem/lit8 vAA, vBB, #+CC */
-    movzbl    2(rPC),%eax        # eax<- BB
-    movsbl    3(rPC),%ecx        # ecx<- ssssssCC
-    GET_VREG_R  %eax %eax        # eax<- rBB
-    cmpl     $0,%ecx
-    je       common_errDivideByZero
-    cmpl     $0x80000000,%eax
-    jne      .LOP_DIV_INT_LIT8_continue_div
-    cmpl     $-1,%ecx
-    jne      .LOP_DIV_INT_LIT8_continue_div
-    movl     $0x80000000,%eax
-    jmp      .LOP_DIV_INT_LIT8_finish_div
-
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_REM_INT_LIT8: /* 0xdc */
-/* File: x86/OP_REM_INT_LIT8.S */
-/* File: x86/bindivLit8.S */
-    /*
-     * 32-bit div/rem "lit8" binary operation.  Handles special case of
-     * op0=minint & op1=-1
-     */
-    /* div/rem/lit8 vAA, vBB, #+CC */
-    movzbl    2(rPC),%eax        # eax<- BB
-    movsbl    3(rPC),%ecx        # ecx<- ssssssCC
-    GET_VREG_R  %eax %eax        # eax<- rBB
-    cmpl     $0,%ecx
-    je       common_errDivideByZero
-    cmpl     $0x80000000,%eax
-    jne      .LOP_REM_INT_LIT8_continue_div
-    cmpl     $-1,%ecx
-    jne      .LOP_REM_INT_LIT8_continue_div
-    movl     $0,%edx
-    jmp      .LOP_REM_INT_LIT8_finish_div
-
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_AND_INT_LIT8: /* 0xdd */
-/* File: x86/OP_AND_INT_LIT8.S */
-/* File: x86/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = eax op ecx".
-     * This could be an x86 instruction or a function call.  (If the result
-     * comes back in a register other than %eax, you can override "result".)
-     *
-     * For: add-int/lit8, rsub-int/lit8
-     *      and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    movzbl    2(rPC),%eax              # eax<- BB
-    movsbl    3(rPC),%ecx              # ecx<- ssssssCC
-    GET_VREG_R   %eax %eax             # eax<- rBB
-    andl %ecx,%eax                             # eax<- vBB & ssssssCC
-    FETCH_INST_OPCODE 2 %edx
-    SET_VREG   %eax rINST
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_OR_INT_LIT8: /* 0xde */
-/* File: x86/OP_OR_INT_LIT8.S */
-/* File: x86/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = eax op ecx".
-     * This could be an x86 instruction or a function call.  (If the result
-     * comes back in a register other than %eax, you can override "result".)
-     *
-     * For: add-int/lit8, rsub-int/lit8
-     *      and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    movzbl    2(rPC),%eax              # eax<- BB
-    movsbl    3(rPC),%ecx              # ecx<- ssssssCC
-    GET_VREG_R   %eax %eax             # eax<- rBB
-    orl     %ecx,%eax                             # eax<- vBB | ssssssCC
-    FETCH_INST_OPCODE 2 %edx
-    SET_VREG   %eax rINST
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_XOR_INT_LIT8: /* 0xdf */
-/* File: x86/OP_XOR_INT_LIT8.S */
-/* File: x86/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = eax op ecx".
-     * This could be an x86 instruction or a function call.  (If the result
-     * comes back in a register other than %eax, you can override "result".)
-     *
-     * For: add-int/lit8, rsub-int/lit8
-     *      and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    movzbl    2(rPC),%eax              # eax<- BB
-    movsbl    3(rPC),%ecx              # ecx<- ssssssCC
-    GET_VREG_R   %eax %eax             # eax<- rBB
-    xor    %ecx,%eax                             # eax<- vBB ^ ssssssCC
-    FETCH_INST_OPCODE 2 %edx
-    SET_VREG   %eax rINST
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SHL_INT_LIT8: /* 0xe0 */
-/* File: x86/OP_SHL_INT_LIT8.S */
-/* File: x86/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = eax op ecx".
-     * This could be an x86 instruction or a function call.  (If the result
-     * comes back in a register other than %eax, you can override "result".)
-     *
-     * For: add-int/lit8, rsub-int/lit8
-     *      and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    movzbl    2(rPC),%eax              # eax<- BB
-    movsbl    3(rPC),%ecx              # ecx<- ssssssCC
-    GET_VREG_R   %eax %eax             # eax<- rBB
-    sall  %cl,%eax                             # eax<- vBB << (CC & 31)
-    FETCH_INST_OPCODE 2 %edx
-    SET_VREG   %eax rINST
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SHR_INT_LIT8: /* 0xe1 */
-/* File: x86/OP_SHR_INT_LIT8.S */
-/* File: x86/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = eax op ecx".
-     * This could be an x86 instruction or a function call.  (If the result
-     * comes back in a register other than %eax, you can override "result".)
-     *
-     * For: add-int/lit8, rsub-int/lit8
-     *      and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    movzbl    2(rPC),%eax              # eax<- BB
-    movsbl    3(rPC),%ecx              # ecx<- ssssssCC
-    GET_VREG_R   %eax %eax             # eax<- rBB
-    sarl    %cl,%eax                             # eax<- vBB >> (CC & 31), arithmetic
-    FETCH_INST_OPCODE 2 %edx
-    SET_VREG   %eax rINST
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_USHR_INT_LIT8: /* 0xe2 */
-/* File: x86/OP_USHR_INT_LIT8.S */
-/* File: x86/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = eax op ecx".
-     * This could be an x86 instruction or a function call.  (If the result
-     * comes back in a register other than %eax, you can override "result".)
-     *
-     * For: add-int/lit8, rsub-int/lit8
-     *      and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    movzbl    2(rPC),%eax              # eax<- BB
-    movsbl    3(rPC),%ecx              # ecx<- ssssssCC
-    GET_VREG_R   %eax %eax             # eax<- rBB
-    shrl     %cl,%eax                             # eax<- vBB >> (CC & 31), logical
-    FETCH_INST_OPCODE 2 %edx
-    SET_VREG   %eax rINST
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_IGET_VOLATILE: /* 0xe3 */
-/* File: x86/OP_IGET_VOLATILE.S */
-/* File: x86/OP_IGET.S */
-    /*
-     * General 32-bit instance field get.
-     *
-     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
-     */
-    /* op vA, vB, field@CCCC */
-    movl    rGLUE,%ecx
-    movzwl  2(rPC),%edx                         # edx<- 0000CCCC
-    movl    offGlue_methodClassDex(%ecx),%eax   # eax<- DvmDex
-    movzbl  rINSTbl,%ecx                        # ecx<- BA
-    sarl    $4,%ecx                            # ecx<- B
-    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
-    andb    $0xf,rINSTbl                       # rINST<- A
-    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
-    movl    (%eax,%edx,4),%eax                  # resolved entry
-    testl   %eax,%eax                           # is resolved entry null?
-    jne     .LOP_IGET_VOLATILE_finish                  # no, already resolved
-    movl    %edx,OUT_ARG1(%esp)                 # needed by dvmResolveInstField
-    movl    rGLUE,%edx
-    jmp     .LOP_IGET_VOLATILE_resolve
-
-
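A rough C sketch of the fast/slow split these field-get handlers implement: consult the per-DEX resolved-field cache, fall back to resolution on a miss, then load at the field's byte offset. All struct and function names below are invented stand-ins.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct FieldX { unsigned byteOffset; };

    static struct FieldX  sResolved = { 8 };        /* pretend the field sits at +8 */
    static struct FieldX *sCache[1];                /* pResFields-style cache       */

    static struct FieldX *resolveField(unsigned idx)   /* "slow" path               */
    {
        sCache[idx] = &sResolved;                      /* later gets hit the cache  */
        return &sResolved;
    }

    static int32_t iget32(const char *obj, unsigned fieldIdx)
    {
        struct FieldX *f = sCache[fieldIdx];        /* fast path: cached entry      */
        if (f == NULL)
            f = resolveField(fieldIdx);             /* first touch resolves         */
        int32_t v;                                  /* null-object check elided     */
        memcpy(&v, obj + f->byteOffset, sizeof v);
        return v;
    }

    int main(void)
    {
        char obj[16] = {0};
        int32_t x = 7;
        memcpy(obj + 8, &x, sizeof x);
        printf("%d %d\n", iget32(obj, 0), iget32(obj, 0));  /* resolve, then cache hit */
        return 0;
    }
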
-/* ------------------------------ */
-    .balign 64
-.L_OP_IPUT_VOLATILE: /* 0xe4 */
-/* File: x86/OP_IPUT_VOLATILE.S */
-/* File: x86/OP_IPUT.S */
-
-    /*
-     * General 32-bit instance field put.
-     *
-     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
-     */
-    /* op vA, vB, field@CCCC */
-    movl    rGLUE,%ecx
-    movzwl  2(rPC),%edx                         # %edx<- 0000CCCC
-    movl    offGlue_methodClassDex(%ecx),%eax   # eax<- DvmDex
-    movzbl  rINSTbl,%ecx                        # ecx<- BA
-    sarl    $4,%ecx                            # ecx<- B
-    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
-    andb    $0xf,rINSTbl                       # rINST<- A
-    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
-    movl    (%eax,%edx,4),%eax                  # resolved entry
-    testl   %eax,%eax                           # is resolved entry null?
-    jne     .LOP_IPUT_VOLATILE_finish                  # no, already resolved
-    movl    %edx,OUT_ARG1(%esp)
-    movl    rGLUE,%edx
-    jmp     .LOP_IPUT_VOLATILE_resolve
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SGET_VOLATILE: /* 0xe5 */
-/* File: x86/OP_SGET_VOLATILE.S */
-/* File: x86/OP_SGET.S */
-    /*
-     * General 32-bit SGET handler.
-     *
-     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
-     */
-    /* op vAA, field@BBBB */
-    movl      rGLUE,%ecx
-    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
-    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
-    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
-    testl     %eax,%eax                          # resolved entry null?
-    je        .LOP_SGET_VOLATILE_resolve                # if not, make it so
-.LOP_SGET_VOLATILE_finish:     # field ptr in eax
-    movl      offStaticField_value(%eax),%eax
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    SET_VREG %eax rINST
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SPUT_VOLATILE: /* 0xe6 */
-/* File: x86/OP_SPUT_VOLATILE.S */
-/* File: x86/OP_SPUT.S */
-    /*
-     * General 32-bit SPUT handler.
-     *
-     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
-     */
-    /* op vAA, field@BBBB */
-    movl      rGLUE,%ecx
-    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
-    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
-    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
-    testl     %eax,%eax                          # resolved entry null?
-    je        .LOP_SPUT_VOLATILE_resolve                # if not, make it so
-.LOP_SPUT_VOLATILE_finish:     # field ptr in eax
-    GET_VREG_R  %ecx rINST
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    movl      %ecx,offStaticField_value(%eax)
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_IGET_OBJECT_VOLATILE: /* 0xe7 */
-/* File: x86/OP_IGET_OBJECT_VOLATILE.S */
-/* File: x86/OP_IGET.S */
-    /*
-     * General 32-bit instance field get.
-     *
-     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
-     */
-    /* op vA, vB, field@CCCC */
-    movl    rGLUE,%ecx
-    movzwl  2(rPC),%edx                         # edx<- 0000CCCC
-    movl    offGlue_methodClassDex(%ecx),%eax   # eax<- DvmDex
-    movzbl  rINSTbl,%ecx                        # ecx<- BA
-    sarl    $4,%ecx                            # ecx<- B
-    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
-    andb    $0xf,rINSTbl                       # rINST<- A
-    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
-    movl    (%eax,%edx,4),%eax                  # resolved entry
-    testl   %eax,%eax                           # is resolved entry null?
-    jne     .LOP_IGET_OBJECT_VOLATILE_finish                  # no, already resolved
-    movl    %edx,OUT_ARG1(%esp)                 # needed by dvmResolveInstField
-    movl    rGLUE,%edx
-    jmp     .LOP_IGET_OBJECT_VOLATILE_resolve
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_IGET_WIDE_VOLATILE: /* 0xe8 */
-    /* (stub) */
-    SAVE_PC_FP_TO_GLUE %ecx          # leaves rGLUE in %ecx
-    movl %ecx,OUT_ARG0(%esp)         # glue is first arg to function
-    call      dvmMterp_OP_IGET_WIDE_VOLATILE     # do the real work
-    mov       rGLUE,%ecx
-    LOAD_PC_FP_FROM_GLUE             # retrieve updated values
-    FETCH_INST
-    GOTO_NEXT
-/* ------------------------------ */
-    .balign 64
-.L_OP_IPUT_WIDE_VOLATILE: /* 0xe9 */
-    /* (stub) */
-    SAVE_PC_FP_TO_GLUE %ecx          # leaves rGLUE in %ecx
-    movl %ecx,OUT_ARG0(%esp)         # glue is first arg to function
-    call      dvmMterp_OP_IPUT_WIDE_VOLATILE     # do the real work
-    mov       rGLUE,%ecx
-    LOAD_PC_FP_FROM_GLUE             # retrieve updated values
-    FETCH_INST
-    GOTO_NEXT
-/* ------------------------------ */
-    .balign 64
-.L_OP_SGET_WIDE_VOLATILE: /* 0xea */
-    /* (stub) */
-    SAVE_PC_FP_TO_GLUE %ecx          # leaves rGLUE in %ecx
-    movl %ecx,OUT_ARG0(%esp)         # glue is first arg to function
-    call      dvmMterp_OP_SGET_WIDE_VOLATILE     # do the real work
-    mov       rGLUE,%ecx
-    LOAD_PC_FP_FROM_GLUE             # retrieve updated values
-    FETCH_INST
-    GOTO_NEXT
-/* ------------------------------ */
-    .balign 64
-.L_OP_SPUT_WIDE_VOLATILE: /* 0xeb */
-    /* (stub) */
-    SAVE_PC_FP_TO_GLUE %ecx          # leaves rGLUE in %ecx
-    movl %ecx,OUT_ARG0(%esp)         # glue is first arg to function
-    call      dvmMterp_OP_SPUT_WIDE_VOLATILE     # do the real work
-    mov       rGLUE,%ecx
-    LOAD_PC_FP_FROM_GLUE             # retrieve updated values
-    FETCH_INST
-    GOTO_NEXT
-/* ------------------------------ */
-    .balign 64
-.L_OP_BREAKPOINT: /* 0xec */
-/* File: x86/OP_BREAKPOINT.S */
-/* File: x86/unused.S */
-    jmp     common_abort
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_THROW_VERIFICATION_ERROR: /* 0xed */
-/* File: x86/OP_THROW_VERIFICATION_ERROR.S */
-    /*
-     * Handle a throw-verification-error instruction.  This throws an
-     * exception for an error discovered during verification.  The
-     * exception is indicated by AA, with some detail provided by BBBB.
-     */
-    /* op AA, ref@BBBB */
-    movl     rGLUE,%ecx
-    movzwl   2(rPC),%eax                     # eax<- BBBB
-    movl     offGlue_method(%ecx),%ecx       # ecx<- glue->method
-    EXPORT_PC
-    movl     %eax,OUT_ARG2(%esp)             # arg2<- BBBB
-    movl     rINST,OUT_ARG1(%esp)            # arg1<- AA
-    movl     %ecx,OUT_ARG0(%esp)             # arg0<- method
-    call     dvmThrowVerificationError       # call(method, kind, ref)
-    jmp      common_exceptionThrown          # handle exception
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_EXECUTE_INLINE: /* 0xee */
-/* File: x86/OP_EXECUTE_INLINE.S */
-    /*
-     * Execute a "native inline" instruction.
-     *
-     * We will be calling through a function table:
-     *
-     * (*gDvmInlineOpsTable[opIndex].func)(arg0, arg1, arg2, arg3, pResult)
-     *
-     * Ignores argument count - always loads 4.
-     *
-     */
-    /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */
-    movl      rGLUE,%ecx
-    EXPORT_PC
-    movzwl    2(rPC),%eax               # eax<- BBBB
-    leal      offGlue_retval(%ecx),%ecx # ecx<- & glue->retval
-    movl      %ecx,OUT_ARG4(%esp)
-    call      .LOP_EXECUTE_INLINE_continue      # make call; will return after
-    testl     %eax,%eax                 # successful?
-    FETCH_INST_OPCODE 3 %edx
-    je        common_exceptionThrown    # no, handle exception
-    ADVANCE_PC 3
-    GOTO_NEXT_R %edx
-
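The execute-inline handler above dispatches through a per-index table of native helpers that always receive four arguments plus a result slot, as its comment describes. A hedged C sketch of that dispatch shape; the table, types, and helper below are invented for illustration.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef union { int32_t i; int64_t j; } JValueX;
    typedef bool (*InlineFunc)(uint32_t, uint32_t, uint32_t, uint32_t, JValueX *);

    /* Stand-in helper: returning false would mean "exception pending". */
    static bool inlineIdentity(uint32_t a0, uint32_t a1, uint32_t a2,
                               uint32_t a3, JValueX *res)
    {
        (void)a1; (void)a2; (void)a3;
        res->i = (int32_t)a0;
        return true;
    }

    static const struct { InlineFunc func; } gInlineTable[] = {
        { inlineIdentity },
    };

    int main(void)
    {
        JValueX retval;
        uint32_t vC = 5, vD = 0, vE = 0, vF = 0;            /* always loads 4 args */
        bool ok = gInlineTable[0].func(vC, vD, vE, vF, &retval);
        printf("%d %d\n", (int)ok, retval.i);
        return 0;
    }
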
-/* ------------------------------ */
-    .balign 64
-.L_OP_EXECUTE_INLINE_RANGE: /* 0xef */
-    /* (stub) */
-    SAVE_PC_FP_TO_GLUE %ecx          # leaves rGLUE in %ecx
-    movl %ecx,OUT_ARG0(%esp)         # glue is first arg to function
-    call      dvmMterp_OP_EXECUTE_INLINE_RANGE     # do the real work
-    mov       rGLUE,%ecx
-    LOAD_PC_FP_FROM_GLUE             # retrieve updated values
-    FETCH_INST
-    GOTO_NEXT
-/* ------------------------------ */
-    .balign 64
-.L_OP_INVOKE_DIRECT_EMPTY: /* 0xf0 */
-/* File: x86/OP_INVOKE_DIRECT_EMPTY.S */
-    /*
-     * invoke-direct-empty is a no-op in a "standard" interpreter.
-     */
-    FETCH_INST_WORD 3
-    ADVANCE_PC 3
-    GOTO_NEXT
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_RETURN_VOID_BARRIER: /* 0xf1 */
-    /* (stub) */
-    SAVE_PC_FP_TO_GLUE %ecx          # leaves rGLUE in %ecx
-    movl %ecx,OUT_ARG0(%esp)         # glue is first arg to function
-    call      dvmMterp_OP_RETURN_VOID_BARRIER     # do the real work
-    mov       rGLUE,%ecx
-    LOAD_PC_FP_FROM_GLUE             # retrieve updated values
-    FETCH_INST
-    GOTO_NEXT
-/* ------------------------------ */
-    .balign 64
-.L_OP_IGET_QUICK: /* 0xf2 */
-/* File: x86/OP_IGET_QUICK.S */
-    /* For: iget-quick, iget-object-quick */
-    /* op vA, vB, offset@CCCC */
-    movzbl    rINSTbl,%ecx              # ecx<- BA
-    sarl      $4,%ecx                  # ecx<- B
-    GET_VREG_R  %ecx %ecx               # vB (object we're operating on)
-    movzwl    2(rPC),%eax               # eax<- field byte offset
-    cmpl      $0,%ecx                  # is object null?
-    je        common_errNullObject
-    movl      (%ecx,%eax,1),%eax
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    andb      $0xf,rINSTbl             # rINST<- A
-    SET_VREG  %eax rINST                # fp[A]<- result
-    GOTO_NEXT_R %edx
-
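Because iget-quick carries the resolved field's byte offset directly in the instruction, no lookup remains at run time: null-check the object, then load 32 bits at that offset. A minimal C sketch with invented names.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static int iget_quick(const char *obj, uint16_t byteOffset, int32_t *out)
    {
        if (obj == NULL)
            return -1;                              /* common_errNullObject        */
        memcpy(out, obj + byteOffset, sizeof *out); /* movl (%ecx,%eax,1),%eax     */
        return 0;
    }

    int main(void)
    {
        char obj[12] = {0};
        int32_t field = 99, v;
        memcpy(obj + 4, &field, sizeof field);
        if (iget_quick(obj, 4, &v) == 0)
            printf("%d\n", v);                      /* 99 */
        return 0;
    }
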
-/* ------------------------------ */
-    .balign 64
-.L_OP_IGET_WIDE_QUICK: /* 0xf3 */
-/* File: x86/OP_IGET_WIDE_QUICK.S */
-    /* For: iget-wide-quick */
-    /* op vA, vB, offset@CCCC */
-    movzbl    rINSTbl,%ecx              # ecx<- BA
-    sarl      $4,%ecx                  # ecx<- B
-    GET_VREG_R  %ecx %ecx               # vB (object we're operating on)
-    movzwl    2(rPC),%eax               # eax<- field byte offset
-    cmpl      $0,%ecx                  # is object null?
-    je        common_errNullObject
-    leal      (%ecx,%eax,1),%eax        # eax<- address of 64-bit source
-    movl      (%eax),%ecx               # ecx<- lsw
-    movl      4(%eax),%eax              # eax<- msw
-    andb      $0xf,rINSTbl             # rINST<- A
-    FETCH_INST_OPCODE 2 %edx
-    SET_VREG_WORD %ecx rINST 0          # v[A+0]<- lsw
-    SET_VREG_WORD %eax rINST 1          # v[A+1]<- msw
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_IGET_OBJECT_QUICK: /* 0xf4 */
-/* File: x86/OP_IGET_OBJECT_QUICK.S */
-/* File: x86/OP_IGET_QUICK.S */
-    /* For: iget-quick, iget-object-quick */
-    /* op vA, vB, offset@CCCC */
-    movzbl    rINSTbl,%ecx              # ecx<- BA
-    sarl      $4,%ecx                  # ecx<- B
-    GET_VREG_R  %ecx %ecx               # vB (object we're operating on)
-    movzwl    2(rPC),%eax               # eax<- field byte offset
-    cmpl      $0,%ecx                  # is object null?
-    je        common_errNullObject
-    movl      (%ecx,%eax,1),%eax
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    andb      $0xf,rINSTbl             # rINST<- A
-    SET_VREG  %eax rINST                # fp[A]<- result
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_IPUT_QUICK: /* 0xf5 */
-/* File: x86/OP_IPUT_QUICK.S */
-    /* For: iput-quick */
-    /* op vA, vB, offset@CCCC */
-    movzbl    rINSTbl,%ecx              # ecx<- BA
-    sarl      $4,%ecx                  # ecx<- B
-    GET_VREG_R  %ecx %ecx               # vB (object we're operating on)
-    andb      $0xf,rINSTbl             # rINST<- A
-    GET_VREG_R  rINST,rINST             # rINST<- v[A]
-    movzwl    2(rPC),%eax               # eax<- field byte offset
-    testl     %ecx,%ecx                 # is object null?
-    FETCH_INST_OPCODE 2 %edx
-    je        common_errNullObject
-    movl      rINST,(%ecx,%eax,1)
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_IPUT_WIDE_QUICK: /* 0xf6 */
-/* File: x86/OP_IPUT_WIDE_QUICK.S */
-    /* For: iput-wide-quick */
-    /* op vA, vB, offset@CCCC */
-    movzbl    rINSTbl,%ecx              # ecx<- BA
-    sarl      $4,%ecx                  # ecx<- B
-    GET_VREG_R  %ecx %ecx               # vB (object we're operating on)
-    movzwl    2(rPC),%eax               # eax<- field byte offset
-    testl      %ecx,%ecx                # is object null?
-    je        common_errNullObject
-    leal      (%ecx,%eax,1),%ecx        # ecx<- Address of 64-bit target
-    andb      $0xf,rINSTbl             # rINST<- A
-    GET_VREG_WORD %eax rINST 0          # eax<- lsw
-    GET_VREG_WORD rINST rINST 1         # rINST<- msw
-    FETCH_INST_OPCODE 2 %edx
-    movl      %eax,(%ecx)
-    movl      rINST,4(%ecx)
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_IPUT_OBJECT_QUICK: /* 0xf7 */
-/* File: x86/OP_IPUT_OBJECT_QUICK.S */
-    /* For: iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    movzbl    rINSTbl,%ecx              # ecx<- BA
-    sarl      $4,%ecx                  # ecx<- B
-    GET_VREG_R  %ecx %ecx               # vB (object we're operating on)
-    andb      $0xf,rINSTbl             # rINST<- A
-    GET_VREG_R  rINST rINST             # rINST<- v[A]
-    movzwl    2(rPC),%eax               # eax<- field byte offset
-    testl     %ecx,%ecx                 # is object null?
-    je        common_errNullObject
-    movl      rINST,(%ecx,%eax,1)
-    movl      rGLUE,%eax
-    jmp       .LOP_IPUT_OBJECT_QUICK_finish
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */
-/* File: x86/OP_INVOKE_VIRTUAL_QUICK.S */
-    /*
-     * Handle an optimized virtual method call.
-     *
-     * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    movzwl    4(rPC),%eax               # eax<- FEDC or CCCC
-    movzwl    2(rPC),%ecx               # ecx<- BBBB
-    .if     (!0)
-    andl      $0xf,%eax                # eax<- C (or stays CCCC)
-    .endif
-    GET_VREG_R  %eax %eax               # eax<- vC ("this" ptr)
-    testl     %eax,%eax                 # null?
-    je        common_errNullObject      # yep, throw exception
-    movl      offObject_clazz(%eax),%eax # eax<- thisPtr->clazz
-    movl      offClassObject_vtable(%eax),%eax # eax<- thisPtr->clazz->vtable
-    EXPORT_PC                           # might throw later - get ready
-    movl      (%eax,%ecx,4),%eax        # eax<- vtable[BBBB]
-    jmp       common_invokeMethodNoRange
-
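The quickened invoke above has already reduced virtual dispatch to a vtable index, so after the null check the target is simply this->clazz->vtable[BBBB]. A small C sketch using stand-in types rather than the real runtime structures.

    #include <stdio.h>

    struct MethodX      { const char *name; };
    struct ClassObjectX { struct MethodX **vtable; };
    struct ObjectX      { struct ClassObjectX *clazz; };

    static struct MethodX *resolveQuick(const struct ObjectX *thisPtr, unsigned vtableIdx)
    {
        if (thisPtr == NULL)
            return NULL;                              /* common_errNullObject      */
        return thisPtr->clazz->vtable[vtableIdx];     /* eax<- vtable[BBBB]        */
    }

    int main(void)
    {
        struct MethodX  m = { "toString" };
        struct MethodX *vtab[1] = { &m };
        struct ClassObjectX cls = { vtab };
        struct ObjectX obj = { &cls };
        printf("%s\n", resolveQuick(&obj, 0)->name);
        return 0;
    }
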
-/* ------------------------------ */
-    .balign 64
-.L_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */
-/* File: x86/OP_INVOKE_VIRTUAL_QUICK_RANGE.S */
-/* File: x86/OP_INVOKE_VIRTUAL_QUICK.S */
-    /*
-     * Handle an optimized virtual method call.
-     *
-     * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    movzwl    4(rPC),%eax               # eax<- FEDC or CCCC
-    movzwl    2(rPC),%ecx               # ecx<- BBBB
-    .if     (!1)
-    andl      $0xf,%eax                # eax<- C (or stays CCCC)
-    .endif
-    GET_VREG_R  %eax %eax               # eax<- vC ("this" ptr)
-    testl     %eax,%eax                 # null?
-    je        common_errNullObject      # yep, throw exception
-    movl      offObject_clazz(%eax),%eax # eax<- thisPtr->clazz
-    movl      offClassObject_vtable(%eax),%eax # eax<- thisPtr->clazz->vtable
-    EXPORT_PC                           # might throw later - get ready
-    movl      (%eax,%ecx,4),%eax        # eax<- vtable[BBBB]
-    jmp       common_invokeMethodRange
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_INVOKE_SUPER_QUICK: /* 0xfa */
-/* File: x86/OP_INVOKE_SUPER_QUICK.S */
-    /*
-     * Handle an optimized "super" method call.
-     *
-     * for: [opt] invoke-super-quick, invoke-super-quick/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    movl      rGLUE,%ecx
-    movzwl    4(rPC),%eax               # eax<- GFED or CCCC
-    movl      offGlue_method(%ecx),%ecx # ecx<- current method
-    .if       (!0)
-    andl      $0xf,%eax                # eax<- D (or stays CCCC)
-    .endif
-    movl      offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
-    GET_VREG_R  %eax %eax               # eax<- "this"
-    movl      offClassObject_super(%ecx),%ecx # ecx<- method->clazz->super
-    testl     %eax,%eax                 # null "this"?
-    je        common_errNullObject      # "this" is null, throw exception
-    movzwl    2(rPC),%eax               # eax<- BBBB
-    movl      offClassObject_vtable(%ecx),%ecx # ecx<- vtable
-    EXPORT_PC
-    movl      (%ecx,%eax,4),%eax        # eax<- super->vtable[BBBB]
-    jmp       common_invokeMethodNoRange
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */
-/* File: x86/OP_INVOKE_SUPER_QUICK_RANGE.S */
-/* File: x86/OP_INVOKE_SUPER_QUICK.S */
-    /*
-     * Handle an optimized "super" method call.
-     *
-     * for: [opt] invoke-super-quick, invoke-super-quick/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    movl      rGLUE,%ecx
-    movzwl    4(rPC),%eax               # eax<- GFED or CCCC
-    movl      offGlue_method(%ecx),%ecx # ecx<- current method
-    .if       (!1)
-    andl      $0xf,%eax                # eax<- D (or stays CCCC)
-    .endif
-    movl      offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
-    GET_VREG_R  %eax %eax               # eax<- "this"
-    movl      offClassObject_super(%ecx),%ecx # ecx<- method->clazz->super
-    testl     %eax,%eax                 # null "this"?
-    je        common_errNullObject      # "this" is null, throw exception
-    movzwl    2(rPC),%eax               # eax<- BBBB
-    movl      offClassObject_vtable(%ecx),%ecx # ecx<- vtable
-    EXPORT_PC
-    movl      (%ecx,%eax,4),%eax        # eax<- super->vtable[BBBB]
-    jmp       common_invokeMethodRange
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_IPUT_OBJECT_VOLATILE: /* 0xfc */
-/* File: x86/OP_IPUT_OBJECT_VOLATILE.S */
-/* File: x86/OP_IPUT_OBJECT.S */
-    /*
-     * Object field put.
-     *
-     * for: iput-object
-     */
-    /* op vA, vB, field@CCCC */
-    movl    rGLUE,%ecx
-    movzwl  2(rPC),%edx                         # edx<- 0000CCCC
-    movl    offGlue_methodClassDex(%ecx),%eax   # eax<- DvmDex
-    movzbl  rINSTbl,%ecx                        # ecx<- BA
-    sarl    $4,%ecx                            # ecx<- B
-    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
-    andb    $0xf,rINSTbl                       # rINST<- A
-    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
-    movl    (%eax,%edx,4),%eax                  # resolved entry
-    testl   %eax,%eax                           # is resolved entry null?
-    jne     .LOP_IPUT_OBJECT_VOLATILE_finish                  # no, already resolved
-    movl    %edx,OUT_ARG1(%esp)
-    movl    rGLUE,%edx
-    jmp     .LOP_IPUT_OBJECT_VOLATILE_resolve
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SGET_OBJECT_VOLATILE: /* 0xfd */
-/* File: x86/OP_SGET_OBJECT_VOLATILE.S */
-/* File: x86/OP_SGET.S */
-    /*
-     * General 32-bit SGET handler.
-     *
-     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
-     */
-    /* op vAA, field@BBBB */
-    movl      rGLUE,%ecx
-    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
-    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
-    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
-    testl     %eax,%eax                          # resolved entry null?
-    je        .LOP_SGET_OBJECT_VOLATILE_resolve                # if not, make it so
-.LOP_SGET_OBJECT_VOLATILE_finish:     # field ptr in eax
-    movl      offStaticField_value(%eax),%eax
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    SET_VREG %eax rINST
-    GOTO_NEXT_R %edx
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_SPUT_OBJECT_VOLATILE: /* 0xfe */
-/* File: x86/OP_SPUT_OBJECT_VOLATILE.S */
-/* File: x86/OP_SPUT_OBJECT.S */
-    /*
-     * SPUT object handler.
-     */
-    /* op vAA, field@BBBB */
-    movl      rGLUE,%ecx
-    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
-    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
-    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField
-    testl     %eax,%eax                          # resolved entry null?
-    je        .LOP_SPUT_OBJECT_VOLATILE_resolve                # if not, make it so
-.LOP_SPUT_OBJECT_VOLATILE_finish:                              # field ptr in eax
-    movzbl    rINSTbl,%ecx                       # ecx<- AA
-    GET_VREG_R  %ecx %ecx
-    jmp       .LOP_SPUT_OBJECT_VOLATILE_continue
-
-
-/* ------------------------------ */
-    .balign 64
-.L_OP_DISPATCH_FF: /* 0xff */
-/* File: x86/OP_DISPATCH_FF.S */
-/* File: x86/unused.S */
-    jmp     common_abort
-
-
-
-    .balign 64
-    .size   dvmAsmInstructionStart, .-dvmAsmInstructionStart
-    .global dvmAsmInstructionEnd
-dvmAsmInstructionEnd:
-
-/*
- * ===========================================================================
- *  Sister implementations
- * ===========================================================================
- */
-    .global dvmAsmSisterStart
-    .type   dvmAsmSisterStart, %function
-    .text
-    .balign 4
-dvmAsmSisterStart:
-
-/* continuation for OP_CONST_STRING */
-
-/* This is the less common path, so we'll redo some work
-   here rather than force spills on the common path */
-.LOP_CONST_STRING_resolve:
-    movl     rGLUE,%eax
-    movl     %ecx,rINST                # rINST<- AA
-    EXPORT_PC
-    movl     offGlue_method(%eax),%eax # eax<- glue->method
-    movzwl   2(rPC),%ecx               # ecx<- BBBB
-    movl     offMethod_clazz(%eax),%eax
-    movl     %ecx,OUT_ARG1(%esp)
-    movl     %eax,OUT_ARG0(%esp)
-    call     dvmResolveString          # go resolve
-    testl    %eax,%eax                 # failed?
-    je       common_exceptionThrown
-    SET_VREG %eax rINST
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-/* continuation for OP_CONST_STRING_JUMBO */
-
-/* This is the less common path, so we'll redo some work
-   here rather than force spills on the common path */
-.LOP_CONST_STRING_JUMBO_resolve:
-    movl     rGLUE,%eax
-    movl     %ecx,rINST                # rINST<- AA
-    EXPORT_PC
-    movl     offGlue_method(%eax),%eax # eax<- glue->method
-    movl     2(rPC),%ecx               # ecx<- BBBBBBBB
-    movl     offMethod_clazz(%eax),%eax
-    movl     %ecx,OUT_ARG1(%esp)
-    movl     %eax,OUT_ARG0(%esp)
-    call     dvmResolveString          # go resolve
-    testl    %eax,%eax                 # failed?
-    je       common_exceptionThrown
-    SET_VREG %eax rINST
-    FETCH_INST_OPCODE 3 %edx
-    ADVANCE_PC 3
-    GOTO_NEXT_R %edx
-
-/* continuation for OP_CONST_CLASS */
-
-/* This is the less common path, so we'll redo some work
-   here rather than force spills on the common path */
-.LOP_CONST_CLASS_resolve:
-    movl     rGLUE,%eax
-    movl     %ecx,rINST                # rINST<- AA
-    EXPORT_PC
-    movl     offGlue_method(%eax),%eax # eax<- glue->method
-    movl     $1,OUT_ARG2(%esp)        # true
-    movzwl   2(rPC),%ecx               # ecx<- BBBB
-    movl     offMethod_clazz(%eax),%eax
-    movl     %ecx,OUT_ARG1(%esp)
-    movl     %eax,OUT_ARG0(%esp)
-    call     dvmResolveClass           # go resolve
-    testl    %eax,%eax                 # failed?
-    je       common_exceptionThrown
-    SET_VREG %eax rINST
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-/* continuation for OP_MONITOR_ENTER */
-
-.LOP_MONITOR_ENTER_continue:
-    movl    %ecx,OUT_ARG0(%esp)
-    movl    %eax,OUT_ARG1(%esp)
-    call    dvmLockObject               # dvmLockObject(self,object)
-#ifdef WITH_DEADLOCK_PREDICTION
-    movl    rGLUE,%ecx
-    movl    offGlueSelf(%ecx),%ecx      # ecx<- glue->self
-    movl    offThread_exception(%ecx),%eax
-    testl   %eax,%eax
-    jne     common_exceptionThrown
-#endif
-    ADVANCE_PC 1
-    GOTO_NEXT
-
-/* continuation for OP_MONITOR_EXIT */
-
-.LOP_MONITOR_EXIT_continue:
-    call    dvmUnlockObject             # unlock(self,obj)
-    FETCH_INST_OPCODE 1 %edx
-    testl   %eax,%eax                   # success?
-    ADVANCE_PC 1
-    je      common_exceptionThrown      # no, exception pending
-    GOTO_NEXT_R %edx
-.LOP_MONITOR_EXIT_errNullObject:
-    ADVANCE_PC 1                        # advance before throw
-    jmp     common_errNullObject
-
-/* continuation for OP_CHECK_CAST */
-
-    /*
-     * Trivial test failed, need to perform full check.  This is common.
-     *  ecx holds obj->clazz
-     *  eax holds class resolved from BBBB
-     *  rINST holds object
-     */
-.LOP_CHECK_CAST_fullcheck:
-    movl    %eax,sReg0                 # we'll need the desired class on failure
-    movl    %eax,OUT_ARG1(%esp)
-    movl    %ecx,OUT_ARG0(%esp)
-    call    dvmInstanceofNonTrivial    # eax<- boolean result
-    testl   %eax,%eax                  # failed?
-    jne     .LOP_CHECK_CAST_okay           # no, success
-
-    # A cast has failed.  We need to throw a ClassCastException.
-    EXPORT_PC
-    movl    offObject_clazz(rINST),%eax
-    movl    %eax,OUT_ARG0(%esp)                 # arg0<- obj->clazz
-    movl    sReg0,%ecx
-    movl    %ecx,OUT_ARG1(%esp)                 # arg1<- desired class
-    call    dvmThrowClassCastException
-    jmp     common_exceptionThrown
-
-    /*
-     * Resolution required.  This is the least-likely path, and we're
-     * going to have to recreate some data.
-     *
-     *  rINST holds object
-     */
-.LOP_CHECK_CAST_resolve:
-    movl    rGLUE,%ecx
-    EXPORT_PC
-    movzwl  2(rPC),%eax                # eax<- BBBB
-    movl    offGlue_method(%ecx),%ecx  # ecx<- glue->method
-    movl    %eax,OUT_ARG1(%esp)        # arg1<- BBBB
-    movl    offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
-    movl    $0,OUT_ARG2(%esp)         # arg2<- false
-    movl    %ecx,OUT_ARG0(%esp)        # arg0<- method->clazz
-    call    dvmResolveClass            # eax<- resolved ClassObject ptr
-    testl   %eax,%eax                  # got null?
-    je      common_exceptionThrown     # yes, handle exception
-    movl    offObject_clazz(rINST),%ecx  # ecx<- obj->clazz
-    jmp     .LOP_CHECK_CAST_resolved       # pick up where we left off
-
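A rough C sketch of the check-cast slow path's control flow: run the full instanceof test, and on failure throw ClassCastException built from the object's class and the desired class. The helpers below are stand-ins, not the runtime's implementations.

    #include <stdbool.h>
    #include <stdio.h>

    struct ClassX  { const char *descriptor; };
    struct ObjectY { struct ClassX *clazz; };

    static bool isInstanceOf(const struct ClassX *actual, const struct ClassX *desired)
    {
        return actual == desired;                     /* trivial stand-in test      */
    }

    static int checkCast(const struct ObjectY *obj, const struct ClassX *desired)
    {
        if (obj == NULL)
            return 0;                                 /* null passes check-cast     */
        if (isInstanceOf(obj->clazz, desired))
            return 0;                                 /* .LOP_CHECK_CAST_okay       */
        fprintf(stderr, "ClassCastException: %s cannot be cast to %s\n",
                obj->clazz->descriptor, desired->descriptor);
        return -1;                                    /* common_exceptionThrown     */
    }

    int main(void)
    {
        struct ClassX a = { "La/A;" }, b = { "Lb/B;" };
        struct ObjectY o = { &a };
        printf("%d\n", checkCast(&o, &b));            /* -1: cast fails */
        return 0;
    }
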
-/* continuation for OP_INSTANCE_OF */
-
-    /*
-     * Trivial test failed, need to perform full check.  This is common.
-     *  eax holds obj->clazz
-     *  ecx holds class resolved from BBBB
-     *  rINST has BA
-     */
-.LOP_INSTANCE_OF_fullcheck:
-    movl    %eax,OUT_ARG0(%esp)
-    movl    %ecx,OUT_ARG1(%esp)
-    call    dvmInstanceofNonTrivial     # eax<- boolean result
-    # fall through to OP_INSTANCE_OF_store
-
-    /*
-     * eax holds boolean result
-     * rINST holds BA
-     */
-.LOP_INSTANCE_OF_store:
-    FETCH_INST_OPCODE 2 %edx
-    andb    $0xf,rINSTbl               # <- A
-    ADVANCE_PC 2
-    SET_VREG %eax rINST                 # vA<- eax
-    GOTO_NEXT_R %edx
-
-    /*
-     * Trivial test succeeded, save and bail.
-     *  rINST holds BA
-     */
-.LOP_INSTANCE_OF_trivial:
-    FETCH_INST_OPCODE 2 %edx
-    andb    $0xf,rINSTbl               # <- A
-    ADVANCE_PC 2
-    movl    $1,%eax
-    SET_VREG %eax rINST                 # vA<- true
-    GOTO_NEXT_R %edx
-
-    /*
-     * Resolution required.  This is the least-likely path.
-     *
-     *  edx holds BBBB
-     *  rINST holds BA
-     */
-.LOP_INSTANCE_OF_resolve:
-    movl    %edx,OUT_ARG1(%esp)         # arg1<- BBBB
-    movl    rGLUE,%ecx
-    movl    offGlue_method(%ecx),%ecx
-    movl    $1,OUT_ARG2(%esp)          # arg2<- true
-    movl    offMethod_clazz(%ecx),%ecx  # ecx<- method->clazz
-    EXPORT_PC
-    movl    %ecx,OUT_ARG0(%esp)         # arg0<- method->clazz
-    call    dvmResolveClass             # eax<- resolved ClassObject ptr
-    testl   %eax,%eax                   # success?
-    je      common_exceptionThrown      # no, handle exception
-/* Now, we need to sync up with fast path.  We need eax to
- * hold the obj->clazz, and ecx to hold the resolved class
- */
-    movl    %eax,%ecx                   # ecx<- resolved class
-    movl    rINST,%eax                # eax<- BA
-    sarl    $4,%eax                    # eax<- B
-    GET_VREG_R %eax %eax                # eax<- vB (obj)
-    movl    offObject_clazz(%eax),%eax  # eax<- obj->clazz
-    jmp     .LOP_INSTANCE_OF_resolved
-
-/* continuation for OP_NEW_INSTANCE */
-
-.LOP_NEW_INSTANCE_initialized:  # on entry, ecx<- class
-    /* TODO: remove test for interface/abstract, now done in verifier */
-    testl     $(ACC_INTERFACE|ACC_ABSTRACT),offClassObject_accessFlags(%ecx)
-    movl      $ALLOC_DONT_TRACK,OUT_ARG1(%esp)
-    jne       .LOP_NEW_INSTANCE_abstract
-.LOP_NEW_INSTANCE_finish: # ecx=class
-    movl     %ecx,OUT_ARG0(%esp)
-    call     dvmAllocObject             # eax<- new object
-    FETCH_INST_OPCODE 2 %edx
-    testl    %eax,%eax                  # success?
-    je       common_exceptionThrown     # no, bail out
-    SET_VREG %eax rINST
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-    /*
-     * Class initialization required.
-     *
-     *  ecx holds class object
-     */
-.LOP_NEW_INSTANCE_needinit:
-    SPILL_TMP1(%ecx)                    # save object
-    movl    %ecx,OUT_ARG0(%esp)
-    call    dvmInitClass                # initialize class
-    UNSPILL_TMP1(%ecx)                  # restore object
-    testl   %eax,%eax                   # success?
-    jne     .LOP_NEW_INSTANCE_initialized     # success, continue
-    jmp     common_exceptionThrown      # go deal with init exception
-
-    /*
-     * Resolution required.  This is the least-likely path.
-     *
-     */
-.LOP_NEW_INSTANCE_resolve:
-    movl    rGLUE,%ecx
-    movzwl  2(rPC),%eax
-    movl    offGlue_method(%ecx),%ecx   # ecx<- glue->method
-    movl    %eax,OUT_ARG1(%esp)
-    movl    offMethod_clazz(%ecx),%ecx  # ecx<- method->clazz
-    movl    $0,OUT_ARG2(%esp)
-    movl    %ecx,OUT_ARG0(%esp)
-    call    dvmResolveClass             # call(clazz,off,flags)
-    movl    %eax,%ecx                   # ecx<- resolved ClassObject ptr
-    testl   %ecx,%ecx                   # success?
-    jne     .LOP_NEW_INSTANCE_resolved        # good to go
-    jmp     common_exceptionThrown      # no, handle exception
-
-    /*
-     * TODO: remove this
-     * We can't instantiate an abstract class or interface, so throw an
-     * InstantiationError with the class descriptor as the message.
-     *
-     *  ecx holds class object
-     */
-.LOP_NEW_INSTANCE_abstract:
-    movl    offClassObject_descriptor(%ecx),%eax
-    movl    $.LstrInstantiationError,OUT_ARG0(%esp)
-    movl    %eax,OUT_ARG1(%esp)
-    call    dvmThrowExceptionWithClassMessage
-    jmp     common_exceptionThrown
-
-/* continuation for OP_NEW_ARRAY */
-
-    /*
-     * Resolve class.  (This is an uncommon case.)
-     *  ecx holds class (null here)
-     *  eax holds array length (vB)
-     */
-.LOP_NEW_ARRAY_resolve:
-    movl    rGLUE,%ecx
-    SPILL_TMP1(%eax)                   # save array length
-    movl    offGlue_method(%ecx),%ecx  # ecx<- glue->method
-    movzwl  2(rPC),%eax                # eax<- CCCC
-    movl    offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
-    movl    %eax,OUT_ARG1(%esp)
-    movl    $0,OUT_ARG2(%esp)
-    movl    %ecx,OUT_ARG0(%esp)
-    call    dvmResolveClass            # eax<- call(clazz,ref,flag)
-    movl    %eax,%ecx
-    UNSPILL_TMP1(%eax)
-    testl   %ecx,%ecx                  # successful resolution?
-    je      common_exceptionThrown     # no, bail.
-# fall through to OP_NEW_ARRAY_finish
-
-    /*
-     * Finish allocation
-     *
-     * ecx holds class
-     * eax holds array length (vB)
-     */
-.LOP_NEW_ARRAY_finish:
-    movl    %ecx,OUT_ARG0(%esp)
-    movl    %eax,OUT_ARG1(%esp)
-    movl    $ALLOC_DONT_TRACK,OUT_ARG2(%esp)
-    call    dvmAllocArrayByClass    # eax<- call(clazz,length,flags)
-    FETCH_INST_OPCODE 2 %edx
-    testl   %eax,%eax               # failed?
-    je      common_exceptionThrown  # yup - go handle
-    SET_VREG %eax rINST
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-/* continuation for OP_FILLED_NEW_ARRAY */
-
-.LOP_FILLED_NEW_ARRAY_more:
-    movl    offMethod_clazz(%eax),%eax        # eax<- method->clazz
-    movl    %eax,OUT_ARG0(%esp)               # arg0<- clazz
-    call    dvmResolveClass                   # eax<- call(clazz,ref,flag)
-    testl   %eax,%eax                         # null?
-    je      common_exceptionThrown            # yes, handle it
-
-       # note: fall through to .LOP_FILLED_NEW_ARRAY_continue
-
-    /*
-     * On entry:
-     *    eax holds array class [r0]
-     *    rINST holds AA or BB [r10]
-     *    ecx is scratch
-     */
-.LOP_FILLED_NEW_ARRAY_continue:
-    movl    offClassObject_descriptor(%eax),%ecx  # ecx<- arrayClass->descriptor
-    movl    $ALLOC_DONT_TRACK,OUT_ARG2(%esp)     # arg2<- flags
-    movzbl  1(%ecx),%ecx                          # ecx<- descriptor[1]
-    movl    %eax,OUT_ARG0(%esp)                   # arg0<- arrayClass
-    movl    rGLUE,%eax
-    cmpb    $'I',%cl                             # supported?
-    je      1f
-    cmpb    $'L',%cl
-    je      1f
-    cmpb    $'[',%cl
-    jne      .LOP_FILLED_NEW_ARRAY_notimpl                  # no, not handled yet
-1:
-    movl    %ecx,offGlue_retval+4(%eax)           # save type
-    .if      (!0)
-    SPILL_TMP1(rINST)                              # save copy, need "B" later
-    sarl    $4,rINST
-    .endif
-    movl    rINST,OUT_ARG1(%esp)                  # arg1<- A or AA (length)
-    call    dvmAllocArrayByClass     # eax<- call(arrayClass, length, flags)
-    movl    rGLUE,%ecx
-    testl   %eax,%eax                             # alloc successful?
-    je      common_exceptionThrown                # no, handle exception
-    movl    %eax,offGlue_retval(%ecx)             # retval.l<- new array
-    movzwl  4(rPC),%ecx                           # ecx<- FEDC or CCCC
-    leal    offArrayObject_contents(%eax),%eax    # eax<- newArray->contents
-
-/* at this point:
- *     eax is pointer to tgt
- *     rINST is length
- *     ecx is FEDC or CCCC
- *     TMP_SPILL1 is BA
- *  We now need to copy values from registers into the array
- */
-
-    .if 0
-    # set up src pointer
-    SPILL_TMP2(%esi)
-    SPILL_TMP3(%edi)
-    leal    (rFP,%ecx,4),%esi # set up src ptr
-    movl    %eax,%edi         # set up dst ptr
-    movl    rINST,%ecx        # load count register
-    rep
-    movsd
-    UNSPILL_TMP2(%esi)
-    UNSPILL_TMP3(%edi)
-    movl    rGLUE,%ecx
-    movl    offGlue_retval+4(%ecx),%eax      # eax<- type
-    FETCH_INST_OPCODE 3 %edx
-    .else
-    testl  rINST,rINST
-    je     4f
-    UNSPILL_TMP1(%edx)        # restore "BA"
-    andl   $0x0f,%edx        # edx<- 0000000A
-    sall   $16,%edx          # edx<- 000A0000
-    orl    %ecx,%edx          # edx<- 000AFEDC
-3:
-    movl   $0xf,%ecx
-    andl   %edx,%ecx          # ecx<- next reg to load
-    GET_VREG_R %ecx %ecx
-    shrl   $4,%edx
-    leal   4(%eax),%eax
-    movl   %ecx,-4(%eax)
-    sub    $1,rINST
-    jne    3b
-4:
-    movl   rGLUE,%ecx
-    movl    offGlue_retval+4(%ecx),%eax      # eax<- type
-    FETCH_INST_OPCODE 3 %edx
-    .endif
-
-    cmpb    $'I',%al                        # Int array?
-    je      5f                               # skip card mark if so
-    movl    offGlue_retval(%ecx),%eax        # eax<- object head
-    movl    offGlue_cardTable(%ecx),%ecx     # card table base
-    shrl    $GC_CARD_SHIFT,%eax             # convert to card num
-    movb    %cl,(%ecx,%eax)                  # mark card based on object head
-5:
-    ADVANCE_PC 3
-    GOTO_NEXT_R %edx
-
-
-    /*
-     * Throw an exception indicating that we have not implemented this
-     * mode of filled-new-array.
-     */
-.LOP_FILLED_NEW_ARRAY_notimpl:
-    movl    $.LstrInternalErrorA,%eax
-    movl    %eax,OUT_ARG0(%esp)
-    movl    $.LstrFilledNewArrayNotImplA,%eax
-    movl    %eax,OUT_ARG1(%esp)
-    call    dvmThrowException
-    jmp     common_exceptionThrown
-
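The copy loop above fills the new array from the argument registers, and the tail then dirties a GC card for it unless the element type is 'I'. A hedged C sketch of that card-marking step; the card shift and dirty value are assumptions for illustration (the real handler stores the card table base's low byte).

    #include <stdint.h>
    #include <stdio.h>

    #define CARD_SHIFT 7        /* assumed GC_CARD_SHIFT: one card per 128 bytes */
    #define CARD_DIRTY 1        /* assumed marker value                          */

    static void markCard(uint8_t *cardTable, uintptr_t objAddr, char elemType)
    {
        if (elemType == 'I')
            return;                                   /* int arrays hold no references */
        cardTable[objAddr >> CARD_SHIFT] = CARD_DIRTY;
    }

    int main(void)
    {
        static uint8_t cards[1 << 10];
        markCard(cards, 0x12345, '[');                /* reference array: card dirtied */
        printf("%u\n", cards[0x12345 >> CARD_SHIFT]); /* 1 */
        return 0;
    }
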
-/* continuation for OP_FILLED_NEW_ARRAY_RANGE */
-
-.LOP_FILLED_NEW_ARRAY_RANGE_more:
-    movl    offMethod_clazz(%eax),%eax        # eax<- method->clazz
-    movl    %eax,OUT_ARG0(%esp)               # arg0<- clazz
-    call    dvmResolveClass                   # eax<- call(clazz,ref,flag)
-    testl   %eax,%eax                         # null?
-    je      common_exceptionThrown            # yes, handle it
-
-       # note: fall through to .LOP_FILLED_NEW_ARRAY_RANGE_continue
-
-    /*
-     * On entry:
-     *    eax holds array class [r0]
-     *    rINST holds AA or BB [r10]
-     *    ecx is scratch
-     */
-.LOP_FILLED_NEW_ARRAY_RANGE_continue:
-    movl    offClassObject_descriptor(%eax),%ecx  # ecx<- arrayClass->descriptor
-    movl    $ALLOC_DONT_TRACK,OUT_ARG2(%esp)     # arg2<- flags
-    movzbl  1(%ecx),%ecx                          # ecx<- descriptor[1]
-    movl    %eax,OUT_ARG0(%esp)                   # arg0<- arrayClass
-    movl    rGLUE,%eax
-    cmpb    $'I',%cl                             # supported?
-    je      1f
-    cmpb    $'L',%cl
-    je      1f
-    cmpb    $'[',%cl
-    jne      .LOP_FILLED_NEW_ARRAY_RANGE_notimpl                  # no, not handled yet
-1:
-    movl    %ecx,offGlue_retval+4(%eax)           # save type
-    .if      (!1)
-    SPILL_TMP1(rINST)                              # save copy, need "B" later
-    sarl    $4,rINST
-    .endif
-    movl    rINST,OUT_ARG1(%esp)                  # arg1<- A or AA (length)
-    call    dvmAllocArrayByClass     # eax<- call(arrayClass, length, flags)
-    movl    rGLUE,%ecx
-    testl   %eax,%eax                             # alloc successful?
-    je      common_exceptionThrown                # no, handle exception
-    movl    %eax,offGlue_retval(%ecx)             # retval.l<- new array
-    movzwl  4(rPC),%ecx                           # ecx<- FEDC or CCCC
-    leal    offArrayObject_contents(%eax),%eax    # eax<- newArray->contents
-
-/* at this point:
- *     eax is pointer to tgt
- *     rINST is length
- *     ecx is FEDC or CCCC
- *     TMP_SPILL1 is BA
- *  We now need to copy values from registers into the array
- */
-
-    .if 1
-    # set up src pointer
-    SPILL_TMP2(%esi)
-    SPILL_TMP3(%edi)
-    leal    (rFP,%ecx,4),%esi # set up src ptr
-    movl    %eax,%edi         # set up dst ptr
-    movl    rINST,%ecx        # load count register
-    rep
-    movsd
-    UNSPILL_TMP2(%esi)
-    UNSPILL_TMP3(%edi)
-    movl    rGLUE,%ecx
-    movl    offGlue_retval+4(%ecx),%eax      # eax<- type
-    FETCH_INST_OPCODE 3 %edx
-    .else
-    testl  rINST,rINST
-    je     4f
-    UNSPILL_TMP1(%edx)        # restore "BA"
-    andl   $0x0f,%edx        # edx<- 0000000A
-    sall   $16,%edx          # edx<- 000A0000
-    orl    %ecx,%edx          # edx<- 000AFEDC
-3:
-    movl   $0xf,%ecx
-    andl   %edx,%ecx          # ecx<- next reg to load
-    GET_VREG_R %ecx %ecx
-    shrl   $4,%edx
-    leal   4(%eax),%eax
-    movl   %ecx,-4(%eax)
-    sub    $1,rINST
-    jne    3b
-4:
-    movl   rGLUE,%ecx
-    movl    offGlue_retval+4(%ecx),%eax      # eax<- type
-    FETCH_INST_OPCODE 3 %edx
-    .endif
-
-    cmpb    $'I',%al                        # Int array?
-    je      5f                               # skip card mark if so
-    movl    offGlue_retval(%ecx),%eax        # eax<- object head
-    movl    offGlue_cardTable(%ecx),%ecx     # card table base
-    shrl    $GC_CARD_SHIFT,%eax             # convert to card num
-    movb    %cl,(%ecx,%eax)                  # mark card based on object head
-5:
-    ADVANCE_PC 3
-    GOTO_NEXT_R %edx
-
-
-    /*
-     * Throw an exception indicating that we have not implemented this
-     * mode of filled-new-array.
-     */
-.LOP_FILLED_NEW_ARRAY_RANGE_notimpl:
-    movl    $.LstrInternalErrorA,%eax
-    movl    %eax,OUT_ARG0(%esp)
-    movl    $.LstrFilledNewArrayNotImplA,%eax
-    movl    %eax,OUT_ARG1(%esp)
-    call    dvmThrowException
-    jmp     common_exceptionThrown
-
-/* continuation for OP_CMPL_FLOAT */
-
-.LOP_CMPL_FLOAT_isNaN:
-    movl      $-1,%ecx
-    jmp       .LOP_CMPL_FLOAT_finish
-
-/* continuation for OP_CMPG_FLOAT */
-
-.LOP_CMPG_FLOAT_isNaN:
-    movl      $1,%ecx
-    jmp       .LOP_CMPG_FLOAT_finish
-
-/* continuation for OP_CMPL_DOUBLE */
-
-.LOP_CMPL_DOUBLE_isNaN:
-    movl      $-1,%ecx
-    jmp       .LOP_CMPL_DOUBLE_finish
-
-/* continuation for OP_CMPG_DOUBLE */
-
-.LOP_CMPG_DOUBLE_isNaN:
-    movl      $1,%ecx
-    jmp       .LOP_CMPG_DOUBLE_finish
-
-/* continuation for OP_CMP_LONG */
-
-.LOP_CMP_LONG_bigger:
-    movl      $1,%ecx
-    jmp       .LOP_CMP_LONG_finish
-.LOP_CMP_LONG_smaller:
-    movl      $-1,%ecx
-.LOP_CMP_LONG_finish:
-    SET_VREG %ecx rINST
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
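For reference, these cmp-long continuations only materialize the nonzero cases; the instruction as a whole yields -1, 0, or 1 for a signed 64-bit comparison. A tiny C sketch.

    #include <stdint.h>
    #include <stdio.h>

    static int32_t cmp_long(int64_t a, int64_t b)
    {
        if (a < b) return -1;       /* .LOP_CMP_LONG_smaller */
        if (a > b) return  1;       /* .LOP_CMP_LONG_bigger  */
        return 0;
    }

    int main(void)
    {
        printf("%d %d %d\n", cmp_long(-5, 3), cmp_long(3, 3), cmp_long(9, 3));
        return 0;
    }
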
-/* continuation for OP_AGET_WIDE */
-
-.LOP_AGET_WIDE_finish:
-    leal      offArrayObject_contents(%eax,%ecx,8),%eax
-    movl      (%eax),%ecx
-    movl      4(%eax),%eax
-    SET_VREG_WORD %ecx rINST 0
-    SET_VREG_WORD %eax rINST 1
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-/* continuation for OP_APUT_WIDE */
-
-.LOP_APUT_WIDE_finish:
-    leal      offArrayObject_contents(%eax,%ecx,8),%eax
-    GET_VREG_WORD %ecx rINST 0
-    GET_VREG_WORD rINST rINST 1
-    movl      rINST,4(%eax)
-    FETCH_INST_OPCODE 2 %edx
-    movl      %ecx,(%eax)
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-/* continuation for OP_APUT_OBJECT */
-
-    /* On entry:
-     *   eax<- array object
-     *   ecx<- index
-     *   rINST<- vAA
-     */
-.LOP_APUT_OBJECT_continue:
-    leal      offArrayObject_contents(%eax,%ecx,4),%ecx
-    testl     rINST,rINST                    # storing null reference?
-    je        .LOP_APUT_OBJECT_skip_check
-    SPILL_TMP1(%ecx)                         # save target address
-    SPILL_TMP2(%eax)                         # save object head
-    movl      offObject_clazz(%eax),%eax     # eax<- arrayObj->clazz
-    movl      offObject_clazz(rINST),%ecx    # ecx<- obj->clazz
-    movl      %eax,OUT_ARG1(%esp)
-    movl      %ecx,OUT_ARG0(%esp)
-    movl      %ecx,sReg0                     # store the two classes for later
-    movl      %eax,sReg1
-    call      dvmCanPutArrayElement          # test object type vs. array type
-    UNSPILL_TMP1(%ecx)                       # recover target address
-    testl     %eax,%eax
-    movl      rGLUE,%eax
-    jne       .LOP_APUT_OBJECT_types_okay
-
-    # The types don't match.  We need to throw an ArrayStoreException.
-    EXPORT_PC
-    movl      sReg0,%eax                     # restore the two classes...
-    movl      %eax,OUT_ARG0(%esp)
-    movl      sReg1,%ecx
-    movl      %ecx,OUT_ARG1(%esp)
-    call      dvmThrowArrayStoreException    # ...and throw
-    jmp       common_exceptionThrown
-
-.LOP_APUT_OBJECT_types_okay:
-    movl      offGlue_cardTable(%eax),%eax   # get card table base
-    movl      rINST,(%ecx)                   # store into array
-    UNSPILL_TMP2(%ecx)                       # recover object head
-    FETCH_INST_OPCODE 2 %edx
-    shrl      $GC_CARD_SHIFT,%ecx           # object head to card number
-    movb      %al,(%eax,%ecx)                # mark card using object head
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-.LOP_APUT_OBJECT_skip_check:
-    movl      rINST,(%ecx)
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-/* continuation for OP_IGET */
-
-
-.LOP_IGET_resolve:
-    EXPORT_PC
-    movl    offGlue_method(%edx),%edx           # edx<- current method
-    movl    offMethod_clazz(%edx),%edx          # edx<- method->clazz
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
     SPILL_TMP1(%ecx)                            # save obj pointer across call
-    movl    %edx,OUT_ARG0(%esp)                  # pass in method->clazz
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
     call    dvmResolveInstField                 #  ... to dvmResolveInstField
     UNSPILL_TMP1(%ecx)
     testl   %eax,%eax                           #  returns InstrField ptr
@@ -7120,19 +2564,37 @@
     testl   %ecx,%ecx                           # object null?
     je      common_errNullObject                # object was null
     movl   (%ecx,%eax,1),%ecx                  # ecx<- obj.field (8/16/32 bits)
-    movl    rINST,%eax                          # eax<- A
-    FETCH_INST_OPCODE 2 %edx
-    SET_VREG %ecx %eax
+    FETCH_INST_OPCODE 2 %eax
+    UNSPILL(rIBASE)
+    SET_VREG %ecx rINST
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %eax
 
-/* continuation for OP_IGET_WIDE */
-
-
-.LOP_IGET_WIDE_resolve:
+/* ------------------------------ */
+.L_OP_IGET_WIDE: /* 0x53 */
+/* File: x86/OP_IGET_WIDE.S */
+    /*
+     * 64-bit instance field get.
+     *
+     */
+    /* op vA, vB, field@CCCC */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)                               # preserve rIBASE
+    movzwl  2(rPC),rIBASE                       # rIBASE<- 0000CCCC
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzbl  rINSTbl,%ecx                        # ecx<- BA
+    sarl    $4,%ecx                            # ecx<- B
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    andb    $0xf,rINSTbl                       # rINST<- A
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IGET_WIDE_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)               # for dvmResolveInstField
+    movl    rSELF,rIBASE
     EXPORT_PC
-    movl    offGlue_method(%edx),%edx           # edx<- current method
-    movl    offMethod_clazz(%edx),%edx          # edx<- method->clazz
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
     SPILL_TMP1(%ecx)                            # save obj pointer across call
     movl    rPC,OUT_ARG0(%esp)                  # pass in method->clazz
     call    dvmResolveInstField                 #  ... to dvmResolveInstField
@@ -7154,21 +2616,42 @@
     leal    (%ecx,%eax,1),%eax                  # eax<- address of field
     movl    (%eax),%ecx                         # ecx<- lsw
     movl    4(%eax),%eax                        # eax<- msw
-    FETCH_INST_OPCODE 2 %edx
     SET_VREG_WORD %ecx rINST 0
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)                             # restore rIBASE
     SET_VREG_WORD %eax rINST 1
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
-/* continuation for OP_IGET_OBJECT */
-
-
-.LOP_IGET_OBJECT_resolve:
+/* ------------------------------ */
+.L_OP_IGET_OBJECT: /* 0x54 */
+/* File: x86/OP_IGET_OBJECT.S */
+/* File: x86/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    /* op vA, vB, field@CCCC */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)                               # preserve rIBASE
+    movzwl  2(rPC),rIBASE                       # rIBASE<- 0000CCCC
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzbl  rINSTbl,%ecx                        # ecx<- BA
+    sarl    $4,%ecx                            # ecx<- B
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    andb    $0xf,rINSTbl                       # rINST<- A
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IGET_OBJECT_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)               # needed by dvmResolveInstField
+    movl    rSELF,rIBASE
     EXPORT_PC
-    movl    offGlue_method(%edx),%edx           # edx<- current method
-    movl    offMethod_clazz(%edx),%edx          # edx<- method->clazz
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
     SPILL_TMP1(%ecx)                            # save obj pointer across call
-    movl    %edx,OUT_ARG0(%esp)                  # pass in method->clazz
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
     call    dvmResolveInstField                 #  ... to dvmResolveInstField
     UNSPILL_TMP1(%ecx)
     testl   %eax,%eax                           #  returns InstrField ptr
@@ -7186,21 +2669,42 @@
     testl   %ecx,%ecx                           # object null?
     je      common_errNullObject                # object was null
     movl   (%ecx,%eax,1),%ecx                  # ecx<- obj.field (8/16/32 bits)
-    movl    rINST,%eax                          # eax<- A
-    FETCH_INST_OPCODE 2 %edx
-    SET_VREG %ecx %eax
+    FETCH_INST_OPCODE 2 %eax
+    UNSPILL(rIBASE)
+    SET_VREG %ecx rINST
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-/* continuation for OP_IGET_BOOLEAN */
+    GOTO_NEXT_R %eax
 
 
-.LOP_IGET_BOOLEAN_resolve:
+/* ------------------------------ */
+.L_OP_IGET_BOOLEAN: /* 0x55 */
+/* File: x86/OP_IGET_BOOLEAN.S */
+/* File: x86/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    /* op vA, vB, field@CCCC */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)                               # preserve rIBASE
+    movzwl  2(rPC),rIBASE                       # rIBASE<- 0000CCCC
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzbl  rINSTbl,%ecx                        # ecx<- BA
+    sarl    $4,%ecx                            # ecx<- B
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    andb    $0xf,rINSTbl                       # rINST<- A
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IGET_BOOLEAN_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)               # needed by dvmResolveInstField
+    movl    rSELF,rIBASE
     EXPORT_PC
-    movl    offGlue_method(%edx),%edx           # edx<- current method
-    movl    offMethod_clazz(%edx),%edx          # edx<- method->clazz
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
     SPILL_TMP1(%ecx)                            # save obj pointer across call
-    movl    %edx,OUT_ARG0(%esp)                  # pass in method->clazz
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
     call    dvmResolveInstField                 #  ... to dvmResolveInstField
     UNSPILL_TMP1(%ecx)
     testl   %eax,%eax                           #  returns InstrField ptr
@@ -7218,21 +2722,42 @@
     testl   %ecx,%ecx                           # object null?
     je      common_errNullObject                # object was null
     movzbl   (%ecx,%eax,1),%ecx                  # ecx<- obj.field (8/16/32 bits)
-    movl    rINST,%eax                          # eax<- A
-    FETCH_INST_OPCODE 2 %edx
-    SET_VREG %ecx %eax
+    FETCH_INST_OPCODE 2 %eax
+    UNSPILL(rIBASE)
+    SET_VREG %ecx rINST
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-/* continuation for OP_IGET_BYTE */
+    GOTO_NEXT_R %eax
 
 
-.LOP_IGET_BYTE_resolve:
+/* ------------------------------ */
+.L_OP_IGET_BYTE: /* 0x56 */
+/* File: x86/OP_IGET_BYTE.S */
+/* File: x86/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    /* op vA, vB, field@CCCC */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)                               # preserve rIBASE
+    movzwl  2(rPC),rIBASE                       # rIBASE<- 0000CCCC
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzbl  rINSTbl,%ecx                        # ecx<- BA
+    sarl    $4,%ecx                            # ecx<- B
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    andb    $0xf,rINSTbl                       # rINST<- A
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IGET_BYTE_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)               # needed by dvmResolveInstField
+    movl    rSELF,rIBASE
     EXPORT_PC
-    movl    offGlue_method(%edx),%edx           # edx<- current method
-    movl    offMethod_clazz(%edx),%edx          # edx<- method->clazz
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
     SPILL_TMP1(%ecx)                            # save obj pointer across call
-    movl    %edx,OUT_ARG0(%esp)                  # pass in method->clazz
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
     call    dvmResolveInstField                 #  ... to dvmResolveInstField
     UNSPILL_TMP1(%ecx)
     testl   %eax,%eax                           #  returns InstrField ptr
@@ -7250,21 +2775,42 @@
     testl   %ecx,%ecx                           # object null?
     je      common_errNullObject                # object was null
     movsbl   (%ecx,%eax,1),%ecx                  # ecx<- obj.field (8/16/32 bits)
-    movl    rINST,%eax                          # eax<- A
-    FETCH_INST_OPCODE 2 %edx
-    SET_VREG %ecx %eax
+    FETCH_INST_OPCODE 2 %eax
+    UNSPILL(rIBASE)
+    SET_VREG %ecx rINST
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-/* continuation for OP_IGET_CHAR */
+    GOTO_NEXT_R %eax
 
 
-.LOP_IGET_CHAR_resolve:
+/* ------------------------------ */
+.L_OP_IGET_CHAR: /* 0x57 */
+/* File: x86/OP_IGET_CHAR.S */
+/* File: x86/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    /* op vA, vB, field@CCCC */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)                               # preserve rIBASE
+    movzwl  2(rPC),rIBASE                       # rIBASE<- 0000CCCC
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzbl  rINSTbl,%ecx                        # ecx<- BA
+    sarl    $4,%ecx                            # ecx<- B
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    andb    $0xf,rINSTbl                       # rINST<- A
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IGET_CHAR_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)               # needed by dvmResolveInstField
+    movl    rSELF,rIBASE
     EXPORT_PC
-    movl    offGlue_method(%edx),%edx           # edx<- current method
-    movl    offMethod_clazz(%edx),%edx          # edx<- method->clazz
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
     SPILL_TMP1(%ecx)                            # save obj pointer across call
-    movl    %edx,OUT_ARG0(%esp)                  # pass in method->clazz
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
     call    dvmResolveInstField                 #  ... to dvmResolveInstField
     UNSPILL_TMP1(%ecx)
     testl   %eax,%eax                           #  returns InstrField ptr
@@ -7282,21 +2828,42 @@
     testl   %ecx,%ecx                           # object null?
     je      common_errNullObject                # object was null
     movzwl   (%ecx,%eax,1),%ecx                  # ecx<- obj.field (8/16/32 bits)
-    movl    rINST,%eax                          # eax<- A
-    FETCH_INST_OPCODE 2 %edx
-    SET_VREG %ecx %eax
+    FETCH_INST_OPCODE 2 %eax
+    UNSPILL(rIBASE)
+    SET_VREG %ecx rINST
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-/* continuation for OP_IGET_SHORT */
+    GOTO_NEXT_R %eax
 
 
-.LOP_IGET_SHORT_resolve:
+/* ------------------------------ */
+.L_OP_IGET_SHORT: /* 0x58 */
+/* File: x86/OP_IGET_SHORT.S */
+/* File: x86/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    /* op vA, vB, field@CCCC */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)                               # preserve rIBASE
+    movzwl  2(rPC),rIBASE                       # rIBASE<- 0000CCCC
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzbl  rINSTbl,%ecx                        # ecx<- BA
+    sarl    $4,%ecx                            # ecx<- B
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    andb    $0xf,rINSTbl                       # rINST<- A
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IGET_SHORT_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)               # needed by dvmResolveInstField
+    movl    rSELF,rIBASE
     EXPORT_PC
-    movl    offGlue_method(%edx),%edx           # edx<- current method
-    movl    offMethod_clazz(%edx),%edx          # edx<- method->clazz
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
     SPILL_TMP1(%ecx)                            # save obj pointer across call
-    movl    %edx,OUT_ARG0(%esp)                  # pass in method->clazz
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
     call    dvmResolveInstField                 #  ... to dvmResolveInstField
     UNSPILL_TMP1(%ecx)
     testl   %eax,%eax                           #  returns InstrField ptr
@@ -7314,21 +2881,42 @@
     testl   %ecx,%ecx                           # object null?
     je      common_errNullObject                # object was null
     movswl   (%ecx,%eax,1),%ecx                  # ecx<- obj.field (8/16/32 bits)
-    movl    rINST,%eax                          # eax<- A
-    FETCH_INST_OPCODE 2 %edx
-    SET_VREG %ecx %eax
+    FETCH_INST_OPCODE 2 %eax
+    UNSPILL(rIBASE)
+    SET_VREG %ecx rINST
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-/* continuation for OP_IPUT */
+    GOTO_NEXT_R %eax
 
 
-.LOP_IPUT_resolve:
+/* ------------------------------ */
+.L_OP_IPUT: /* 0x59 */
+/* File: x86/OP_IPUT.S */
+
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    /* op vA, vB, field@CCCC */
+    movl    rSELF,%ecx
+    SPILL   (rIBASE)
+    movzwl  2(rPC),rIBASE                       # rIBASE<- 0000CCCC
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzbl  rINSTbl,%ecx                        # ecx<- BA
+    sarl    $4,%ecx                            # ecx<- B
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    andb    $0xf,rINSTbl                       # rINST<- A
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IPUT_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)
+    movl    rSELF,rIBASE
     EXPORT_PC
-    movl    offGlue_method(%edx),%edx           # edx<- current method
-    movl    offMethod_clazz(%edx),%edx          # edx<- method->clazz
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
     SPILL_TMP1(%ecx)                            # save obj pointer across call
-    movl    %edx,OUT_ARG0(%esp)                 # pass in method->clazz
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
     call    dvmResolveInstField                 #  ... to dvmResolveInstField
     UNSPILL_TMP1(%ecx)
     testl   %eax,%eax                           # returns InstrField ptr
@@ -7346,20 +2934,39 @@
     movl    offInstField_byteOffset(%eax),%eax   # eax<- byte offset of field
     testl   %ecx,%ecx                            # object null?
     je      common_errNullObject                 # object was null
-    FETCH_INST_OPCODE 2 %edx
     movl   rINST,(%ecx,%eax,1)            # obj.field <- v[A](8/16/32 bits)
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
-/* continuation for OP_IPUT_WIDE */
-
-
-.LOP_IPUT_WIDE_resolve:
+/* ------------------------------ */
+.L_OP_IPUT_WIDE: /* 0x5a */
+/* File: x86/OP_IPUT_WIDE.S */
+    /*
+     * 64-bit instance field put.
+     *
+     */
+    /* op vA, vB, field@CCCC */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)
+    movzwl  2(rPC),rIBASE                       # rIBASE<- 0000CCCC
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzbl  rINSTbl,%ecx                        # ecx<- BA
+    sarl    $4,%ecx                            # ecx<- B
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    andb    $0xf,rINSTbl                       # rINST<- A
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IPUT_WIDE_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)
+    movl    rSELF,rIBASE
     EXPORT_PC
-    movl    offGlue_method(%edx),%edx           # edx<- current method
-    movl    offMethod_clazz(%edx),%edx          # edx<- method->clazz
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
     SPILL_TMP1(%ecx)                            # save obj pointer across call
-    movl    %edx,OUT_ARG0(%esp)                 # pass in method->clazz
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
     call    dvmResolveInstField                 #  ... to dvmResolveInstField
     UNSPILL_TMP1(%ecx)
     testl   %eax,%eax                           #  ... which returns InstrField ptr
@@ -7371,7 +2978,7 @@
      * Currently:
      *   eax holds resolved field
      *   ecx holds object
-     *   %edx is scratch, but needs to be unspilled
+     *   rIBASE is scratch, but needs to be unspilled
      *   rINST holds A
      */
     movl    offInstField_byteOffset(%eax),%eax  # eax<- byte offset of field
@@ -7380,21 +2987,41 @@
     leal    (%ecx,%eax,1),%eax                  # eax<- address of field
     GET_VREG_WORD %ecx rINST 0                  # ecx<- lsw
     GET_VREG_WORD rINST rINST 1                 # rINST<- msw
-    FETCH_INST_OPCODE 2 %edx
     movl    rINST,4(%eax)
     movl    %ecx,(%eax)
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
-/* continuation for OP_IPUT_OBJECT */
-
-
-.LOP_IPUT_OBJECT_resolve:
+/* ------------------------------ */
+.L_OP_IPUT_OBJECT: /* 0x5b */
+/* File: x86/OP_IPUT_OBJECT.S */
+    /*
+     * Object field put.
+     *
+     * for: iput-object
+     */
+    /* op vA, vB, field@CCCC */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)
+    movzwl  2(rPC),rIBASE                       # rIBASE<- 0000CCCC
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzbl  rINSTbl,%ecx                        # ecx<- BA
+    sarl    $4,%ecx                            # ecx<- B
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    andb    $0xf,rINSTbl                       # rINST<- A
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
+    movl    (%eax,rIBASE,4),%eax                  # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IPUT_OBJECT_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)
+    movl    rSELF,rIBASE
     EXPORT_PC
-    movl    offGlue_method(%edx),%edx           # edx<- current method
-    movl    offMethod_clazz(%edx),%edx          # edx<- method->clazz
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
     SPILL_TMP1(%ecx)                            # save obj pointer across call
-    movl    %edx,OUT_ARG0(%esp)                 # pass in method->clazz
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
     call    dvmResolveInstField                 #  ... to dvmResolveInstField
     UNSPILL_TMP1(%ecx)
     testl   %eax,%eax                           # returns InstrField ptr
@@ -7406,7 +3033,7 @@
      * Currently:
      *   eax holds resolved field
      *   ecx holds object
-     *   %edx is scratch, but needs to be unspilled
+     *   rIBASE is scratch, but needs to be unspilled
      *   rINST holds A
      */
     GET_VREG_R rINST rINST                      # rINST<- v[A]
@@ -7414,26 +3041,48 @@
     testl   %ecx,%ecx                           # object null?
     je      common_errNullObject                # object was null
     movl    rINST,(%ecx,%eax)      # obj.field <- v[A](8/16/32 bits)
-    movl    rGLUE,%eax
+    movl    rSELF,%eax
     testl   rINST,rINST                         # stored a NULL?
-    movl    offGlue_cardTable(%eax),%eax        # get card table base
-    FETCH_INST_OPCODE 2 %edx
+    movl    offThread_cardTable(%eax),%eax      # get card table base
     je      1f                                  # skip card mark if null store
     shrl    $GC_CARD_SHIFT,%ecx                # object head to card number
     movb    %al,(%eax,%ecx)                     # mark card using object head
 1:
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
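
Beyond the plain store, the iput-object handler above also performs the collector's write barrier: after a non-null reference store it shifts the object address right by GC_CARD_SHIFT and writes a byte into the card table. A rough C sketch of that barrier, with an assumed card size, is:

    #include <stddef.h>
    #include <stdint.h>

    #define GC_CARD_SHIFT 7   /* assumed 128-byte cards; value is illustrative */

    /* Write barrier matching the shrl/movb pair above: after a non-null
     * reference store into 'obj', dirty the card covering the object head.
     * The byte written is simply the low byte of the card-table base,
     * exactly as movb %al,(%eax,%ecx) does in the handler. */
    static void markCardSketch(uint8_t* cardTableBase, void* obj,
                               void* storedRef)
    {
        if (storedRef == NULL)
            return;                              /* skip card mark on null store */
        uintptr_t card = (uintptr_t)obj >> GC_CARD_SHIFT;
        cardTableBase[card] = (uint8_t)(uintptr_t)cardTableBase;
    }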
 
-/* continuation for OP_IPUT_BOOLEAN */
+/* ------------------------------ */
+.L_OP_IPUT_BOOLEAN: /* 0x5c */
+/* File: x86/OP_IPUT_BOOLEAN.S */
+/* File: x86/OP_IPUT.S */
 
-
-.LOP_IPUT_BOOLEAN_resolve:
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    /* op vA, vB, field@CCCC */
+    movl    rSELF,%ecx
+    SPILL   (rIBASE)
+    movzwl  2(rPC),rIBASE                       # rIBASE<- 0000CCCC
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzbl  rINSTbl,%ecx                        # ecx<- BA
+    sarl    $4,%ecx                            # ecx<- B
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    andb    $0xf,rINSTbl                       # rINST<- A
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IPUT_BOOLEAN_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)
+    movl    rSELF,rIBASE
     EXPORT_PC
-    movl    offGlue_method(%edx),%edx           # edx<- current method
-    movl    offMethod_clazz(%edx),%edx          # edx<- method->clazz
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
     SPILL_TMP1(%ecx)                            # save obj pointer across call
-    movl    %edx,OUT_ARG0(%esp)                 # pass in method->clazz
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
     call    dvmResolveInstField                 #  ... to dvmResolveInstField
     UNSPILL_TMP1(%ecx)
     testl   %eax,%eax                           # returns InstrField ptr
@@ -7451,20 +3100,43 @@
     movl    offInstField_byteOffset(%eax),%eax   # eax<- byte offset of field
     testl   %ecx,%ecx                            # object null?
     je      common_errNullObject                 # object was null
-    FETCH_INST_OPCODE 2 %edx
     movb   rINSTbl,(%ecx,%eax,1)            # obj.field <- v[A](8/16/32 bits)
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-/* continuation for OP_IPUT_BYTE */
+    GOTO_NEXT_R %ecx
 
 
-.LOP_IPUT_BYTE_resolve:
+/* ------------------------------ */
+.L_OP_IPUT_BYTE: /* 0x5d */
+/* File: x86/OP_IPUT_BYTE.S */
+/* File: x86/OP_IPUT.S */
+
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    /* op vA, vB, field@CCCC */
+    movl    rSELF,%ecx
+    SPILL   (rIBASE)
+    movzwl  2(rPC),rIBASE                       # rIBASE<- 0000CCCC
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzbl  rINSTbl,%ecx                        # ecx<- BA
+    sarl    $4,%ecx                            # ecx<- B
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    andb    $0xf,rINSTbl                       # rINST<- A
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IPUT_BYTE_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)
+    movl    rSELF,rIBASE
     EXPORT_PC
-    movl    offGlue_method(%edx),%edx           # edx<- current method
-    movl    offMethod_clazz(%edx),%edx          # edx<- method->clazz
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
     SPILL_TMP1(%ecx)                            # save obj pointer across call
-    movl    %edx,OUT_ARG0(%esp)                 # pass in method->clazz
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
     call    dvmResolveInstField                 #  ... to dvmResolveInstField
     UNSPILL_TMP1(%ecx)
     testl   %eax,%eax                           # returns InstrField ptr
@@ -7482,20 +3154,43 @@
     movl    offInstField_byteOffset(%eax),%eax   # eax<- byte offset of field
     testl   %ecx,%ecx                            # object null?
     je      common_errNullObject                 # object was null
-    FETCH_INST_OPCODE 2 %edx
     movb   rINSTbl,(%ecx,%eax,1)            # obj.field <- v[A](8/16/32 bits)
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-/* continuation for OP_IPUT_CHAR */
+    GOTO_NEXT_R %ecx
 
 
-.LOP_IPUT_CHAR_resolve:
+/* ------------------------------ */
+.L_OP_IPUT_CHAR: /* 0x5e */
+/* File: x86/OP_IPUT_CHAR.S */
+/* File: x86/OP_IPUT.S */
+
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    /* op vA, vB, field@CCCC */
+    movl    rSELF,%ecx
+    SPILL   (rIBASE)
+    movzwl  2(rPC),rIBASE                       # rIBASE<- 0000CCCC
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzbl  rINSTbl,%ecx                        # ecx<- BA
+    sarl    $4,%ecx                            # ecx<- B
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    andb    $0xf,rINSTbl                       # rINST<- A
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IPUT_CHAR_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)
+    movl    rSELF,rIBASE
     EXPORT_PC
-    movl    offGlue_method(%edx),%edx           # edx<- current method
-    movl    offMethod_clazz(%edx),%edx          # edx<- method->clazz
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
     SPILL_TMP1(%ecx)                            # save obj pointer across call
-    movl    %edx,OUT_ARG0(%esp)                 # pass in method->clazz
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
     call    dvmResolveInstField                 #  ... to dvmResolveInstField
     UNSPILL_TMP1(%ecx)
     testl   %eax,%eax                           # returns InstrField ptr
@@ -7513,20 +3208,43 @@
     movl    offInstField_byteOffset(%eax),%eax   # eax<- byte offset of field
     testl   %ecx,%ecx                            # object null?
     je      common_errNullObject                 # object was null
-    FETCH_INST_OPCODE 2 %edx
     movw   rINSTw,(%ecx,%eax,1)            # obj.field <- v[A](8/16/32 bits)
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-/* continuation for OP_IPUT_SHORT */
+    GOTO_NEXT_R %ecx
 
 
-.LOP_IPUT_SHORT_resolve:
+/* ------------------------------ */
+.L_OP_IPUT_SHORT: /* 0x5f */
+/* File: x86/OP_IPUT_SHORT.S */
+/* File: x86/OP_IPUT.S */
+
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    /* op vA, vB, field@CCCC */
+    movl    rSELF,%ecx
+    SPILL   (rIBASE)
+    movzwl  2(rPC),rIBASE                       # rIBASE<- 0000CCCC
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzbl  rINSTbl,%ecx                        # ecx<- BA
+    sarl    $4,%ecx                            # ecx<- B
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    andb    $0xf,rINSTbl                       # rINST<- A
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IPUT_SHORT_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)
+    movl    rSELF,rIBASE
     EXPORT_PC
-    movl    offGlue_method(%edx),%edx           # edx<- current method
-    movl    offMethod_clazz(%edx),%edx          # edx<- method->clazz
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
     SPILL_TMP1(%ecx)                            # save obj pointer across call
-    movl    %edx,OUT_ARG0(%esp)                 # pass in method->clazz
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
     call    dvmResolveInstField                 #  ... to dvmResolveInstField
     UNSPILL_TMP1(%ecx)
     testl   %eax,%eax                           # returns InstrField ptr
@@ -7544,279 +3262,634 @@
     movl    offInstField_byteOffset(%eax),%eax   # eax<- byte offset of field
     testl   %ecx,%ecx                            # object null?
     je      common_errNullObject                 # object was null
-    FETCH_INST_OPCODE 2 %edx
     movw   rINSTw,(%ecx,%eax,1)            # obj.field <- v[A](8/16/32 bits)
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
-/* continuation for OP_SGET */
+
+/* ------------------------------ */
+.L_OP_SGET: /* 0x60 */
+/* File: x86/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    /* op vAA, field@BBBB */
+    movl      rSELF,%ecx
+    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SGET_resolve                # if not, make it so
+.LOP_SGET_finish:     # field ptr in eax
+    movl      offStaticField_value(%eax),%eax
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    SET_VREG %eax rINST
+    GOTO_NEXT_R %ecx
 
     /*
      * Go resolve the field
      */
 .LOP_SGET_resolve:
-    movl     rGLUE,%ecx
+    movl     rSELF,%ecx
     movzwl   2(rPC),%eax                        # eax<- field ref BBBB
-    movl     offGlue_method(%ecx),%ecx          # ecx<- current method
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
     EXPORT_PC                                   # could throw, need to export
     movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
     movl     %eax,OUT_ARG1(%esp)
     movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
     call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
     testl    %eax,%eax
     jne      .LOP_SGET_finish                 # success, continue
     jmp      common_exceptionThrown             # no, handle exception
 
-/* continuation for OP_SGET_WIDE */
+/* ------------------------------ */
+.L_OP_SGET_WIDE: /* 0x61 */
+/* File: x86/OP_SGET_WIDE.S */
+    /*
+     * 64-bit SGET handler.
+     *
+     */
+    /* sget-wide vAA, field@BBBB */
+    movl      rSELF,%ecx
+    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SGET_WIDE_resolve                # if not, make it so
+.LOP_SGET_WIDE_finish:     # field ptr in eax
+    movl      offStaticField_value(%eax),%ecx    # ecx<- lsw
+    movl      4+offStaticField_value(%eax),%eax  # eax<- msw
+    SET_VREG_WORD %ecx rINST 0
+    FETCH_INST_OPCODE 2 %ecx
+    SET_VREG_WORD %eax rINST 1
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
 
     /*
      * Go resolve the field
      */
 .LOP_SGET_WIDE_resolve:
-    movl     rGLUE,%ecx
+    movl     rSELF,%ecx
     movzwl   2(rPC),%eax                        # eax<- field ref BBBB
-    movl     offGlue_method(%ecx),%ecx          # ecx<- current method
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
     EXPORT_PC                                   # could throw, need to export
     movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
     movl     %eax,OUT_ARG1(%esp)
     movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
     call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
     testl    %eax,%eax
     jne      .LOP_SGET_WIDE_finish                 # success, continue
     jmp      common_exceptionThrown             # no, handle exception
 
-/* continuation for OP_SGET_OBJECT */
+/* ------------------------------ */
+.L_OP_SGET_OBJECT: /* 0x62 */
+/* File: x86/OP_SGET_OBJECT.S */
+/* File: x86/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    /* op vAA, field@BBBB */
+    movl      rSELF,%ecx
+    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SGET_OBJECT_resolve                # if not, make it so
+.LOP_SGET_OBJECT_finish:     # field ptr in eax
+    movl      offStaticField_value(%eax),%eax
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    SET_VREG %eax rINST
+    GOTO_NEXT_R %ecx
 
     /*
      * Go resolve the field
      */
 .LOP_SGET_OBJECT_resolve:
-    movl     rGLUE,%ecx
+    movl     rSELF,%ecx
     movzwl   2(rPC),%eax                        # eax<- field ref BBBB
-    movl     offGlue_method(%ecx),%ecx          # ecx<- current method
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
     EXPORT_PC                                   # could throw, need to export
     movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
     movl     %eax,OUT_ARG1(%esp)
     movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
     call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
     testl    %eax,%eax
     jne      .LOP_SGET_OBJECT_finish                 # success, continue
     jmp      common_exceptionThrown             # no, handle exception
 
-/* continuation for OP_SGET_BOOLEAN */
+
+/* ------------------------------ */
+.L_OP_SGET_BOOLEAN: /* 0x63 */
+/* File: x86/OP_SGET_BOOLEAN.S */
+/* File: x86/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    /* op vAA, field@BBBB */
+    movl      rSELF,%ecx
+    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SGET_BOOLEAN_resolve                # if not, make it so
+.LOP_SGET_BOOLEAN_finish:     # field ptr in eax
+    movl      offStaticField_value(%eax),%eax
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    SET_VREG %eax rINST
+    GOTO_NEXT_R %ecx
 
     /*
      * Go resolve the field
      */
 .LOP_SGET_BOOLEAN_resolve:
-    movl     rGLUE,%ecx
+    movl     rSELF,%ecx
     movzwl   2(rPC),%eax                        # eax<- field ref BBBB
-    movl     offGlue_method(%ecx),%ecx          # ecx<- current method
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
     EXPORT_PC                                   # could throw, need to export
     movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
     movl     %eax,OUT_ARG1(%esp)
     movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
     call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
     testl    %eax,%eax
     jne      .LOP_SGET_BOOLEAN_finish                 # success, continue
     jmp      common_exceptionThrown             # no, handle exception
 
-/* continuation for OP_SGET_BYTE */
+
+/* ------------------------------ */
+.L_OP_SGET_BYTE: /* 0x64 */
+/* File: x86/OP_SGET_BYTE.S */
+/* File: x86/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    /* op vAA, field@BBBB */
+    movl      rSELF,%ecx
+    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SGET_BYTE_resolve                # if not, make it so
+.LOP_SGET_BYTE_finish:     # field ptr in eax
+    movl      offStaticField_value(%eax),%eax
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    SET_VREG %eax rINST
+    GOTO_NEXT_R %ecx
 
     /*
      * Go resolve the field
      */
 .LOP_SGET_BYTE_resolve:
-    movl     rGLUE,%ecx
+    movl     rSELF,%ecx
     movzwl   2(rPC),%eax                        # eax<- field ref BBBB
-    movl     offGlue_method(%ecx),%ecx          # ecx<- current method
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
     EXPORT_PC                                   # could throw, need to export
     movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
     movl     %eax,OUT_ARG1(%esp)
     movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
     call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
     testl    %eax,%eax
     jne      .LOP_SGET_BYTE_finish                 # success, continue
     jmp      common_exceptionThrown             # no, handle exception
 
-/* continuation for OP_SGET_CHAR */
+
+/* ------------------------------ */
+.L_OP_SGET_CHAR: /* 0x65 */
+/* File: x86/OP_SGET_CHAR.S */
+/* File: x86/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    /* op vAA, field@BBBB */
+    movl      rSELF,%ecx
+    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SGET_CHAR_resolve                # if not, make it so
+.LOP_SGET_CHAR_finish:     # field ptr in eax
+    movl      offStaticField_value(%eax),%eax
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    SET_VREG %eax rINST
+    GOTO_NEXT_R %ecx
 
     /*
      * Go resolve the field
      */
 .LOP_SGET_CHAR_resolve:
-    movl     rGLUE,%ecx
+    movl     rSELF,%ecx
     movzwl   2(rPC),%eax                        # eax<- field ref BBBB
-    movl     offGlue_method(%ecx),%ecx          # ecx<- current method
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
     EXPORT_PC                                   # could throw, need to export
     movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
     movl     %eax,OUT_ARG1(%esp)
     movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
     call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
     testl    %eax,%eax
     jne      .LOP_SGET_CHAR_finish                 # success, continue
     jmp      common_exceptionThrown             # no, handle exception
 
-/* continuation for OP_SGET_SHORT */
+
+/* ------------------------------ */
+.L_OP_SGET_SHORT: /* 0x66 */
+/* File: x86/OP_SGET_SHORT.S */
+/* File: x86/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    /* op vAA, field@BBBB */
+    movl      rSELF,%ecx
+    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SGET_SHORT_resolve                # if not, make it so
+.LOP_SGET_SHORT_finish:     # field ptr in eax
+    movl      offStaticField_value(%eax),%eax
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    SET_VREG %eax rINST
+    GOTO_NEXT_R %ecx
 
     /*
      * Go resolve the field
      */
 .LOP_SGET_SHORT_resolve:
-    movl     rGLUE,%ecx
+    movl     rSELF,%ecx
     movzwl   2(rPC),%eax                        # eax<- field ref BBBB
-    movl     offGlue_method(%ecx),%ecx          # ecx<- current method
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
     EXPORT_PC                                   # could throw, need to export
     movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
     movl     %eax,OUT_ARG1(%esp)
     movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
     call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
     testl    %eax,%eax
     jne      .LOP_SGET_SHORT_finish                 # success, continue
     jmp      common_exceptionThrown             # no, handle exception
 
-/* continuation for OP_SPUT */
+
+/* ------------------------------ */
+.L_OP_SPUT: /* 0x67 */
+/* File: x86/OP_SPUT.S */
+    /*
+     * General 32-bit SPUT handler.
+     *
+     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    /* op vAA, field@BBBB */
+    movl      rSELF,%ecx
+    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SPUT_resolve                # if not, make it so
+.LOP_SPUT_finish:     # field ptr in eax
+    GET_VREG_R  rINST rINST
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    movl      rINST,offStaticField_value(%eax)
+    GOTO_NEXT_R %ecx
 
     /*
      * Go resolve the field
      */
 .LOP_SPUT_resolve:
-    movl     rGLUE,%ecx
+    movl     rSELF,%ecx
     movzwl   2(rPC),%eax                        # eax<- field ref BBBB
-    movl     offGlue_method(%ecx),%ecx          # ecx<- current method
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
     EXPORT_PC                                   # could throw, need to export
     movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
     movl     %eax,OUT_ARG1(%esp)
     movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
     call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
     testl    %eax,%eax
     jne      .LOP_SPUT_finish                 # success, continue
     jmp      common_exceptionThrown             # no, handle exception
 
-/* continuation for OP_SPUT_WIDE */
+/* ------------------------------ */
+.L_OP_SPUT_WIDE: /* 0x68 */
+/* File: x86/OP_SPUT_WIDE.S */
+    /*
+     * 64-bit SPUT handler.
+     *
+     */
+    /* op vAA, field@BBBB */
+    movl      rSELF,%ecx
+    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SPUT_WIDE_resolve                # if not, make it so
+.LOP_SPUT_WIDE_finish:     # field ptr in eax
+    GET_VREG_WORD %ecx rINST 0                  # ecx<- lsw
+    GET_VREG_WORD rINST rINST 1                 # rINST<- msw
+    movl      %ecx,offStaticField_value(%eax)
+    FETCH_INST_OPCODE 2 %ecx
+    movl      rINST,4+offStaticField_value(%eax)
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
 
     /*
      * Go resolve the field
      */
 .LOP_SPUT_WIDE_resolve:
-    movl     rGLUE,%ecx
+    movl     rSELF,%ecx
     movzwl   2(rPC),%eax                        # eax<- field ref BBBB
-    movl     offGlue_method(%ecx),%ecx          # ecx<- current method
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
     EXPORT_PC                                   # could throw, need to export
     movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
     movl     %eax,OUT_ARG1(%esp)
     movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
     call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
     testl    %eax,%eax
     jne      .LOP_SPUT_WIDE_finish                 # success, continue
     jmp      common_exceptionThrown             # no, handle exception
 
-/* continuation for OP_SPUT_OBJECT */
-
-
-.LOP_SPUT_OBJECT_continue:
+/* ------------------------------ */
+.L_OP_SPUT_OBJECT: /* 0x69 */
+/* File: x86/OP_SPUT_OBJECT.S */
+    /*
+     * SPUT object handler.
+     */
+    /* op vAA, field@BBBB */
+    movl      rSELF,%ecx
+    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SPUT_OBJECT_resolve                # if not, make it so
+.LOP_SPUT_OBJECT_finish:                              # field ptr in eax
+    movzbl    rINSTbl,%ecx                       # ecx<- AA
+    GET_VREG_R  %ecx %ecx
     movl      %ecx,offStaticField_value(%eax)    # do the store
     testl     %ecx,%ecx                          # stored null object ptr?
-    FETCH_INST_OPCODE 2 %edx
     je        1f                                 # skip card mark if null
-    movl      rGLUE,%ecx
+    movl      rSELF,%ecx
     movl      offField_clazz(%eax),%eax          # eax<- method->clazz
-    movl      offGlue_cardTable(%ecx),%ecx       # get card table base
+    movl      offThread_cardTable(%ecx),%ecx       # get card table base
     shrl      $GC_CARD_SHIFT,%eax               # head to card number
     movb      %cl,(%ecx,%eax)                    # mark card
 1:
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 .LOP_SPUT_OBJECT_resolve:
-    movl     rGLUE,%ecx
+    movl     rSELF,%ecx
     movzwl   2(rPC),%eax                        # eax<- field ref BBBB
-    movl     offGlue_method(%ecx),%ecx          # ecx<- current method
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
     EXPORT_PC                                   # could throw, need to export
     movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
     movl     %eax,OUT_ARG1(%esp)
     movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
     call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
     testl    %eax,%eax
     jne      .LOP_SPUT_OBJECT_finish                 # success, continue
     jmp      common_exceptionThrown             # no, handle exception
 
-/* continuation for OP_SPUT_BOOLEAN */
+/* ------------------------------ */
+.L_OP_SPUT_BOOLEAN: /* 0x6a */
+/* File: x86/OP_SPUT_BOOLEAN.S */
+/* File: x86/OP_SPUT.S */
+    /*
+     * General 32-bit SPUT handler.
+     *
+     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    /* op vAA, field@BBBB */
+    movl      rSELF,%ecx
+    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SPUT_BOOLEAN_resolve                # if not, make it so
+.LOP_SPUT_BOOLEAN_finish:     # field ptr in eax
+    GET_VREG_R  rINST rINST
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    movl      rINST,offStaticField_value(%eax)
+    GOTO_NEXT_R %ecx
 
     /*
      * Go resolve the field
      */
 .LOP_SPUT_BOOLEAN_resolve:
-    movl     rGLUE,%ecx
+    movl     rSELF,%ecx
     movzwl   2(rPC),%eax                        # eax<- field ref BBBB
-    movl     offGlue_method(%ecx),%ecx          # ecx<- current method
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
     EXPORT_PC                                   # could throw, need to export
     movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
     movl     %eax,OUT_ARG1(%esp)
     movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
     call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
     testl    %eax,%eax
     jne      .LOP_SPUT_BOOLEAN_finish                 # success, continue
     jmp      common_exceptionThrown             # no, handle exception
 
-/* continuation for OP_SPUT_BYTE */
+
+/* ------------------------------ */
+.L_OP_SPUT_BYTE: /* 0x6b */
+/* File: x86/OP_SPUT_BYTE.S */
+/* File: x86/OP_SPUT.S */
+    /*
+     * General 32-bit SPUT handler.
+     *
+     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    /* op vAA, field@BBBB */
+    movl      rSELF,%ecx
+    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SPUT_BYTE_resolve                # if not, make it so
+.LOP_SPUT_BYTE_finish:     # field ptr in eax
+    GET_VREG_R  rINST rINST
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    movl      rINST,offStaticField_value(%eax)
+    GOTO_NEXT_R %ecx
 
     /*
      * Go resolve the field
      */
 .LOP_SPUT_BYTE_resolve:
-    movl     rGLUE,%ecx
+    movl     rSELF,%ecx
     movzwl   2(rPC),%eax                        # eax<- field ref BBBB
-    movl     offGlue_method(%ecx),%ecx          # ecx<- current method
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
     EXPORT_PC                                   # could throw, need to export
     movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
     movl     %eax,OUT_ARG1(%esp)
     movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
     call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
     testl    %eax,%eax
     jne      .LOP_SPUT_BYTE_finish                 # success, continue
     jmp      common_exceptionThrown             # no, handle exception
 
-/* continuation for OP_SPUT_CHAR */
+
+/* ------------------------------ */
+.L_OP_SPUT_CHAR: /* 0x6c */
+/* File: x86/OP_SPUT_CHAR.S */
+/* File: x86/OP_SPUT.S */
+    /*
+     * General 32-bit SPUT handler.
+     *
+     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    /* op vAA, field@BBBB */
+    movl      rSELF,%ecx
+    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SPUT_CHAR_resolve                # if not, make it so
+.LOP_SPUT_CHAR_finish:     # field ptr in eax
+    GET_VREG_R  rINST rINST
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    movl      rINST,offStaticField_value(%eax)
+    GOTO_NEXT_R %ecx
 
     /*
      * Go resolve the field
      */
 .LOP_SPUT_CHAR_resolve:
-    movl     rGLUE,%ecx
+    movl     rSELF,%ecx
     movzwl   2(rPC),%eax                        # eax<- field ref BBBB
-    movl     offGlue_method(%ecx),%ecx          # ecx<- current method
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
     EXPORT_PC                                   # could throw, need to export
     movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
     movl     %eax,OUT_ARG1(%esp)
     movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
     call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
     testl    %eax,%eax
     jne      .LOP_SPUT_CHAR_finish                 # success, continue
     jmp      common_exceptionThrown             # no, handle exception
 
-/* continuation for OP_SPUT_SHORT */
+
+/* ------------------------------ */
+.L_OP_SPUT_SHORT: /* 0x6d */
+/* File: x86/OP_SPUT_SHORT.S */
+/* File: x86/OP_SPUT.S */
+    /*
+     * General 32-bit SPUT handler.
+     *
+     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    /* op vAA, field@BBBB */
+    movl      rSELF,%ecx
+    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SPUT_SHORT_resolve                # if not, make it so
+.LOP_SPUT_SHORT_finish:     # field ptr in eax
+    GET_VREG_R  rINST rINST
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    movl      rINST,offStaticField_value(%eax)
+    GOTO_NEXT_R %ecx
 
     /*
      * Go resolve the field
      */
 .LOP_SPUT_SHORT_resolve:
-    movl     rGLUE,%ecx
+    movl     rSELF,%ecx
     movzwl   2(rPC),%eax                        # eax<- field ref BBBB
-    movl     offGlue_method(%ecx),%ecx          # ecx<- current method
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
     EXPORT_PC                                   # could throw, need to export
     movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
     movl     %eax,OUT_ARG1(%esp)
     movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
     call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
     testl    %eax,%eax
     jne      .LOP_SPUT_SHORT_finish                 # success, continue
     jmp      common_exceptionThrown             # no, handle exception
 
-/* continuation for OP_INVOKE_VIRTUAL */
 
+/* ------------------------------ */
+.L_OP_INVOKE_VIRTUAL: /* 0x6e */
+/* File: x86/OP_INVOKE_VIRTUAL.S */
 
-.LOP_INVOKE_VIRTUAL_more:
+    /*
+     * Handle a virtual method call.
+     *
+     * for: invoke-virtual, invoke-virtual/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    movl      rSELF,%eax
+    movzwl    2(rPC),%ecx                 # ecx<- BBBB
+    movl      offThread_methodClassDex(%eax),%eax  # eax<- pDvmDex
+    EXPORT_PC
+    movl      offDvmDex_pResMethods(%eax),%eax   # eax<- pDvmDex->pResMethods
+    movl      (%eax,%ecx,4),%eax          # eax<- resolved baseMethod
+    testl     %eax,%eax                   # already resolved?
+    jne       .LOP_INVOKE_VIRTUAL_continue        # yes, continue
+    movl      rSELF,%eax
+    movl      %ecx,OUT_ARG1(%esp)         # arg1<- ref
+    movl      offThread_method(%eax),%eax   # eax<- self->method
     movl      offMethod_clazz(%eax),%eax  # eax<- method->clazz
     movl      %eax,OUT_ARG0(%esp)         # arg0<- clazz
     movl      $METHOD_VIRTUAL,OUT_ARG2(%esp) # arg2<- flags
@@ -7843,8 +3916,33 @@
     movl      (%ecx,%eax,4),%eax        # eax<- vtable[methodIndex]
     jmp       common_invokeMethodNoRange
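
Once the base method is resolved, the virtual call selects its target by indexing the receiver class's vtable with the method's methodIndex, as the "eax<- vtable[methodIndex]" load above shows. A tiny C sketch of that selection step, with illustrative types only:

    /* Virtual-method selection after resolution, mirroring
     * "eax<- vtable[methodIndex]" above.  These struct layouts are
     * stand-ins, not the real Method/ClassObject definitions. */
    typedef struct Method Method;
    typedef struct ClassObject { Method** vtable; } ClassObject;
    struct Method { unsigned short methodIndex; };

    static Method* selectVirtualTargetSketch(const ClassObject* receiverClass,
                                             const Method* baseMethod)
    {
        return receiverClass->vtable[baseMethod->methodIndex];
    }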
 
-/* continuation for OP_INVOKE_SUPER */
-
+/* ------------------------------ */
+.L_OP_INVOKE_SUPER: /* 0x6f */
+/* File: x86/OP_INVOKE_SUPER.S */
+    /*
+     * Handle a "super" method call.
+     *
+     * for: invoke-super, invoke-super/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    movl      rSELF,rINST
+    movzwl    2(rPC),%eax               # eax<- BBBB
+    movl      offThread_methodClassDex(rINST),%ecx # ecx<- pDvmDex
+    EXPORT_PC
+    movl      offDvmDex_pResMethods(%ecx),%ecx # ecx<- pDvmDex->pResMethods
+    movl      (%ecx,%eax,4),%ecx        # ecx<- resolved baseMethod
+    movl      offThread_method(rINST),%eax # eax<- method
+    movzwl    4(rPC),rINST              # rINST<- GFED or CCCC
+    .if       (!0)
+    andl      $0xf,rINST               # rINST<- D (or stays CCCC)
+    .endif
+    GET_VREG_R  rINST rINST             # rINST<- "this" ptr
+    testl     rINST,rINST               # null "this"?
+    je        common_errNullObject      # yes, throw
+    movl      offMethod_clazz(%eax),%eax # eax<- method->clazz
+    testl     %ecx,%ecx                 # already resolved?
+    je       .LOP_INVOKE_SUPER_resolve
     /*
      * At this point:
      *  ecx = resolved base method [r0]
@@ -7883,10 +3981,40 @@
      */
 .LOP_INVOKE_SUPER_nsm:
     movl    offMethod_name(%ecx),%eax
-    mov     %eax,OUT_ARG1(%esp)
     jmp     common_errNoSuchMethod
 
-/* continuation for OP_INVOKE_DIRECT */
+/* ------------------------------ */
+.L_OP_INVOKE_DIRECT: /* 0x70 */
+/* File: x86/OP_INVOKE_DIRECT.S */
+    /*
+     * Handle a direct method call.
+     *
+     * (We could defer the "is 'this' pointer null" test to the common
+     * method invocation code, and use a flag to indicate that static
+     * calls don't count.  If we do this as part of copying the arguments
+     * out we could avoid loading the first arg twice.)
+     *
+     * for: invoke-direct, invoke-direct/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    movl      rSELF,%ecx
+    movzwl    2(rPC),%eax              # eax<- BBBB
+    movl      offThread_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
+    EXPORT_PC
+    movl      offDvmDex_pResMethods(%ecx),%ecx  # ecx<- pDvmDex->pResMethods
+    movzwl    4(rPC),rIBASE            # rIBASE<- GFED or CCCC
+    movl      (%ecx,%eax,4),%eax       # eax<- resolved methodToCall
+    .if       (!0)
+    andl      $0xf,rIBASE             # rIBASE<- D (or stays CCCC)
+    .endif
+    testl     %eax,%eax                # already resolved?
+    GET_VREG_R  %ecx rIBASE            # ecx<- "this" ptr
+    je        .LOP_INVOKE_DIRECT_resolve      # not resolved, do it now
+.LOP_INVOKE_DIRECT_finish:
+    testl     %ecx,%ecx                # null "this"?
+    jne       common_invokeMethodNoRange  # no, continue on
+    jmp       common_errNullObject
 
     /*
      * On entry:
@@ -7896,8 +4024,8 @@
      */
 .LOP_INVOKE_DIRECT_resolve:
      SPILL_TMP1(%ecx)
-     movl     rGLUE,%ecx
-     movl     offGlue_method(%ecx),%ecx  # ecx<- glue->method
+     movl     rSELF,%ecx
+     movl     offThread_method(%ecx),%ecx  # ecx<- self->method
      movzwl   2(rPC),%eax      # reference (BBBB or CCCC)
      movl     offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
      movl     $METHOD_DIRECT,OUT_ARG2(%esp)
@@ -7909,9 +4037,30 @@
      jne      .LOP_INVOKE_DIRECT_finish
      jmp      common_exceptionThrown
 
-/* continuation for OP_INVOKE_STATIC */
-
-.LOP_INVOKE_STATIC_continue:
+/* ------------------------------ */
+.L_OP_INVOKE_STATIC: /* 0x71 */
+/* File: x86/OP_INVOKE_STATIC.S */
+    /*
+     * Handle a static method call.
+     *
+     * for: invoke-static, invoke-static/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    movl      rSELF,%ecx
+    movzwl    2(rPC),%eax               # eax<- BBBB
+    movl      offThread_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
+    EXPORT_PC
+    movl      offDvmDex_pResMethods(%ecx),%ecx  # ecx<- pDvmDex->pResMethods
+    movl      (%ecx,%eax,4),%eax        # eax<- resolved methodToCall
+    testl     %eax,%eax
+    jne       common_invokeMethodNoRange
+    movl      rSELF,%ecx
+    movl      offThread_method(%ecx),%ecx # ecx<- self->method
+    movzwl    2(rPC),%eax
+    movl      offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+    movl      %eax,OUT_ARG1(%esp)       # arg1<- BBBB
+    movl      %ecx,OUT_ARG0(%esp)       # arg0<- clazz
     movl      $METHOD_STATIC,%eax
     movl      %eax,OUT_ARG2(%esp)       # arg2<- flags
     call      dvmResolveMethod          # call(clazz,ref,flags)
@@ -7919,18 +4068,68 @@
     jne       common_invokeMethodNoRange
     jmp       common_exceptionThrown
 
-/* continuation for OP_INVOKE_INTERFACE */
-
-.LOP_INVOKE_INTERFACE_continue:
+/* ------------------------------ */
+.L_OP_INVOKE_INTERFACE: /* 0x72 */
+/* File: x86/OP_INVOKE_INTERFACE.S */
+    /*
+     * Handle an interface method call.
+     *
+     * for: invoke-interface, invoke-interface/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    movzwl     4(rPC),%eax              # eax<- FEDC or CCCC
+    movl       rSELF,%ecx
+    .if        (!0)
+    andl       $0xf,%eax               # eax<- C (or stays CCCC)
+    .endif
+    GET_VREG_R   %eax %eax              # eax<- "this"
+    EXPORT_PC
+    testl      %eax,%eax                # null this?
+    je         common_errNullObject     # yes, fail
+    movl       offObject_clazz(%eax),%eax # eax<- thisPtr->clazz
+    movl       %eax,OUT_ARG0(%esp)                 # arg0<- class
+    movl       offThread_methodClassDex(%ecx),%eax   # eax<- methodClassDex
+    movl       offThread_method(%ecx),%ecx           # ecx<- method
+    movl       %eax,OUT_ARG3(%esp)                 # arg3<- dex
+    movzwl     2(rPC),%eax                         # eax<- BBBB
+    movl       %ecx,OUT_ARG2(%esp)                 # arg2<- method
+    movl       %eax,OUT_ARG1(%esp)                 # arg1<- BBBB
     call       dvmFindInterfaceMethodInCache # eax<- call(class, ref, method, dex)
     testl      %eax,%eax
     je         common_exceptionThrown
     jmp        common_invokeMethodNoRange
 
-/* continuation for OP_INVOKE_VIRTUAL_RANGE */
+/* ------------------------------ */
+.L_OP_UNUSED_73: /* 0x73 */
+/* File: x86/OP_UNUSED_73.S */
+/* File: x86/unused.S */
+    jmp     common_abort
 
 
-.LOP_INVOKE_VIRTUAL_RANGE_more:
+/* ------------------------------ */
+.L_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */
+/* File: x86/OP_INVOKE_VIRTUAL_RANGE.S */
+/* File: x86/OP_INVOKE_VIRTUAL.S */
+
+    /*
+     * Handle a virtual method call.
+     *
+     * for: invoke-virtual, invoke-virtual/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    movl      rSELF,%eax
+    movzwl    2(rPC),%ecx                 # ecx<- BBBB
+    movl      offThread_methodClassDex(%eax),%eax  # eax<- pDvmDex
+    EXPORT_PC
+    movl      offDvmDex_pResMethods(%eax),%eax   # eax<- pDvmDex->pResMethods
+    movl      (%eax,%ecx,4),%eax          # eax<- resolved baseMethod
+    testl     %eax,%eax                   # already resolved?
+    jne       .LOP_INVOKE_VIRTUAL_RANGE_continue        # yes, continue
+    movl      rSELF,%eax
+    movl      %ecx,OUT_ARG1(%esp)         # arg1<- ref
+    movl      offThread_method(%eax),%eax   # eax<- self->method
     movl      offMethod_clazz(%eax),%eax  # eax<- method->clazz
     movl      %eax,OUT_ARG0(%esp)         # arg0<- clazz
     movl      $METHOD_VIRTUAL,OUT_ARG2(%esp) # arg2<- flags
@@ -7957,8 +4156,35 @@
     movl      (%ecx,%eax,4),%eax        # eax<- vtable[methodIndex]
     jmp       common_invokeMethodRange
 
-/* continuation for OP_INVOKE_SUPER_RANGE */
 
+/* ------------------------------ */
+.L_OP_INVOKE_SUPER_RANGE: /* 0x75 */
+/* File: x86/OP_INVOKE_SUPER_RANGE.S */
+/* File: x86/OP_INVOKE_SUPER.S */
+    /*
+     * Handle a "super" method call.
+     *
+     * for: invoke-super, invoke-super/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    movl      rSELF,rINST
+    movzwl    2(rPC),%eax               # eax<- BBBB
+    movl      offThread_methodClassDex(rINST),%ecx # ecx<- pDvmDex
+    EXPORT_PC
+    movl      offDvmDex_pResMethods(%ecx),%ecx # ecx<- pDvmDex->pResMethods
+    movl      (%ecx,%eax,4),%ecx        # ecx<- resolved baseMethod
+    movl      offThread_method(rINST),%eax # eax<- method
+    movzwl    4(rPC),rINST              # rINST<- GFED or CCCC
+    .if       (!1)
+    andl      $0xf,rINST               # rINST<- D (or stays CCCC)
+    .endif
+    GET_VREG_R  rINST rINST             # rINST<- "this" ptr
+    testl     rINST,rINST               # null "this"?
+    je        common_errNullObject      # yes, throw
+    movl      offMethod_clazz(%eax),%eax # eax<- method->clazz
+    testl     %ecx,%ecx                 # already resolved?
+    je       .LOP_INVOKE_SUPER_RANGE_resolve
     /*
      * At this point:
      *  ecx = resolved base method [r0]
@@ -7997,10 +4223,42 @@
      */
 .LOP_INVOKE_SUPER_RANGE_nsm:
     movl    offMethod_name(%ecx),%eax
-    mov     %eax,OUT_ARG1(%esp)
     jmp     common_errNoSuchMethod
 
-/* continuation for OP_INVOKE_DIRECT_RANGE */
+
+/* ------------------------------ */
+.L_OP_INVOKE_DIRECT_RANGE: /* 0x76 */
+/* File: x86/OP_INVOKE_DIRECT_RANGE.S */
+/* File: x86/OP_INVOKE_DIRECT.S */
+    /*
+     * Handle a direct method call.
+     *
+     * (We could defer the "is 'this' pointer null" test to the common
+     * method invocation code, and use a flag to indicate that static
+     * calls don't count.  If we do this as part of copying the arguments
+     * out we could avoid loading the first arg twice.)
+     *
+     * for: invoke-direct, invoke-direct/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    movl      rSELF,%ecx
+    movzwl    2(rPC),%eax              # eax<- BBBB
+    movl      offThread_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
+    EXPORT_PC
+    movl      offDvmDex_pResMethods(%ecx),%ecx  # ecx<- pDvmDex->pResMethods
+    movzwl    4(rPC),rIBASE            # rIBASE<- GFED or CCCC
+    movl      (%ecx,%eax,4),%eax       # eax<- resolved methodToCall
+    .if       (!1)
+    andl      $0xf,rIBASE             # rIBASE<- D (or stays CCCC)
+    .endif
+    testl     %eax,%eax                # already resolved?
+    GET_VREG_R  %ecx rIBASE            # ecx<- "this" ptr
+    je        .LOP_INVOKE_DIRECT_RANGE_resolve      # not resolved, do it now
+.LOP_INVOKE_DIRECT_RANGE_finish:
+    testl     %ecx,%ecx                # null "this"?
+    jne       common_invokeMethodRange  # no, continue on
+    jmp       common_errNullObject
 
     /*
      * On entry:
@@ -8010,8 +4268,8 @@
      */
 .LOP_INVOKE_DIRECT_RANGE_resolve:
      SPILL_TMP1(%ecx)
-     movl     rGLUE,%ecx
-     movl     offGlue_method(%ecx),%ecx  # ecx<- glue->method
+     movl     rSELF,%ecx
+     movl     offThread_method(%ecx),%ecx  # ecx<- self->method
      movzwl   2(rPC),%eax      # reference (BBBB or CCCC)
      movl     offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
      movl     $METHOD_DIRECT,OUT_ARG2(%esp)
@@ -8023,9 +4281,32 @@
      jne      .LOP_INVOKE_DIRECT_RANGE_finish
      jmp      common_exceptionThrown
 
-/* continuation for OP_INVOKE_STATIC_RANGE */
 
-.LOP_INVOKE_STATIC_RANGE_continue:
+/* ------------------------------ */
+.L_OP_INVOKE_STATIC_RANGE: /* 0x77 */
+/* File: x86/OP_INVOKE_STATIC_RANGE.S */
+/* File: x86/OP_INVOKE_STATIC.S */
+    /*
+     * Handle a static method call.
+     *
+     * for: invoke-static, invoke-static/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    movl      rSELF,%ecx
+    movzwl    2(rPC),%eax               # eax<- BBBB
+    movl      offThread_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
+    EXPORT_PC
+    movl      offDvmDex_pResMethods(%ecx),%ecx  # ecx<- pDvmDex->pResMethods
+    movl      (%ecx,%eax,4),%eax        # eax<- resolved methodToCall
+    testl     %eax,%eax
+    jne       common_invokeMethodRange
+    movl      rSELF,%ecx
+    movl      offThread_method(%ecx),%ecx # ecx<- self->method
+    movzwl    2(rPC),%eax
+    movl      offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+    movl      %eax,OUT_ARG1(%esp)       # arg1<- BBBB
+    movl      %ecx,OUT_ARG0(%esp)       # arg0<- clazz
     movl      $METHOD_STATIC,%eax
     movl      %eax,OUT_ARG2(%esp)       # arg2<- flags
     call      dvmResolveMethod          # call(clazz,ref,flags)
@@ -8033,18 +4314,314 @@
     jne       common_invokeMethodRange
     jmp       common_exceptionThrown
 
-/* continuation for OP_INVOKE_INTERFACE_RANGE */
 
-.LOP_INVOKE_INTERFACE_RANGE_continue:
+/* ------------------------------ */
+.L_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */
+/* File: x86/OP_INVOKE_INTERFACE_RANGE.S */
+/* File: x86/OP_INVOKE_INTERFACE.S */
+    /*
+     * Handle an interface method call.
+     *
+     * for: invoke-interface, invoke-interface/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    movzwl     4(rPC),%eax              # eax<- FEDC or CCCC
+    movl       rSELF,%ecx
+    .if        (!1)
+    andl       $0xf,%eax               # eax<- C (or stays CCCC)
+    .endif
+    GET_VREG_R   %eax %eax              # eax<- "this"
+    EXPORT_PC
+    testl      %eax,%eax                # null this?
+    je         common_errNullObject     # yes, fail
+    movl       offObject_clazz(%eax),%eax # eax<- thisPtr->clazz
+    movl       %eax,OUT_ARG0(%esp)                 # arg0<- class
+    movl       offThread_methodClassDex(%ecx),%eax   # eax<- methodClassDex
+    movl       offThread_method(%ecx),%ecx           # ecx<- method
+    movl       %eax,OUT_ARG3(%esp)                 # arg3<- dex
+    movzwl     2(rPC),%eax                         # eax<- BBBB
+    movl       %ecx,OUT_ARG2(%esp)                 # arg2<- method
+    movl       %eax,OUT_ARG1(%esp)                 # arg1<- BBBB
     call       dvmFindInterfaceMethodInCache # eax<- call(class, ref, method, dex)
     testl      %eax,%eax
     je         common_exceptionThrown
     jmp        common_invokeMethodRange
 
-/* continuation for OP_FLOAT_TO_INT */
+
+/* ------------------------------ */
+.L_OP_UNUSED_79: /* 0x79 */
+/* File: x86/OP_UNUSED_79.S */
+/* File: x86/unused.S */
+    jmp     common_abort
 
 
-.LOP_FLOAT_TO_INT_continue:
+/* ------------------------------ */
+.L_OP_UNUSED_7A: /* 0x7a */
+/* File: x86/OP_UNUSED_7A.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_NEG_INT: /* 0x7b */
+/* File: x86/OP_NEG_INT.S */
+/* File: x86/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op eax".
+     */
+    /* unop vA, vB */
+    movzbl   rINSTbl,%ecx           # ecx<- A+
+    sarl     $4,rINST             # rINST<- B
+    GET_VREG_R %eax rINST           # eax<- vB
+    andb     $0xf,%cl              # ecx<- A
+    
+    
+    negl %eax
+    SET_VREG %eax %ecx
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_NOT_INT: /* 0x7c */
+/* File: x86/OP_NOT_INT.S */
+/* File: x86/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op eax".
+     */
+    /* unop vA, vB */
+    movzbl   rINSTbl,%ecx           # ecx<- A+
+    sarl     $4,rINST             # rINST<- B
+    GET_VREG_R %eax rINST           # eax<- vB
+    andb     $0xf,%cl              # ecx<- A
+    
+    
+    notl %eax
+    SET_VREG %eax %ecx
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_NEG_LONG: /* 0x7d */
+/* File: x86/OP_NEG_LONG.S */
+    /* unop vA, vB */
+    movzbl    rINSTbl,%ecx        # ecx<- BA
+    sarl      $4,%ecx            # ecx<- B
+    andb      $0xf,rINSTbl       # rINST<- A
+    GET_VREG_WORD %eax %ecx 0     # eax<- v[B+0]
+    GET_VREG_WORD %ecx %ecx 1     # ecx<- v[B+1]
+    negl      %eax
+    adcl      $0,%ecx
+    negl      %ecx
+    SET_VREG_WORD %eax rINST 0    # v[A+0]<- eax
+    FETCH_INST_OPCODE 1 %eax
+    SET_VREG_WORD %ecx rINST 1    # v[A+1]<- ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %eax
+
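The neg-long handler above negates a 64-bit value held in two 32-bit vregs: negl flips the low word, adcl $0 folds the resulting borrow into the high word, and a second negl finishes the two's complement. A minimal C sketch of the same arithmetic (helper name and array layout invented for illustration):

    #include <stdint.h>

    /* 64-bit negate from 32-bit halves, mirroring negl / adcl $0 / negl above. */
    static void negLong(uint32_t v[2])      /* v[0] = low word, v[1] = high word */
    {
        uint32_t carry = (v[0] != 0);       /* negl sets CF when the low word != 0 */
        v[0] = 0u - v[0];                   /* negl %eax */
        v[1] = 0u - (v[1] + carry);         /* adcl $0,%ecx ; negl %ecx */
    }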
+/* ------------------------------ */
+.L_OP_NOT_LONG: /* 0x7e */
+/* File: x86/OP_NOT_LONG.S */
+    /* unop vA, vB */
+    movzbl    rINSTbl,%ecx       # ecx<- BA
+    sarl      $4,%ecx           # ecx<- B
+    andb      $0xf,rINSTbl      # rINST<- A
+    GET_VREG_WORD %eax %ecx 0    # eax<- v[B+0]
+    GET_VREG_WORD %ecx %ecx 1    # ecx<- v[B+1]
+    notl      %eax
+    notl      %ecx
+    SET_VREG_WORD %eax rINST 0   # v[A+0]<- eax
+    FETCH_INST_OPCODE 1 %eax
+    SET_VREG_WORD %ecx rINST 1   # v[A+1]<- ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %eax
+
+/* ------------------------------ */
+.L_OP_NEG_FLOAT: /* 0x7f */
+/* File: x86/OP_NEG_FLOAT.S */
+/* File: x86/fpcvt.S */
+    /*
+     * Generic 32-bit FP conversion operation.
+     */
+    /* unop vA, vB */
+    movzbl   rINSTbl,%ecx       # ecx<- A+
+    sarl     $4,rINST         # rINST<- B
+    flds    (rFP,rINST,4)      # %st0<- vB
+    andb     $0xf,%cl          # ecx<- A
+    fchs
+    fstps  (rFP,%ecx,4)        # vA<- %st0
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_NEG_DOUBLE: /* 0x80 */
+/* File: x86/OP_NEG_DOUBLE.S */
+/* File: x86/fpcvt.S */
+    /*
+     * Generic 32-bit FP conversion operation.
+     */
+    /* unop vA, vB */
+    movzbl   rINSTbl,%ecx       # ecx<- A+
+    sarl     $4,rINST         # rINST<- B
+    fldl    (rFP,rINST,4)      # %st0<- vB
+    andb     $0xf,%cl          # ecx<- A
+    fchs
+    fstpl  (rFP,%ecx,4)        # vA<- %st0
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_INT_TO_LONG: /* 0x81 */
+/* File: x86/OP_INT_TO_LONG.S */
+    /* int to long vA, vB */
+    movzbl  rINSTbl,%eax                # eax<- +A
+    sarl    $4,%eax                    # eax<- B
+    GET_VREG_R %eax %eax                # eax<- vB
+    andb    $0xf,rINSTbl               # rINST<- A
+    SPILL(rIBASE)                       # cltd trashes rIBASE/edx
+    cltd                                # rIBASE:eax<- sssssssBBBBBBBB
+    SET_VREG_WORD rIBASE rINST 1        # v[A+1]<- rIBASE (sign bits)
+    FETCH_INST_OPCODE 1 %ecx
+    UNSPILL(rIBASE)
+    SET_VREG_WORD %eax rINST 0          # v[A+0]<- %eax
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
+
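int-to-long is pure sign extension: cltd replicates bit 31 of eax across edx, so the pair edx:eax holds the 64-bit value before the two halves are stored back. The same operation in C (illustrative helper, not VM code):

    #include <stdint.h>

    /* Sign-extend vB into a {low, high} pair, as cltd does for edx:eax. */
    static void intToLong(int32_t vB, uint32_t out[2])
    {
        out[0] = (uint32_t)vB;                  /* v[A+0] <- eax */
        out[1] = (vB < 0) ? 0xFFFFFFFFu : 0u;   /* v[A+1] <- edx (sign bits) */
    }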
+/* ------------------------------ */
+.L_OP_INT_TO_FLOAT: /* 0x82 */
+/* File: x86/OP_INT_TO_FLOAT.S */
+/* File: x86/fpcvt.S */
+    /*
+     * Generic 32-bit FP conversion operation.
+     */
+    /* unop vA, vB */
+    movzbl   rINSTbl,%ecx       # ecx<- A+
+    sarl     $4,rINST         # rINST<- B
+    fildl    (rFP,rINST,4)      # %st0<- vB
+    andb     $0xf,%cl          # ecx<- A
+    
+    fstps  (rFP,%ecx,4)        # vA<- %st0
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_INT_TO_DOUBLE: /* 0x83 */
+/* File: x86/OP_INT_TO_DOUBLE.S */
+/* File: x86/fpcvt.S */
+    /*
+     * Generic 32-bit FP conversion operation.
+     */
+    /* unop vA, vB */
+    movzbl   rINSTbl,%ecx       # ecx<- A+
+    sarl     $4,rINST         # rINST<- B
+    fildl    (rFP,rINST,4)      # %st0<- vB
+    andb     $0xf,%cl          # ecx<- A
+    
+    fstpl  (rFP,%ecx,4)        # vA<- %st0
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_LONG_TO_INT: /* 0x84 */
+/* File: x86/OP_LONG_TO_INT.S */
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+/* File: x86/OP_MOVE.S */
+    /* for move, move-object, long-to-int */
+    /* op vA, vB */
+    movzbl rINSTbl,%eax          # eax<- BA
+    andb   $0xf,%al             # eax<- A
+    shrl   $4,rINST            # rINST<- B
+    GET_VREG_R rINST rINST
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    SET_VREG rINST %eax           # fp[A]<-fp[B]
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_LONG_TO_FLOAT: /* 0x85 */
+/* File: x86/OP_LONG_TO_FLOAT.S */
+/* File: x86/fpcvt.S */
+    /*
+     * Generic 32-bit FP conversion operation.
+     */
+    /* unop vA, vB */
+    movzbl   rINSTbl,%ecx       # ecx<- A+
+    sarl     $4,rINST         # rINST<- B
+    fildll    (rFP,rINST,4)      # %st0<- vB
+    andb     $0xf,%cl          # ecx<- A
+    
+    fstps  (rFP,%ecx,4)        # vA<- %st0
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_LONG_TO_DOUBLE: /* 0x86 */
+/* File: x86/OP_LONG_TO_DOUBLE.S */
+/* File: x86/fpcvt.S */
+    /*
+     * Generic 32-bit FP conversion operation.
+     */
+    /* unop vA, vB */
+    movzbl   rINSTbl,%ecx       # ecx<- A+
+    sarl     $4,rINST         # rINST<- B
+    fildll    (rFP,rINST,4)      # %st0<- vB
+    andb     $0xf,%cl          # ecx<- A
+    
+    fstpl  (rFP,%ecx,4)        # vA<- %st0
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_FLOAT_TO_INT: /* 0x87 */
+/* File: x86/OP_FLOAT_TO_INT.S */
+/* File: x86/cvtfp_int.S */
+/* On fp to int conversions, Java requires that
+ * if the result > maxint, it should be clamped to maxint.  If it is less
+ * than minint, it should be clamped to minint.  If it is a nan, the result
+ * should be zero.  Further, the rounding mode is to truncate.  This model
+ * differs from what is delivered normally via the x86 fpu, so we have
+ * to play some games.
+ */
+    /* float/double to int/long vA, vB */
+    movzbl    rINSTbl,%ecx       # ecx<- A+
+    sarl      $4,rINST         # rINST<- B
+    .if 0
+    fldl     (rFP,rINST,4)       # %st0<- vB
+    .else
+    flds     (rFP,rINST,4)       # %st0<- vB
+    .endif
+    ftst
+    fnstcw   LOCAL0_OFFSET(%ebp)      # remember original rounding mode
+    movzwl   LOCAL0_OFFSET(%ebp),%eax
+    movb     $0xc,%ah
+    movw     %ax,LOCAL0_OFFSET+2(%ebp)
+    fldcw    LOCAL0_OFFSET+2(%ebp)    # set "to zero" rounding mode
+    andb     $0xf,%cl                # ecx<- A
+    .if 0
+    fistpll  (rFP,%ecx,4)             # convert and store
+    .else
+    fistpl   (rFP,%ecx,4)             # convert and store
+    .endif
+    fldcw    LOCAL0_OFFSET(%ebp)      # restore previous rounding mode
     .if 0
     movl     $0x80000000,%eax
     xorl     4(rFP,%ecx,4),%eax
@@ -8055,8 +4632,9 @@
     je       .LOP_FLOAT_TO_INT_special_case # fix up result
 
 .LOP_FLOAT_TO_INT_finish:
+    FETCH_INST_OPCODE 1 %ecx
     ADVANCE_PC 1
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 .LOP_FLOAT_TO_INT_special_case:
     fnstsw   %ax
@@ -8074,10 +4652,39 @@
     .endif
     jmp       .LOP_FLOAT_TO_INT_finish
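The cvtfp_int.S template exists because the x86 FPU's default conversion (round-to-nearest, and an "integer indefinite" result on overflow or NaN) does not match the Java rules described in the comment above, hence the rounding-control rewrite and the special-case fixup. Those rules in plain C, assuming IEEE-754 float (illustrative only):

    #include <stdint.h>

    /* Java/Dalvik float-to-int: truncate toward zero, clamp out-of-range, NaN -> 0. */
    static int32_t javaFloatToInt(float f)
    {
        if (f != f)              return 0;          /* NaN */
        if (f >= 2147483648.0f)  return INT32_MAX;  /* clamp high */
        if (f <= -2147483648.0f) return INT32_MIN;  /* clamp low (exact INT32_MIN included) */
        return (int32_t)f;                          /* in range: truncates */
    }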
 
-/* continuation for OP_FLOAT_TO_LONG */
 
-
-.LOP_FLOAT_TO_LONG_continue:
+/* ------------------------------ */
+.L_OP_FLOAT_TO_LONG: /* 0x88 */
+/* File: x86/OP_FLOAT_TO_LONG.S */
+/* File: x86/cvtfp_int.S */
+/* On fp to int conversions, Java requires that
+ * if the result > maxint, it should be clamped to maxint.  If it is less
+ * than minint, it should be clamped to minint.  If it is a nan, the result
+ * should be zero.  Further, the rounding mode is to truncate.  This model
+ * differs from what is delivered normally via the x86 fpu, so we have
+ * to play some games.
+ */
+    /* float/double to int/long vA, vB */
+    movzbl    rINSTbl,%ecx       # ecx<- A+
+    sarl      $4,rINST         # rINST<- B
+    .if 0
+    fldl     (rFP,rINST,4)       # %st0<- vB
+    .else
+    flds     (rFP,rINST,4)       # %st0<- vB
+    .endif
+    ftst
+    fnstcw   LOCAL0_OFFSET(%ebp)      # remember original rounding mode
+    movzwl   LOCAL0_OFFSET(%ebp),%eax
+    movb     $0xc,%ah
+    movw     %ax,LOCAL0_OFFSET+2(%ebp)
+    fldcw    LOCAL0_OFFSET+2(%ebp)    # set "to zero" rounding mode
+    andb     $0xf,%cl                # ecx<- A
+    .if 1
+    fistpll  (rFP,%ecx,4)             # convert and store
+    .else
+    fistpl   (rFP,%ecx,4)             # convert and store
+    .endif
+    fldcw    LOCAL0_OFFSET(%ebp)      # restore previous rounding mode
     .if 1
     movl     $0x80000000,%eax
     xorl     4(rFP,%ecx,4),%eax
@@ -8088,8 +4695,9 @@
     je       .LOP_FLOAT_TO_LONG_special_case # fix up result
 
 .LOP_FLOAT_TO_LONG_finish:
+    FETCH_INST_OPCODE 1 %ecx
     ADVANCE_PC 1
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 .LOP_FLOAT_TO_LONG_special_case:
     fnstsw   %ax
@@ -8107,10 +4715,58 @@
     .endif
     jmp       .LOP_FLOAT_TO_LONG_finish
 
-/* continuation for OP_DOUBLE_TO_INT */
+
+/* ------------------------------ */
+.L_OP_FLOAT_TO_DOUBLE: /* 0x89 */
+/* File: x86/OP_FLOAT_TO_DOUBLE.S */
+/* File: x86/fpcvt.S */
+    /*
+     * Generic 32-bit FP conversion operation.
+     */
+    /* unop vA, vB */
+    movzbl   rINSTbl,%ecx       # ecx<- A+
+    sarl     $4,rINST         # rINST<- B
+    flds    (rFP,rINST,4)      # %st0<- vB
+    andb     $0xf,%cl          # ecx<- A
+    
+    fstpl  (rFP,%ecx,4)        # vA<- %st0
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
 
 
-.LOP_DOUBLE_TO_INT_continue:
+/* ------------------------------ */
+.L_OP_DOUBLE_TO_INT: /* 0x8a */
+/* File: x86/OP_DOUBLE_TO_INT.S */
+/* File: x86/cvtfp_int.S */
+/* On fp to int conversions, Java requires that
+ * if the result > maxint, it should be clamped to maxint.  If it is less
+ * than minint, it should be clamped to minint.  If it is a nan, the result
+ * should be zero.  Further, the rounding mode is to truncate.  This model
+ * differs from what is delivered normally via the x86 fpu, so we have
+ * to play some games.
+ */
+    /* float/double to int/long vA, vB */
+    movzbl    rINSTbl,%ecx       # ecx<- A+
+    sarl      $4,rINST         # rINST<- B
+    .if 1
+    fldl     (rFP,rINST,4)       # %st0<- vB
+    .else
+    flds     (rFP,rINST,4)       # %st0<- vB
+    .endif
+    ftst
+    fnstcw   LOCAL0_OFFSET(%ebp)      # remember original rounding mode
+    movzwl   LOCAL0_OFFSET(%ebp),%eax
+    movb     $0xc,%ah
+    movw     %ax,LOCAL0_OFFSET+2(%ebp)
+    fldcw    LOCAL0_OFFSET+2(%ebp)    # set "to zero" rounding mode
+    andb     $0xf,%cl                # ecx<- A
+    .if 0
+    fistpll  (rFP,%ecx,4)             # convert and store
+    .else
+    fistpl   (rFP,%ecx,4)             # convert and store
+    .endif
+    fldcw    LOCAL0_OFFSET(%ebp)      # restore previous rounding mode
     .if 0
     movl     $0x80000000,%eax
     xorl     4(rFP,%ecx,4),%eax
@@ -8121,8 +4777,9 @@
     je       .LOP_DOUBLE_TO_INT_special_case # fix up result
 
 .LOP_DOUBLE_TO_INT_finish:
+    FETCH_INST_OPCODE 1 %ecx
     ADVANCE_PC 1
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 .LOP_DOUBLE_TO_INT_special_case:
     fnstsw   %ax
@@ -8140,10 +4797,39 @@
     .endif
     jmp       .LOP_DOUBLE_TO_INT_finish
 
-/* continuation for OP_DOUBLE_TO_LONG */
 
-
-.LOP_DOUBLE_TO_LONG_continue:
+/* ------------------------------ */
+.L_OP_DOUBLE_TO_LONG: /* 0x8b */
+/* File: x86/OP_DOUBLE_TO_LONG.S */
+/* File: x86/cvtfp_int.S */
+/* On fp to int conversions, Java requires that
+ * if the result > maxint, it should be clamped to maxint.  If it is less
+ * than minint, it should be clamped to minint.  If it is a nan, the result
+ * should be zero.  Further, the rounding mode is to truncate.  This model
+ * differs from what is delivered normally via the x86 fpu, so we have
+ * to play some games.
+ */
+    /* float/double to int/long vA, vB */
+    movzbl    rINSTbl,%ecx       # ecx<- A+
+    sarl      $4,rINST         # rINST<- B
+    .if 1
+    fldl     (rFP,rINST,4)       # %st0<- vB
+    .else
+    flds     (rFP,rINST,4)       # %st0<- vB
+    .endif
+    ftst
+    fnstcw   LOCAL0_OFFSET(%ebp)      # remember original rounding mode
+    movzwl   LOCAL0_OFFSET(%ebp),%eax
+    movb     $0xc,%ah
+    movw     %ax,LOCAL0_OFFSET+2(%ebp)
+    fldcw    LOCAL0_OFFSET+2(%ebp)    # set "to zero" rounding mode
+    andb     $0xf,%cl                # ecx<- A
+    .if 1
+    fistpll  (rFP,%ecx,4)             # convert and store
+    .else
+    fistpl   (rFP,%ecx,4)             # convert and store
+    .endif
+    fldcw    LOCAL0_OFFSET(%ebp)      # restore previous rounding mode
     .if 1
     movl     $0x80000000,%eax
     xorl     4(rFP,%ecx,4),%eax
@@ -8154,8 +4840,9 @@
     je       .LOP_DOUBLE_TO_LONG_special_case # fix up result
 
 .LOP_DOUBLE_TO_LONG_finish:
+    FETCH_INST_OPCODE 1 %ecx
     ADVANCE_PC 1
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 .LOP_DOUBLE_TO_LONG_special_case:
     fnstsw   %ax
@@ -8173,290 +4860,2319 @@
     .endif
     jmp       .LOP_DOUBLE_TO_LONG_finish
 
-/* continuation for OP_DIV_INT */
+
+/* ------------------------------ */
+.L_OP_DOUBLE_TO_FLOAT: /* 0x8c */
+/* File: x86/OP_DOUBLE_TO_FLOAT.S */
+/* File: x86/fpcvt.S */
+    /*
+     * Generic 32-bit FP conversion operation.
+     */
+    /* unop vA, vB */
+    movzbl   rINSTbl,%ecx       # ecx<- A+
+    sarl     $4,rINST         # rINST<- B
+    fldl    (rFP,rINST,4)      # %st0<- vB
+    andb     $0xf,%cl          # ecx<- A
+    
+    fstps  (rFP,%ecx,4)        # vA<- %st0
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_INT_TO_BYTE: /* 0x8d */
+/* File: x86/OP_INT_TO_BYTE.S */
+/* File: x86/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op eax".
+     */
+    /* unop vA, vB */
+    movzbl   rINSTbl,%ecx           # ecx<- A+
+    sarl     $4,rINST             # rINST<- B
+    GET_VREG_R %eax rINST           # eax<- vB
+    andb     $0xf,%cl              # ecx<- A
+    
+    
+    movsbl %al,%eax
+    SET_VREG %eax %ecx
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_INT_TO_CHAR: /* 0x8e */
+/* File: x86/OP_INT_TO_CHAR.S */
+/* File: x86/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op eax".
+     */
+    /* unop vA, vB */
+    movzbl   rINSTbl,%ecx           # ecx<- A+
+    sarl     $4,rINST             # rINST<- B
+    GET_VREG_R %eax rINST           # eax<- vB
+    andb     $0xf,%cl              # ecx<- A
+    
+    
+    movzwl %ax,%eax
+    SET_VREG %eax %ecx
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_INT_TO_SHORT: /* 0x8f */
+/* File: x86/OP_INT_TO_SHORT.S */
+/* File: x86/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op eax".
+     */
+    /* unop vA, vB */
+    movzbl   rINSTbl,%ecx           # ecx<- A+
+    sarl     $4,rINST             # rINST<- B
+    GET_VREG_R %eax rINST           # eax<- vB
+    andb     $0xf,%cl              # ecx<- A
+    
+    
+    movswl %ax,%eax
+    SET_VREG %eax %ecx
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_ADD_INT: /* 0x90 */
+/* File: x86/OP_ADD_INT.S */
+/* File: x86/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
+     * This could be an x86 instruction or a function call.  (If the result
+     * comes back in a register other than eax, you can override "result".)
+     *
+     * For: add-int, sub-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    movzbl   2(rPC),%eax   # eax<- BB
+    movzbl   3(rPC),%ecx   # ecx<- CC
+    GET_VREG_R %eax %eax   # eax<- vBB
+    addl (rFP,%ecx,4),%eax                 # ex: addl    (rFP,%ecx,4),%eax
+    SET_VREG %eax rINST
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_SUB_INT: /* 0x91 */
+/* File: x86/OP_SUB_INT.S */
+/* File: x86/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
+     * This could be an x86 instruction or a function call.  (If the result
+     * comes back in a register other than eax, you can override "result".)
+     *
+     * For: add-int, sub-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    movzbl   2(rPC),%eax   # eax<- BB
+    movzbl   3(rPC),%ecx   # ecx<- CC
+    GET_VREG_R %eax %eax   # eax<- vBB
+    subl   (rFP,%ecx,4),%eax                 # ex: addl    (rFP,%ecx,4),%eax
+    SET_VREG %eax rINST
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_MUL_INT: /* 0x92 */
+/* File: x86/OP_MUL_INT.S */
+    /*
+     * 32-bit binary multiplication.
+     */
+    /* mul vAA, vBB, vCC */
+    movzbl   2(rPC),%eax            # eax<- BB
+    movzbl   3(rPC),%ecx            # ecx<- CC
+    GET_VREG_R %eax %eax            # eax<- vBB
+    SPILL(rIBASE)
+    imull    (rFP,%ecx,4),%eax      # trashes rIBASE/edx
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    SET_VREG %eax rINST
+    GOTO_NEXT_R %ecx
+
+/* ------------------------------ */
+.L_OP_DIV_INT: /* 0x93 */
+/* File: x86/OP_DIV_INT.S */
+/* File: x86/bindiv.S */
+
+    /*
+     * 32-bit binary div/rem operation.  Handles special case of op0=minint and
+     * op1=-1.
+     */
+    /* binop vAA, vBB, vCC */
+    movzbl   2(rPC),%eax            # eax<- BB
+    movzbl   3(rPC),%ecx            # ecx<- CC
+    GET_VREG_R %eax %eax            # eax<- vBB
+    GET_VREG_R %ecx %ecx            # ecx<- vCC
+    SPILL(rIBASE)
+    cmpl     $0,%ecx
+    je       common_errDivideByZero
+    cmpl     $-1,%ecx
+    jne      .LOP_DIV_INT_continue_div
+    cmpl     $0x80000000,%eax
+    jne      .LOP_DIV_INT_continue_div
+    movl     $0x80000000,%eax
+    SET_VREG %eax rINST
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
 .LOP_DIV_INT_continue_div:
     cltd
     idivl   %ecx
-.LOP_DIV_INT_finish_div:
     SET_VREG %eax rINST
-    FETCH_INST_OPCODE 2 %edx
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
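bindiv.S screens out the two operand patterns idivl must never see: a zero divisor (Dalvik throws ArithmeticException) and INT_MIN / -1, which would raise #DE on x86 but is defined to produce INT_MIN (with remainder 0). A C restatement of that contract (function name invented for illustration):

    #include <stdint.h>

    /* Dalvik div-int semantics; mirrors the checks performed before idivl above. */
    static int32_t dalvikDivInt(int32_t num, int32_t den, int* threw)
    {
        *threw = (den == 0);
        if (*threw)                        return 0;          /* ArithmeticException */
        if (den == -1 && num == INT32_MIN) return INT32_MIN;  /* idivl would fault here */
        return num / den;                                     /* truncating division */
    }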
 
-/* continuation for OP_REM_INT */
+
+/* ------------------------------ */
+.L_OP_REM_INT: /* 0x94 */
+/* File: x86/OP_REM_INT.S */
+/* File: x86/bindiv.S */
+
+    /*
+     * 32-bit binary div/rem operation.  Handles special case of op0=minint and
+     * op1=-1.
+     */
+    /* binop vAA, vBB, vCC */
+    movzbl   2(rPC),%eax            # eax<- BB
+    movzbl   3(rPC),%ecx            # ecx<- CC
+    GET_VREG_R %eax %eax            # eax<- vBB
+    GET_VREG_R %ecx %ecx            # ecx<- vCC
+    SPILL(rIBASE)
+    cmpl     $0,%ecx
+    je       common_errDivideByZero
+    cmpl     $-1,%ecx
+    jne      .LOP_REM_INT_continue_div
+    cmpl     $0x80000000,%eax
+    jne      .LOP_REM_INT_continue_div
+    movl     $0,rIBASE
+    SET_VREG rIBASE rINST
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
 .LOP_REM_INT_continue_div:
     cltd
     idivl   %ecx
-.LOP_REM_INT_finish_div:
-    SET_VREG %edx rINST
-    FETCH_INST_OPCODE 2 %edx
+    SET_VREG rIBASE rINST
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
-/* continuation for OP_MUL_LONG */
 
-.LOP_MUL_LONG_continue:
-    leal      (%ecx,%edx),%edx     # full result now in %edx:%eax
+/* ------------------------------ */
+.L_OP_AND_INT: /* 0x95 */
+/* File: x86/OP_AND_INT.S */
+/* File: x86/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
+     * This could be an x86 instruction or a function call.  (If the result
+     * comes back in a register other than eax, you can override "result".)
+     *
+     * For: add-int, sub-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    movzbl   2(rPC),%eax   # eax<- BB
+    movzbl   3(rPC),%ecx   # ecx<- CC
+    GET_VREG_R %eax %eax   # eax<- vBB
+    andl   (rFP,%ecx,4),%eax                 # ex: addl    (rFP,%ecx,4),%eax
+    SET_VREG %eax rINST
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_OR_INT: /* 0x96 */
+/* File: x86/OP_OR_INT.S */
+/* File: x86/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
+     * This could be an x86 instruction or a function call.  (If the result
+     * comes back in a register other than eax, you can override "result".)
+     *
+     * For: add-int, sub-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    movzbl   2(rPC),%eax   # eax<- BB
+    movzbl   3(rPC),%ecx   # ecx<- CC
+    GET_VREG_R %eax %eax   # eax<- vBB
+    orl   (rFP,%ecx,4),%eax                 # ex: addl    (rFP,%ecx,4),%eax
+    SET_VREG %eax rINST
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_XOR_INT: /* 0x97 */
+/* File: x86/OP_XOR_INT.S */
+/* File: x86/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
+     * This could be an x86 instruction or a function call.  (If the result
+     * comes back in a register other than eax, you can override "result".)
+     *
+     * For: add-int, sub-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    movzbl   2(rPC),%eax   # eax<- BB
+    movzbl   3(rPC),%ecx   # ecx<- CC
+    GET_VREG_R %eax %eax   # eax<- vBB
+    xorl   (rFP,%ecx,4),%eax                 # ex: addl    (rFP,%ecx,4),%eax
+    SET_VREG %eax rINST
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_SHL_INT: /* 0x98 */
+/* File: x86/OP_SHL_INT.S */
+/* File: x86/binop1.S */
+    /*
+     * Generic 32-bit binary operation in which both operands are loaded
+     * into registers (op0 in eax, op1 in ecx).
+     */
+    /* binop vAA, vBB, vCC */
+    movzbl   2(rPC),%eax            # eax<- BB
+    movzbl   3(rPC),%ecx            # ecx<- CC
+    GET_VREG_R %eax %eax            # eax<- vBB
+    GET_VREG_R %ecx %ecx            # ecx<- vCC
+    sall    %cl,%eax                          # ex: addl    %ecx,%eax
+    SET_VREG %eax rINST
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_SHR_INT: /* 0x99 */
+/* File: x86/OP_SHR_INT.S */
+/* File: x86/binop1.S */
+    /*
+     * Generic 32-bit binary operation in which both operands are loaded
+     * into registers (op0 in eax, op1 in ecx).
+     */
+    /* binop vAA, vBB, vCC */
+    movzbl   2(rPC),%eax            # eax<- BB
+    movzbl   3(rPC),%ecx            # ecx<- CC
+    GET_VREG_R %eax %eax            # eax<- vBB
+    GET_VREG_R %ecx %ecx            # ecx<- vCC
+    sarl    %cl,%eax                          # ex: addl    %ecx,%eax
+    SET_VREG %eax rINST
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_USHR_INT: /* 0x9a */
+/* File: x86/OP_USHR_INT.S */
+/* File: x86/binop1.S */
+    /*
+     * Generic 32-bit binary operation in which both operands are loaded
+     * into registers (op0 in eax, op1 in ecx).
+     */
+    /* binop vAA, vBB, vCC */
+    movzbl   2(rPC),%eax            # eax<- BB
+    movzbl   3(rPC),%ecx            # ecx<- CC
+    GET_VREG_R %eax %eax            # eax<- vBB
+    GET_VREG_R %ecx %ecx            # ecx<- vCC
+    shrl    %cl,%eax                          # ex: addl    %ecx,%eax
+    SET_VREG %eax rINST
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_ADD_LONG: /* 0x9b */
+/* File: x86/OP_ADD_LONG.S */
+/* File: x86/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.
+     */
+    /* binop vAA, vBB, vCC */
+
+    movzbl    2(rPC),%eax               # eax<- BB
+    movzbl    3(rPC),%ecx               # ecx<- CC
+    SPILL(rIBASE)                       # save rIBASE
+    GET_VREG_WORD rIBASE %eax 0         # rIBASE<- v[BB+0]
+    GET_VREG_WORD %eax %eax 1           # eax<- v[BB+1]
+    addl (rFP,%ecx,4),rIBASE         # ex: addl   (rFP,%ecx,4),rIBASE
+    adcl 4(rFP,%ecx,4),%eax         # ex: adcl   4(rFP,%ecx,4),%eax
+    SET_VREG_WORD rIBASE rINST 0        # v[AA+0] <- rIBASE
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)                     # restore rIBASE
+    SET_VREG_WORD %eax rINST 1          # v[AA+1] <- eax
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+
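binopWide.S assembles each 64-bit result from two 32-bit vreg words; for add-long that is addl on the low words followed by adcl on the high words so the carry propagates. The equivalent C (illustrative):

    #include <stdint.h>

    /* 64-bit add from 32-bit halves, mirroring the addl/adcl pair above. */
    static void addLong(const uint32_t b[2], const uint32_t c[2], uint32_t a[2])
    {
        uint32_t lo    = b[0] + c[0];
        uint32_t carry = (lo < b[0]);      /* addl sets CF on unsigned overflow */
        a[0] = lo;                         /* v[AA+0] */
        a[1] = b[1] + c[1] + carry;        /* adcl folds the carry into v[AA+1] */
    }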
+/* ------------------------------ */
+.L_OP_SUB_LONG: /* 0x9c */
+/* File: x86/OP_SUB_LONG.S */
+/* File: x86/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.
+     */
+    /* binop vAA, vBB, vCC */
+
+    movzbl    2(rPC),%eax               # eax<- BB
+    movzbl    3(rPC),%ecx               # ecx<- CC
+    SPILL(rIBASE)                       # save rIBASE
+    GET_VREG_WORD rIBASE %eax 0         # rIBASE<- v[BB+0]
+    GET_VREG_WORD %eax %eax 1           # eax<- v[BB+1]
+    subl (rFP,%ecx,4),rIBASE         # ex: addl   (rFP,%ecx,4),rIBASE
+    sbbl 4(rFP,%ecx,4),%eax         # ex: adcl   4(rFP,%ecx,4),%eax
+    SET_VREG_WORD rIBASE rINST 0        # v[AA+0] <- rIBASE
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)                     # restore rIBASE
+    SET_VREG_WORD %eax rINST 1          # v[AA+1] <- eax
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_MUL_LONG: /* 0x9d */
+/* File: x86/OP_MUL_LONG.S */
+    /*
+     * Signed 64-bit integer multiply.
+     *
+     * We could definitely use more free registers for
+     * this code.   We spill rINSTw (ebx),
+     * giving us eax, ebx, ecx and edx as computational
+     * temps.  On top of that, we'll spill esi (rPC)
+     * for use as the vB pointer and edi (rFP) for use
+     * as the vC pointer.  Yuck.
+     */
+    /* mul-long vAA, vBB, vCC */
+    movzbl    2(rPC),%eax              # eax<- B
+    movzbl    3(rPC),%ecx              # ecx<- C
+    SPILL_TMP2(%esi)                   # save Dalvik PC
+    SPILL(rFP)
+    SPILL(rINST)
+    SPILL(rIBASE)
+    leal      (rFP,%eax,4),%esi        # esi<- &v[B]
+    leal      (rFP,%ecx,4),rFP         # rFP<- &v[C]
+    movl      4(%esi),%ecx             # ecx<- Bmsw
+    imull     (rFP),%ecx               # ecx<- (Bmsw*Clsw)
+    movl      4(rFP),%eax              # eax<- Cmsw
+    imull     (%esi),%eax              # eax<- (Cmsw*Blsw)
+    addl      %eax,%ecx                # ecx<- (Bmsw*Clsw)+(Cmsw*Blsw)
+    movl      (rFP),%eax               # eax<- Clsw
+    mull      (%esi)                   # rIBASE:eax<- (Clsw*Blsw)
+    UNSPILL(rINST)
+    UNSPILL(rFP)
+    leal      (%ecx,rIBASE),rIBASE # full result now in rIBASE:%eax
     UNSPILL_TMP2(%esi)             # Restore Dalvik PC
     FETCH_INST_OPCODE 2 %ecx       # Fetch next instruction
-    movl      %edx,4(rFP,rINST,4)  # v[B+1]<- %edx
+    movl      rIBASE,4(rFP,rINST,4) # v[B+1]<- rIBASE
+    UNSPILL(rIBASE)
     movl      %eax,(rFP,rINST,4)   # v[B]<- %eax
     ADVANCE_PC 2
     GOTO_NEXT_R %ecx
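The mul-long handler is the usual 64x64->64 decomposition: with B = Bmsw:Blsw and C = Cmsw:Clsw, only Blsw*Clsw (a full 64-bit mull) plus the two cross products Bmsw*Clsw and Cmsw*Blsw can affect the low 64 bits, since Bmsw*Cmsw lands entirely above bit 63. In C (illustrative helper):

    #include <stdint.h>

    /* mul-long: cross products feed the high word; Blsw*Clsw supplies the rest. */
    static void mulLong(const uint32_t b[2], const uint32_t c[2], uint32_t a[2])
    {
        uint32_t cross = b[1] * c[0] + c[1] * b[0];   /* two imull + addl */
        uint64_t low   = (uint64_t)b[0] * c[0];       /* mull: full 64-bit product */
        a[0] = (uint32_t)low;
        a[1] = (uint32_t)(low >> 32) + cross;         /* leal (%ecx,rIBASE),rIBASE */
    }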
 
-/* continuation for OP_DIV_LONG */
-
-.LOP_DIV_LONG_continue:
+/* ------------------------------ */
+.L_OP_DIV_LONG: /* 0x9e */
+/* File: x86/OP_DIV_LONG.S */
+    /* div vAA, vBB, vCC */
+    movzbl    3(rPC),%eax              # eax<- CC
+    movzbl    2(rPC),%ecx              # ecx<- BB
+    SPILL(rIBASE)                      # save rIBASE/%edx
+    GET_VREG_WORD rIBASE %eax 0
+    GET_VREG_WORD %eax %eax 1
+    movl     rIBASE,OUT_ARG2(%esp)
+    testl    %eax,%eax
+    je       .LOP_DIV_LONG_check_zero
+    cmpl     $-1,%eax
+    je       .LOP_DIV_LONG_check_neg1
+.LOP_DIV_LONG_notSpecial:
+    GET_VREG_WORD rIBASE %ecx 0
+    GET_VREG_WORD %ecx %ecx 1
+.LOP_DIV_LONG_notSpecial1:
+    movl     %eax,OUT_ARG3(%esp)
+    movl     rIBASE,OUT_ARG0(%esp)
+    movl     %ecx,OUT_ARG1(%esp)
     call     __divdi3
 .LOP_DIV_LONG_finish:
-    SET_VREG_WORD %edx rINST 1
+    SET_VREG_WORD rIBASE rINST 1
+    UNSPILL(rIBASE)                 # restore rIBASE/%edx
     SET_VREG_WORD %eax rINST 0
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 .LOP_DIV_LONG_check_zero:
-    testl   %edx,%edx
+    testl   rIBASE,rIBASE
     jne     .LOP_DIV_LONG_notSpecial
     jmp     common_errDivideByZero
 .LOP_DIV_LONG_check_neg1:
-    testl   %edx,%eax
+    testl   rIBASE,%eax
     jne     .LOP_DIV_LONG_notSpecial
-    GET_VREG_WORD %edx %ecx 0
+    GET_VREG_WORD rIBASE %ecx 0
     GET_VREG_WORD %ecx %ecx 1
-    testl    %edx,%edx
+    testl    rIBASE,rIBASE
     jne      .LOP_DIV_LONG_notSpecial1
     cmpl     $0x80000000,%ecx
     jne      .LOP_DIV_LONG_notSpecial1
     /* minint / -1, return minint on div, 0 on rem */
     xorl     %eax,%eax
-    movl     $0x80000000,%edx
+    movl     $0x80000000,rIBASE
     jmp      .LOP_DIV_LONG_finish
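div-long and rem-long apply the analogous 64-bit special cases before falling through to the compiler runtime helpers __divdi3 and __moddi3: a zero divisor throws, and LLONG_MIN / -1 is pinned to LLONG_MIN with a remainder of 0. Illustrative C (helper name invented):

    #include <stdint.h>

    /* Corner cases handled before __divdi3 / __moddi3 are called. */
    static void dalvikDivRemLong(int64_t num, int64_t den,
                                 int64_t* quot, int64_t* rem, int* threw)
    {
        *threw = (den == 0);
        if (*threw) { *quot = 0; *rem = 0; return; }             /* ArithmeticException */
        if (den == -1 && num == INT64_MIN) { *quot = INT64_MIN; *rem = 0; return; }
        *quot = num / den;
        *rem  = num % den;
    }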
 
-/* continuation for OP_REM_LONG */
-
-.LOP_REM_LONG_continue:
+/* ------------------------------ */
+.L_OP_REM_LONG: /* 0x9f */
+/* File: x86/OP_REM_LONG.S */
+/* File: x86/OP_DIV_LONG.S */
+    /* div vAA, vBB, vCC */
+    movzbl    3(rPC),%eax              # eax<- CC
+    movzbl    2(rPC),%ecx              # ecx<- BB
+    SPILL(rIBASE)                      # save rIBASE/%edx
+    GET_VREG_WORD rIBASE %eax 0
+    GET_VREG_WORD %eax %eax 1
+    movl     rIBASE,OUT_ARG2(%esp)
+    testl    %eax,%eax
+    je       .LOP_REM_LONG_check_zero
+    cmpl     $-1,%eax
+    je       .LOP_REM_LONG_check_neg1
+.LOP_REM_LONG_notSpecial:
+    GET_VREG_WORD rIBASE %ecx 0
+    GET_VREG_WORD %ecx %ecx 1
+.LOP_REM_LONG_notSpecial1:
+    movl     %eax,OUT_ARG3(%esp)
+    movl     rIBASE,OUT_ARG0(%esp)
+    movl     %ecx,OUT_ARG1(%esp)
     call     __moddi3
 .LOP_REM_LONG_finish:
-    SET_VREG_WORD %edx rINST 1
+    SET_VREG_WORD rIBASE rINST 1
+    UNSPILL(rIBASE)                 # restore rIBASE/%edx
     SET_VREG_WORD %eax rINST 0
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 .LOP_REM_LONG_check_zero:
-    testl   %edx,%edx
+    testl   rIBASE,rIBASE
     jne     .LOP_REM_LONG_notSpecial
     jmp     common_errDivideByZero
 .LOP_REM_LONG_check_neg1:
-    testl   %edx,%eax
+    testl   rIBASE,%eax
     jne     .LOP_REM_LONG_notSpecial
-    GET_VREG_WORD %edx %ecx 0
+    GET_VREG_WORD rIBASE %ecx 0
     GET_VREG_WORD %ecx %ecx 1
-    testl    %edx,%edx
+    testl    rIBASE,rIBASE
     jne      .LOP_REM_LONG_notSpecial1
     cmpl     $0x80000000,%ecx
     jne      .LOP_REM_LONG_notSpecial1
     /* minint / -1, return minint on div, 0 on rem */
     xorl     %eax,%eax
-    movl     $0,%edx
+    movl     $0,rIBASE
     jmp      .LOP_REM_LONG_finish
 
-/* continuation for OP_SHL_LONG */
 
-.LOP_SHL_LONG_finish:
+/* ------------------------------ */
+.L_OP_AND_LONG: /* 0xa0 */
+/* File: x86/OP_AND_LONG.S */
+/* File: x86/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.
+     */
+    /* binop vAA, vBB, vCC */
+
+    movzbl    2(rPC),%eax               # eax<- BB
+    movzbl    3(rPC),%ecx               # ecx<- CC
+    SPILL(rIBASE)                       # save rIBASE
+    GET_VREG_WORD rIBASE %eax 0         # rIBASE<- v[BB+0]
+    GET_VREG_WORD %eax %eax 1           # eax<- v[BB+1]
+    andl (rFP,%ecx,4),rIBASE         # ex: addl   (rFP,%ecx,4),rIBASE
+    andl 4(rFP,%ecx,4),%eax         # ex: adcl   4(rFP,%ecx,4),%eax
+    SET_VREG_WORD rIBASE rINST 0        # v[AA+0] <- rIBASE
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)                     # restore rIBASE
+    SET_VREG_WORD %eax rINST 1          # v[AA+1] <- eax
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_OR_LONG: /* 0xa1 */
+/* File: x86/OP_OR_LONG.S */
+/* File: x86/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.
+     */
+    /* binop vAA, vBB, vCC */
+
+    movzbl    2(rPC),%eax               # eax<- BB
+    movzbl    3(rPC),%ecx               # ecx<- CC
+    SPILL(rIBASE)                       # save rIBASE
+    GET_VREG_WORD rIBASE %eax 0         # rIBASE<- v[BB+0]
+    GET_VREG_WORD %eax %eax 1           # eax<- v[BB+1]
+    orl (rFP,%ecx,4),rIBASE         # ex: addl   (rFP,%ecx,4),rIBASE
+    orl 4(rFP,%ecx,4),%eax         # ex: adcl   4(rFP,%ecx,4),%eax
+    SET_VREG_WORD rIBASE rINST 0        # v[AA+0] <- rIBASE
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)                     # restore rIBASE
+    SET_VREG_WORD %eax rINST 1          # v[AA+1] <- eax
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_XOR_LONG: /* 0xa2 */
+/* File: x86/OP_XOR_LONG.S */
+/* File: x86/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.
+     */
+    /* binop vAA, vBB, vCC */
+
+    movzbl    2(rPC),%eax               # eax<- BB
+    movzbl    3(rPC),%ecx               # ecx<- CC
+    SPILL(rIBASE)                       # save rIBASE
+    GET_VREG_WORD rIBASE %eax 0         # rIBASE<- v[BB+0]
+    GET_VREG_WORD %eax %eax 1           # eax<- v[BB+1]
+    xorl (rFP,%ecx,4),rIBASE         # ex: addl   (rFP,%ecx,4),rIBASE
+    xorl 4(rFP,%ecx,4),%eax         # ex: adcl   4(rFP,%ecx,4),%eax
+    SET_VREG_WORD rIBASE rINST 0        # v[AA+0] <- rIBASE
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)                     # restore rIBASE
+    SET_VREG_WORD %eax rINST 1          # v[AA+1] <- eax
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_SHL_LONG: /* 0xa3 */
+/* File: x86/OP_SHL_LONG.S */
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to mask the shift
+     * distance to its low 6 bits, while x86 shifts only honor the low
+     * 5 bits of %cl, so we have to handle the 31 < shiftcount < 64
+     * case specially.
+     */
+    /* shl-long vAA, vBB, vCC */
+    /* ecx gets shift count */
+    /* Need to spill rIBASE */
+    /* rINSTw gets AA */
+    movzbl    2(rPC),%eax               # eax<- BB
+    movzbl    3(rPC),%ecx               # ecx<- CC
+    SPILL(rIBASE)
+    GET_VREG_WORD rIBASE %eax 1         # rIBASE<- v[BB+1]
+    GET_VREG_R   %ecx %ecx              # ecx<- vCC
+    GET_VREG_WORD %eax %eax 0           # eax<- v[BB+0]
+    shldl     %eax,rIBASE
+    sall      %cl,%eax
+    testb     $32,%cl
+    je        2f
+    movl      %eax,rIBASE
+    xorl      %eax,%eax
+2:
+    SET_VREG_WORD rIBASE rINST 1        # v[AA+1]<- rIBASE
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
     SET_VREG_WORD %eax rINST 0          # v[AA+0]<- %eax
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
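Because Dalvik honors six bits of shift count while x86 shift instructions only honor the low five bits of %cl, the handler computes the 0..31 result with shldl/sall and then uses the "testb $32,%cl" branch to fix up counts 32..63 (the low word moves to the high word, the low word becomes zero). A C sketch of that split (illustrative):

    #include <stdint.h>

    /* shl-long with the 0..31 / 32..63 split used by the x86 handler. */
    static void shlLong(const uint32_t v[2], uint32_t count, uint32_t out[2])
    {
        count &= 63;                                   /* Dalvik masks to 6 bits */
        if (count & 32) {                              /* testb $32,%cl path */
            out[1] = v[0] << (count & 31);             /* low word shifts into high */
            out[0] = 0;
        } else {
            out[1] = (count == 0) ? v[1]               /* avoid the undefined >>32 */
                   : (v[1] << count) | (v[0] >> (32 - count));
            out[0] = v[0] << count;
        }
    }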
 
-/* continuation for OP_SHR_LONG */
-
-
-.LOP_SHR_LONG_finish:
+/* ------------------------------ */
+.L_OP_SHR_LONG: /* 0xa4 */
+/* File: x86/OP_SHR_LONG.S */
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to mask the shift
+     * distance to its low 6 bits, while x86 shifts only honor the low
+     * 5 bits of %cl, so we have to handle the 31 < shiftcount < 64
+     * case specially.
+     */
+    /* shr-long vAA, vBB, vCC */
+    /* ecx gets shift count */
+    /* Need to spill rIBASE */
+    /* rINSTw gets AA */
+    movzbl    2(rPC),%eax               # eax<- BB
+    movzbl    3(rPC),%ecx               # ecx<- CC
+    SPILL(rIBASE)
+    GET_VREG_WORD rIBASE %eax 1         # rIBASE<- v[BB+1]
+    GET_VREG_R   %ecx %ecx              # ecx<- vCC
+    GET_VREG_WORD %eax %eax 0           # eax<- v[BB+0]
+    shrdl     rIBASE,%eax
+    sarl      %cl,rIBASE
+    testb     $32,%cl
+    je        2f
+    movl      rIBASE,%eax
+    sarl      $31,rIBASE
+2:
+    SET_VREG_WORD rIBASE rINST 1        # v[AA+1]<- rIBASE
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
     SET_VREG_WORD %eax rINST 0          # v[AA+0]<- eax
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
-/* continuation for OP_USHR_LONG */
-
-
-.LOP_USHR_LONG_finish:
+/* ------------------------------ */
+.L_OP_USHR_LONG: /* 0xa5 */
+/* File: x86/OP_USHR_LONG.S */
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to mask the shift
+     * distance to its low 6 bits, while x86 shifts only honor the low
+     * 5 bits of %cl, so we have to handle the 31 < shiftcount < 64
+     * case specially.
+     */
+    /* ushr-long vAA, vBB, vCC */
+    /* ecx gets shift count */
+    /* Need to spill rIBASE */
+    /* rINSTw gets AA */
+    movzbl    2(rPC),%eax               # eax<- BB
+    movzbl    3(rPC),%ecx               # ecx<- CC
+    SPILL(rIBASE)
+    GET_VREG_WORD rIBASE %eax 1         # rIBASE<- v[BB+1]
+    GET_VREG_R  %ecx %ecx               # ecx<- vCC
+    GET_VREG_WORD %eax %eax 0           # eax<- v[BB+0]
+    shrdl     rIBASE,%eax
+    shrl      %cl,rIBASE
+    testb     $32,%cl
+    je        2f
+    movl      rIBASE,%eax
+    xorl      rIBASE,rIBASE
+2:
+    SET_VREG_WORD rIBASE rINST 1          # v[AA+1]<- rIBASE
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
     SET_VREG_WORD %eax rINST 0         # v[AA+0]<- eax
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
-/* continuation for OP_DIV_INT_2ADDR */
+/* ------------------------------ */
+.L_OP_ADD_FLOAT: /* 0xa6 */
+/* File: x86/OP_ADD_FLOAT.S */
+/* File: x86/binflop.S */
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     */
+    /* binop vAA, vBB, vCC */
+    movzbl   2(rPC),%eax          # eax<- BB
+    movzbl   3(rPC),%ecx          # ecx<- CC
+    flds    (rFP,%eax,4)         # vBB to fp stack
+    fadds   (rFP,%ecx,4)         # ex: faddp
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    fstps   (rFP,rINST,4)         # %st to vAA
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_SUB_FLOAT: /* 0xa7 */
+/* File: x86/OP_SUB_FLOAT.S */
+/* File: x86/binflop.S */
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     */
+    /* binop vAA, vBB, vCC */
+    movzbl   2(rPC),%eax          # eax<- BB
+    movzbl   3(rPC),%ecx          # ecx<- CC
+    flds    (rFP,%eax,4)         # vBB to fp stack
+    fsubs   (rFP,%ecx,4)         # ex: faddp
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    fstps   (rFP,rINST,4)         # %st to vAA
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_MUL_FLOAT: /* 0xa8 */
+/* File: x86/OP_MUL_FLOAT.S */
+/* File: x86/binflop.S */
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     */
+    /* binop vAA, vBB, vCC */
+    movzbl   2(rPC),%eax          # eax<- BB
+    movzbl   3(rPC),%ecx          # ecx<- CC
+    flds    (rFP,%eax,4)         # vBB to fp stack
+    fmuls   (rFP,%ecx,4)         # ex: faddp
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    fstps   (rFP,rINST,4)         # %st to vAA
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_DIV_FLOAT: /* 0xa9 */
+/* File: x86/OP_DIV_FLOAT.S */
+/* File: x86/binflop.S */
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     */
+    /* binop vAA, vBB, vCC */
+    movzbl   2(rPC),%eax          # eax<- BB
+    movzbl   3(rPC),%ecx          # ecx<- CC
+    flds    (rFP,%eax,4)         # vBB to fp stack
+    fdivs   (rFP,%ecx,4)         # ex: faddp
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    fstps   (rFP,rINST,4)         # %st to vAA
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_REM_FLOAT: /* 0xaa */
+/* File: x86/OP_REM_FLOAT.S */
+    /* rem_float vAA, vBB, vCC */
+    movzbl   3(rPC),%ecx            # ecx<- CC
+    movzbl   2(rPC),%eax            # eax<- BB
+    flds     (rFP,%ecx,4)           # vCC to fp stack
+    flds     (rFP,%eax,4)           # vBB to fp stack
+    movzbl   rINSTbl,%ecx           # ecx<- AA
+1:
+    fprem
+    fstsw     %ax
+    sahf
+    jp        1b
+    fstp      %st(1)
+    FETCH_INST_OPCODE 2 %eax
+    ADVANCE_PC 2
+    fstps    (rFP,%ecx,4)           # %st to vAA
+    GOTO_NEXT_R %eax
+
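rem-float and rem-double follow C fmod semantics (the result takes the sign of the dividend and comes from truncating division), which is why the handler spins on fprem, not fprem1, until the FPU's C2 "partial remainder" flag clears. Equivalent C (illustrative):

    #include <math.h>

    /* Dalvik rem-float: identical in result to fmodf. */
    static float dalvikRemFloat(float dividend, float divisor)
    {
        return fmodf(dividend, divisor);   /* e.g. fmodf(-5.5f, 2.0f) == -1.5f */
    }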
+/* ------------------------------ */
+.L_OP_ADD_DOUBLE: /* 0xab */
+/* File: x86/OP_ADD_DOUBLE.S */
+/* File: x86/binflop.S */
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     */
+    /* binop vAA, vBB, vCC */
+    movzbl   2(rPC),%eax          # eax<- CC
+    movzbl   3(rPC),%ecx          # ecx<- BB
+    fldl    (rFP,%eax,4)         # vCC to fp stack
+    faddl   (rFP,%ecx,4)         # ex: faddp
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    fstpl   (rFP,rINST,4)         # %st to vAA
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_SUB_DOUBLE: /* 0xac */
+/* File: x86/OP_SUB_DOUBLE.S */
+/* File: x86/binflop.S */
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     */
+    /* binop vAA, vBB, vCC */
+    movzbl   2(rPC),%eax          # eax<- CC
+    movzbl   3(rPC),%ecx          # ecx<- BB
+    fldl    (rFP,%eax,4)         # vCC to fp stack
+    fsubl   (rFP,%ecx,4)         # ex: faddp
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    fstpl   (rFP,rINST,4)         # %st to vAA
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_MUL_DOUBLE: /* 0xad */
+/* File: x86/OP_MUL_DOUBLE.S */
+/* File: x86/binflop.S */
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     */
+    /* binop vAA, vBB, vCC */
+    movzbl   2(rPC),%eax          # eax<- CC
+    movzbl   3(rPC),%ecx          # ecx<- BB
+    fldl    (rFP,%eax,4)         # vCC to fp stack
+    fmull   (rFP,%ecx,4)         # ex: faddp
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    fstpl   (rFP,rINST,4)         # %st to vAA
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_DIV_DOUBLE: /* 0xae */
+/* File: x86/OP_DIV_DOUBLE.S */
+/* File: x86/binflop.S */
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     */
+    /* binop vAA, vBB, vCC */
+    movzbl   2(rPC),%eax          # eax<- CC
+    movzbl   3(rPC),%ecx          # ecx<- BB
+    fldl    (rFP,%eax,4)         # vCC to fp stack
+    fdivl   (rFP,%ecx,4)         # ex: faddp
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    fstpl   (rFP,rINST,4)         # %st to vAA
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_REM_DOUBLE: /* 0xaf */
+/* File: x86/OP_REM_DOUBLE.S */
+    /* rem_double vAA, vBB, vCC */
+    movzbl   3(rPC),%ecx            # ecx<- BB
+    movzbl   2(rPC),%eax            # eax<- CC
+    fldl     (rFP,%ecx,4)           # vCC to fp stack
+    fldl     (rFP,%eax,4)           # vCC to fp stack
+    FETCH_INST_OPCODE 2 %ecx
+1:
+    fprem
+    fstsw     %ax
+    sahf
+    jp        1b
+    fstp      %st(1)
+    ADVANCE_PC 2
+    fstpl    (rFP,rINST,4)           # %st to vAA
+    GOTO_NEXT_R %ecx
+
+/* ------------------------------ */
+.L_OP_ADD_INT_2ADDR: /* 0xb0 */
+/* File: x86/OP_ADD_INT_2ADDR.S */
+/* File: x86/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an x86 instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    movzx   rINSTbl,%ecx               # ecx<- A+
+    sarl    $4,rINST                 # rINST<- B
+    GET_VREG_R %eax rINST              # eax<- vB
+    andb    $0xf,%cl                  # ecx<- A
+    addl     %eax,(rFP,%ecx,4)                             # for ex: addl   %eax,(rFP,%ecx,4)
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
+
+
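The /2addr forms use format 12x, which packs both registers into the byte already held in rINST: A in the low nibble and B in the high nibble, which is exactly what the andb $0xf / sarl $4 pair extracts. A small C sketch of the same decoding and in-place update for add-int/2addr, using a toy register array rather than the real frame:

    #include <stdint.h>
    #include <stdio.h>

    /* Format 12x: code unit is B|A|op -- A in bits 8..11, B in bits 12..15.
     * add-int/2addr vA, vB computes vA <- vA + vB in place. */
    static void add_int_2addr(uint16_t insn, int32_t *vregs) {
        unsigned a = (insn >> 8) & 0xf;     /* low nibble of the B|A byte */
        unsigned b = (insn >> 12) & 0xf;    /* high nibble */
        vregs[a] += vregs[b];
    }

    int main(void) {
        int32_t vregs[16] = {0};
        vregs[3] = 40;
        vregs[5] = 2;
        add_int_2addr(0x53b0 /* add-int/2addr v3, v5 */, vregs);
        printf("v3 = %d\n", vregs[3]);      /* 42 */
        return 0;
    }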
+/* ------------------------------ */
+.L_OP_SUB_INT_2ADDR: /* 0xb1 */
+/* File: x86/OP_SUB_INT_2ADDR.S */
+/* File: x86/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an x86 instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    movzx   rINSTbl,%ecx               # ecx<- A+
+    sarl    $4,rINST                 # rINST<- B
+    GET_VREG_R %eax rINST              # eax<- vB
+    andb    $0xf,%cl                  # ecx<- A
+    subl     %eax,(rFP,%ecx,4)                             # for ex: addl   %eax,(rFP,%ecx,4)
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_MUL_INT_2ADDR: /* 0xb2 */
+/* File: x86/OP_MUL_INT_2ADDR.S */
+    /* mul vA, vB */
+    movzx   rINSTbl,%ecx              # ecx<- A+
+    sarl    $4,rINST                 # rINST<- B
+    GET_VREG_R %eax rINST             # eax<- vB
+    andb    $0xf,%cl                 # ecx<- A
+    SPILL(rIBASE)
+    imull   (rFP,%ecx,4),%eax         # trashes rIBASE/edx
+    UNSPILL(rIBASE)
+    SET_VREG %eax %ecx
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
+
+/* ------------------------------ */
+.L_OP_DIV_INT_2ADDR: /* 0xb3 */
+/* File: x86/OP_DIV_INT_2ADDR.S */
+/* File: x86/bindiv2addr.S */
+    /*
+     * 32-bit binary div/rem operation.  Handles special case of op0=minint and
+     * op1=-1.
+     */
+    /* div/rem/2addr vA, vB */
+    movzx    rINSTbl,%ecx          # ecx<- BA
+    SPILL(rIBASE)
+    sarl     $4,%ecx              # ecx<- B
+    GET_VREG_R %ecx %ecx           # ecx<- vB
+    andb     $0xf,rINSTbl         # rINST<- A
+    GET_VREG_R %eax rINST          # eax<- vA
+    cmpl     $0,%ecx
+    je       common_errDivideByZero
+    cmpl     $-1,%ecx
+    jne      .LOP_DIV_INT_2ADDR_continue_div2addr
+    cmpl     $0x80000000,%eax
+    jne      .LOP_DIV_INT_2ADDR_continue_div2addr
+    movl     $0x80000000,%eax
+    SET_VREG %eax rINST
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
+
 .LOP_DIV_INT_2ADDR_continue_div2addr:
     cltd
     idivl   %ecx
-.LOP_DIV_INT_2ADDR_finish_div2addr:
     SET_VREG %eax rINST
-    FETCH_INST_OPCODE 1 %edx
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 1 %ecx
     ADVANCE_PC 1
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
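The explicit cmpl checks before idivl exist because x86 idivl faults (#DE) on the one overflowing case, INT_MIN / -1, whereas Dalvik requires the wrapped result: INT_MIN for div, 0 for rem, and an exception only for division by zero. A hedged C sketch of the div-int rule; the function name and error signalling are illustrative, not the runtime's:

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Dalvik div-int semantics: fail on /0; INT_MIN / -1 yields INT_MIN
     * (rem yields 0) instead of trapping the way x86 idivl would. */
    static int div_int(int32_t va, int32_t vb, int32_t *out) {
        if (vb == 0)
            return -1;                      /* caller throws ArithmeticException */
        if (va == INT32_MIN && vb == -1) {
            *out = INT32_MIN;               /* wraparound result, no trap */
            return 0;
        }
        *out = va / vb;
        return 0;
    }

    int main(void) {
        int32_t r;
        if (div_int(INT32_MIN, -1, &r) == 0)
            printf("min-int / -1 = %d\n", r);   /* prints -2147483648 */
        return 0;
    }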
 
-/* continuation for OP_REM_INT_2ADDR */
+
+/* ------------------------------ */
+.L_OP_REM_INT_2ADDR: /* 0xb4 */
+/* File: x86/OP_REM_INT_2ADDR.S */
+/* File: x86/bindiv2addr.S */
+    /*
+     * 32-bit binary div/rem operation.  Handles special case of op0=minint and
+     * op1=-1.
+     */
+    /* div/rem/2addr vA, vB */
+    movzx    rINSTbl,%ecx          # ecx<- BA
+    SPILL(rIBASE)
+    sarl     $4,%ecx              # ecx<- B
+    GET_VREG_R %ecx %ecx           # ecx<- vB
+    andb     $0xf,rINSTbl         # rINST<- A
+    GET_VREG_R %eax rINST          # eax<- vA
+    cmpl     $0,%ecx
+    je       common_errDivideByZero
+    cmpl     $-1,%ecx
+    jne      .LOP_REM_INT_2ADDR_continue_div2addr
+    cmpl     $0x80000000,%eax
+    jne      .LOP_REM_INT_2ADDR_continue_div2addr
+    movl     $0,rIBASE
+    SET_VREG rIBASE rINST
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
+
 .LOP_REM_INT_2ADDR_continue_div2addr:
     cltd
     idivl   %ecx
-.LOP_REM_INT_2ADDR_finish_div2addr:
-    SET_VREG %edx rINST
-    FETCH_INST_OPCODE 1 %edx
+    SET_VREG rIBASE rINST
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 1 %ecx
     ADVANCE_PC 1
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
-/* continuation for OP_MUL_LONG_2ADDR */
 
-.LOP_MUL_LONG_2ADDR_continue:
-    leal      (%ecx,%edx),%edx         # full result now in %edx:%eax
-    movl      %edx,4(%esi)             # v[A+1]<- %edx
+/* ------------------------------ */
+.L_OP_AND_INT_2ADDR: /* 0xb5 */
+/* File: x86/OP_AND_INT_2ADDR.S */
+/* File: x86/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an x86 instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    movzx   rINSTbl,%ecx               # ecx<- A+
+    sarl    $4,rINST                 # rINST<- B
+    GET_VREG_R %eax rINST              # eax<- vB
+    andb    $0xf,%cl                  # ecx<- A
+    andl     %eax,(rFP,%ecx,4)                             # for ex: addl   %eax,(rFP,%ecx,4)
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_OR_INT_2ADDR: /* 0xb6 */
+/* File: x86/OP_OR_INT_2ADDR.S */
+/* File: x86/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an x86 instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    movzx   rINSTbl,%ecx               # ecx<- A+
+    sarl    $4,rINST                 # rINST<- B
+    GET_VREG_R %eax rINST              # eax<- vB
+    andb    $0xf,%cl                  # ecx<- A
+    orl     %eax,(rFP,%ecx,4)                             # for ex: addl   %eax,(rFP,%ecx,4)
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_XOR_INT_2ADDR: /* 0xb7 */
+/* File: x86/OP_XOR_INT_2ADDR.S */
+/* File: x86/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an x86 instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    movzx   rINSTbl,%ecx               # ecx<- A+
+    sarl    $4,rINST                 # rINST<- B
+    GET_VREG_R %eax rINST              # eax<- vB
+    andb    $0xf,%cl                  # ecx<- A
+    xorl     %eax,(rFP,%ecx,4)                             # for ex: addl   %eax,(rFP,%ecx,4)
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_SHL_INT_2ADDR: /* 0xb8 */
+/* File: x86/OP_SHL_INT_2ADDR.S */
+/* File: x86/shop2addr.S */
+    /*
+     * Generic 32-bit "shift/2addr" operation.
+     */
+    /* shift/2addr vA, vB */
+    movzx    rINSTbl,%ecx           # ecx<- BA
+    sarl     $4,%ecx               # ecx<- B
+    GET_VREG_R %ecx %ecx            # ecx<- vB (shift count)
+    andb     $0xf,rINSTbl          # rINST<- A
+    GET_VREG_R %eax rINST           # eax<- vA
+    sall    %cl,%eax                          # ex: sarl %cl,%eax
+    FETCH_INST_OPCODE 1 %ecx
+    SET_VREG %eax rINST
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
+
+
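No explicit masking of the shift count is needed in the shop2addr handlers: the x86 shift-by-%cl instructions use only the low 5 bits of %cl, which matches Dalvik's rule that 32-bit shifts use only the low 5 bits of vB. A one-function C sketch of the equivalent behavior:

    #include <stdint.h>
    #include <stdio.h>

    /* Dalvik shl-int uses only the low 5 bits of the shift count, exactly
     * what the x86 shift-by-%cl forms do, so the handler shifts directly. */
    static int32_t shl_int(int32_t value, int32_t count) {
        return (int32_t)((uint32_t)value << (count & 0x1f));
    }

    int main(void) {
        printf("%d\n", shl_int(1, 33));     /* 33 & 0x1f == 1, so prints 2 */
        return 0;
    }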
+/* ------------------------------ */
+.L_OP_SHR_INT_2ADDR: /* 0xb9 */
+/* File: x86/OP_SHR_INT_2ADDR.S */
+/* File: x86/shop2addr.S */
+    /*
+     * Generic 32-bit "shift/2addr" operation.
+     */
+    /* shift/2addr vA, vB */
+    movzx    rINSTbl,%ecx           # ecx<- BA
+    sarl     $4,%ecx               # ecx<- B
+    GET_VREG_R %ecx %ecx            # ecx<- vB (shift count)
+    andb     $0xf,rINSTbl          # rINST<- A
+    GET_VREG_R %eax rINST           # eax<- vA
+    sarl    %cl,%eax                          # ex: sarl %cl,%eax
+    FETCH_INST_OPCODE 1 %ecx
+    SET_VREG %eax rINST
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_USHR_INT_2ADDR: /* 0xba */
+/* File: x86/OP_USHR_INT_2ADDR.S */
+/* File: x86/shop2addr.S */
+    /*
+     * Generic 32-bit "shift/2addr" operation.
+     */
+    /* shift/2addr vA, vB */
+    movzx    rINSTbl,%ecx           # ecx<- BA
+    sarl     $4,%ecx               # ecx<- B
+    GET_VREG_R %ecx %ecx            # ecx<- vB (shift count)
+    andb     $0xf,rINSTbl          # rINST<- A
+    GET_VREG_R %eax rINST           # eax<- vA
+    shrl    %cl,%eax                          # ex: sarl %cl,%eax
+    FETCH_INST_OPCODE 1 %ecx
+    SET_VREG %eax rINST
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_ADD_LONG_2ADDR: /* 0xbb */
+/* File: x86/OP_ADD_LONG_2ADDR.S */
+/* File: x86/binopWide2addr.S */
+    /*
+     * Generic 64-bit binary operation.
+     */
+    /* binop/2addr vA, vB */
+    movzbl    rINSTbl,%ecx              # ecx<- BA
+    sarl      $4,%ecx                  # ecx<- B
+    GET_VREG_WORD %eax %ecx 0           # eax<- v[B+0]
+    GET_VREG_WORD %ecx %ecx 1           # ecx<- v[B+1]
+    andb      $0xF,rINSTbl             # rINST<- A
+    addl %eax,(rFP,rINST,4)         # example: addl   %eax,(rFP,rINST,4)
+    adcl %ecx,4(rFP,rINST,4)         # example: adcl   %ecx,4(rFP,rINST,4)
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
+
+
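The wide /2addr ops treat a long as two adjacent 32-bit vreg slots; add-long adds the low words with addl and then the high words with adcl so the carry propagates. A small C sketch of the same carry propagation (toy array layout, low word first):

    #include <stdint.h>
    #include <stdio.h>

    /* add-long on paired 32-bit slots: add the low words, then add the
     * high words plus the carry out of the low-word add (addl/adcl). */
    static void add_long(uint32_t *va, const uint32_t *vb) {
        uint32_t old_lo = va[0];
        va[0] += vb[0];
        uint32_t carry = (va[0] < old_lo);  /* unsigned wrap of the low add */
        va[1] += vb[1] + carry;
    }

    int main(void) {
        uint32_t va[2] = { 0xffffffffu, 0x0u };     /* 0x00000000ffffffff */
        uint32_t vb[2] = { 0x1u, 0x0u };            /* 1 */
        add_long(va, vb);
        printf("result = 0x%08x%08x\n", va[1], va[0]);  /* 0x0000000100000000 */
        return 0;
    }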
+/* ------------------------------ */
+.L_OP_SUB_LONG_2ADDR: /* 0xbc */
+/* File: x86/OP_SUB_LONG_2ADDR.S */
+/* File: x86/binopWide2addr.S */
+    /*
+     * Generic 64-bit binary operation.
+     */
+    /* binop/2addr vA, vB */
+    movzbl    rINSTbl,%ecx              # ecx<- BA
+    sarl      $4,%ecx                  # ecx<- B
+    GET_VREG_WORD %eax %ecx 0           # eax<- v[B+0]
+    GET_VREG_WORD %ecx %ecx 1           # ecx<- v[B+1]
+    andb      $0xF,rINSTbl             # rINST<- A
+    subl %eax,(rFP,rINST,4)         # example: addl   %eax,(rFP,rINST,4)
+    sbbl %ecx,4(rFP,rINST,4)         # example: adcl   %ecx,4(rFP,rINST,4)
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_MUL_LONG_2ADDR: /* 0xbd */
+/* File: x86/OP_MUL_LONG_2ADDR.S */
+    /*
+     * Signed 64-bit integer multiply, 2-addr version
+     *
+     * We could definitely use more free registers for
+     * this code.  We must spill %edx (rIBASE) because it
+     * is used by imul.  We'll also spill rINST (ebx),
+     * giving us eax, ebx, ecx and rIBASE as computational
+     * temps.  On top of that, we'll spill %esi (edi)
+     * for use as the vA pointer and rFP (esi) for use
+     * as the vB pointer.  Yuck.
+     */
+    /* mul-long/2addr vA, vB */
+    movzbl    rINSTbl,%eax             # eax<- BA
+    andb      $0xf,%al                # eax<- A
+    sarl      $4,rINST                # rINST<- B
+    SPILL_TMP2(%esi)
+    SPILL(rFP)
+    SPILL(rIBASE)
+    leal      (rFP,%eax,4),%esi        # %esi<- &v[A]
+    leal      (rFP,rINST,4),rFP        # rFP<- &v[B]
+    movl      4(%esi),%ecx             # ecx<- Amsw
+    imull     (rFP),%ecx               # ecx<- (Amsw*Blsw)
+    movl      4(rFP),%eax              # eax<- Bmsw
+    imull     (%esi),%eax              # eax<- (Bmsw*Alsw)
+    addl      %eax,%ecx                # ecx<- (Amsw*Blsw)+(Bmsw*Alsw)
+    movl      (rFP),%eax               # eax<- Blsw
+    mull      (%esi)                   # eax<- (Blsw*Alsw)
+    leal      (%ecx,rIBASE),rIBASE     # full result now in %edx:%eax
+    movl      rIBASE,4(%esi)           # v[A+1]<- rIBASE
     movl      %eax,(%esi)              # v[A]<- %eax
     UNSPILL_TMP2(%esi)
     FETCH_INST_OPCODE 1 %ecx
+    UNSPILL(rIBASE)
     UNSPILL(rFP)
     ADVANCE_PC 1
     GOTO_NEXT_R %ecx
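With only 32-bit registers, the low 64 bits of a 64x64 multiply are assembled from one widening low-word product plus the two cross products folded into the high word; the high*high term only affects bits above 63 and is dropped, which is what the imull/mull/leal sequence above computes. A C sketch of the decomposition, checked against a native 64-bit multiply (the helper name is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* 64x64 -> low 64 bits from 32-bit pieces, mirroring the handler:
     *   lo64(a*b) = (Alsw*Blsw) + ((Amsw*Blsw + Bmsw*Alsw) << 32)      */
    static uint64_t mul_long(uint64_t a, uint64_t b) {
        uint32_t alo = (uint32_t)a, ahi = (uint32_t)(a >> 32);
        uint32_t blo = (uint32_t)b, bhi = (uint32_t)(b >> 32);
        uint64_t low = (uint64_t)alo * blo;         /* widening low product */
        uint32_t cross = ahi * blo + bhi * alo;     /* only low 32 bits matter */
        return low + ((uint64_t)cross << 32);
    }

    int main(void) {
        uint64_t a = 0x123456789abcdef0ULL, b = 0x0fedcba987654321ULL;
        printf("match: %d\n", mul_long(a, b) == a * b);   /* prints 1 */
        return 0;
    }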
 
-/* continuation for OP_DIV_LONG_2ADDR */
-
-.LOP_DIV_LONG_2ADDR_continue:
+/* ------------------------------ */
+.L_OP_DIV_LONG_2ADDR: /* 0xbe */
+/* File: x86/OP_DIV_LONG_2ADDR.S */
+    /* div/2addr vA, vB */
+    movzbl    rINSTbl,%eax
+    shrl      $4,%eax                  # eax<- B
+    andb      $0xf,rINSTbl             # rINST<- A
+    SPILL(rIBASE)                       # save rIBASE/%edx
+    GET_VREG_WORD rIBASE %eax 0
+    GET_VREG_WORD %eax %eax 1
+    movl     rIBASE,OUT_ARG2(%esp)
+    testl    %eax,%eax
+    je       .LOP_DIV_LONG_2ADDR_check_zero
+    cmpl     $-1,%eax
+    je       .LOP_DIV_LONG_2ADDR_check_neg1
+.LOP_DIV_LONG_2ADDR_notSpecial:
+    GET_VREG_WORD rIBASE rINST 0
+    GET_VREG_WORD %ecx rINST 1
+.LOP_DIV_LONG_2ADDR_notSpecial1:
     movl     %eax,OUT_ARG3(%esp)
-    movl     %edx,OUT_ARG0(%esp)
+    movl     rIBASE,OUT_ARG0(%esp)
     movl     %ecx,OUT_ARG1(%esp)
     call     __divdi3
 .LOP_DIV_LONG_2ADDR_finish:
-    SET_VREG_WORD %edx rINST 1
+    SET_VREG_WORD rIBASE rINST 1
+    UNSPILL(rIBASE)                    # restore rIBASE/%edx
     SET_VREG_WORD %eax rINST 0
-    FETCH_INST_OPCODE 1 %edx
+    FETCH_INST_OPCODE 1 %ecx
     ADVANCE_PC 1
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 .LOP_DIV_LONG_2ADDR_check_zero:
-    testl   %edx,%edx
+    testl   rIBASE,rIBASE
     jne     .LOP_DIV_LONG_2ADDR_notSpecial
     jmp     common_errDivideByZero
 .LOP_DIV_LONG_2ADDR_check_neg1:
-    testl   %edx,%eax
+    testl   rIBASE,%eax
     jne     .LOP_DIV_LONG_2ADDR_notSpecial
-    GET_VREG_WORD %edx rINST 0
+    GET_VREG_WORD rIBASE rINST 0
     GET_VREG_WORD %ecx rINST 1
-    testl    %edx,%edx
+    testl    rIBASE,rIBASE
     jne      .LOP_DIV_LONG_2ADDR_notSpecial1
     cmpl     $0x80000000,%ecx
     jne      .LOP_DIV_LONG_2ADDR_notSpecial1
     /* minint / -1, return minint on div, 0 on rem */
     xorl     %eax,%eax
-    movl     $0x80000000,%edx
+    movl     $0x80000000,rIBASE
     jmp      .LOP_DIV_LONG_2ADDR_finish
 
-/* continuation for OP_REM_LONG_2ADDR */
-
-.LOP_REM_LONG_2ADDR_continue:
+/* ------------------------------ */
+.L_OP_REM_LONG_2ADDR: /* 0xbf */
+/* File: x86/OP_REM_LONG_2ADDR.S */
+/* File: x86/OP_DIV_LONG_2ADDR.S */
+    /* div/2addr vA, vB */
+    movzbl    rINSTbl,%eax
+    shrl      $4,%eax                  # eax<- B
+    andb      $0xf,rINSTbl             # rINST<- A
+    SPILL(rIBASE)                       # save rIBASE/%edx
+    GET_VREG_WORD rIBASE %eax 0
+    GET_VREG_WORD %eax %eax 1
+    movl     rIBASE,OUT_ARG2(%esp)
+    testl    %eax,%eax
+    je       .LOP_REM_LONG_2ADDR_check_zero
+    cmpl     $-1,%eax
+    je       .LOP_REM_LONG_2ADDR_check_neg1
+.LOP_REM_LONG_2ADDR_notSpecial:
+    GET_VREG_WORD rIBASE rINST 0
+    GET_VREG_WORD %ecx rINST 1
+.LOP_REM_LONG_2ADDR_notSpecial1:
     movl     %eax,OUT_ARG3(%esp)
-    movl     %edx,OUT_ARG0(%esp)
+    movl     rIBASE,OUT_ARG0(%esp)
     movl     %ecx,OUT_ARG1(%esp)
     call     __moddi3
 .LOP_REM_LONG_2ADDR_finish:
-    SET_VREG_WORD %edx rINST 1
+    SET_VREG_WORD rIBASE rINST 1
+    UNSPILL(rIBASE)                    # restore rIBASE/%edx
     SET_VREG_WORD %eax rINST 0
-    FETCH_INST_OPCODE 1 %edx
+    FETCH_INST_OPCODE 1 %ecx
     ADVANCE_PC 1
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 .LOP_REM_LONG_2ADDR_check_zero:
-    testl   %edx,%edx
+    testl   rIBASE,rIBASE
     jne     .LOP_REM_LONG_2ADDR_notSpecial
     jmp     common_errDivideByZero
 .LOP_REM_LONG_2ADDR_check_neg1:
-    testl   %edx,%eax
+    testl   rIBASE,%eax
     jne     .LOP_REM_LONG_2ADDR_notSpecial
-    GET_VREG_WORD %edx rINST 0
+    GET_VREG_WORD rIBASE rINST 0
     GET_VREG_WORD %ecx rINST 1
-    testl    %edx,%edx
+    testl    rIBASE,rIBASE
     jne      .LOP_REM_LONG_2ADDR_notSpecial1
     cmpl     $0x80000000,%ecx
     jne      .LOP_REM_LONG_2ADDR_notSpecial1
     /* minint / -1, return minint on div, 0 on rem */
     xorl     %eax,%eax
-    movl     $0,%edx
+    movl     $0,rIBASE
     jmp      .LOP_REM_LONG_2ADDR_finish
 
-/* continuation for OP_SHL_LONG_2ADDR */
+
+/* ------------------------------ */
+.L_OP_AND_LONG_2ADDR: /* 0xc0 */
+/* File: x86/OP_AND_LONG_2ADDR.S */
+/* File: x86/binopWide2addr.S */
+    /*
+     * Generic 64-bit binary operation.
+     */
+    /* binop/2addr vA, vB */
+    movzbl    rINSTbl,%ecx              # ecx<- BA
+    sarl      $4,%ecx                  # ecx<- B
+    GET_VREG_WORD %eax %ecx 0           # eax<- v[B+0]
+    GET_VREG_WORD %ecx %ecx 1           # ecx<- v[B+1]
+    andb      $0xF,rINSTbl             # rINST<- A
+    andl %eax,(rFP,rINST,4)         # example: addl   %eax,(rFP,rINST,4)
+    andl %ecx,4(rFP,rINST,4)         # example: adcl   %ecx,4(rFP,rINST,4)
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
 
 
-.LOP_SHL_LONG_2ADDR_finish:
-    FETCH_INST_OPCODE 1 %edx
+/* ------------------------------ */
+.L_OP_OR_LONG_2ADDR: /* 0xc1 */
+/* File: x86/OP_OR_LONG_2ADDR.S */
+/* File: x86/binopWide2addr.S */
+    /*
+     * Generic 64-bit binary operation.
+     */
+    /* binop/2addr vA, vB */
+    movzbl    rINSTbl,%ecx              # ecx<- BA
+    sarl      $4,%ecx                  # ecx<- B
+    GET_VREG_WORD %eax %ecx 0           # eax<- v[B+0]
+    GET_VREG_WORD %ecx %ecx 1           # ecx<- v[B+1]
+    andb      $0xF,rINSTbl             # rINST<- A
+    orl %eax,(rFP,rINST,4)         # example: addl   %eax,(rFP,rINST,4)
+    orl %ecx,4(rFP,rINST,4)         # example: adcl   %ecx,4(rFP,rINST,4)
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_XOR_LONG_2ADDR: /* 0xc2 */
+/* File: x86/OP_XOR_LONG_2ADDR.S */
+/* File: x86/binopWide2addr.S */
+    /*
+     * Generic 64-bit binary operation.
+     */
+    /* binop/2addr vA, vB */
+    movzbl    rINSTbl,%ecx              # ecx<- BA
+    sarl      $4,%ecx                  # ecx<- B
+    GET_VREG_WORD %eax %ecx 0           # eax<- v[B+0]
+    GET_VREG_WORD %ecx %ecx 1           # ecx<- v[B+1]
+    andb      $0xF,rINSTbl             # rINST<- A
+    xorl %eax,(rFP,rINST,4)         # example: addl   %eax,(rFP,rINST,4)
+    xorl %ecx,4(rFP,rINST,4)         # example: adcl   %ecx,4(rFP,rINST,4)
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_SHL_LONG_2ADDR: /* 0xc3 */
+/* File: x86/OP_SHL_LONG_2ADDR.S */
+    /*
+     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+     * 32-bit shift distance.
+     */
+    /* shl-long/2addr vA, vB */
+    /* ecx gets shift count */
+    /* Need to spill rIBASE */
+    /* rINSTw gets AA */
+    movzbl    rINSTbl,%ecx             # ecx<- BA
+    andb      $0xf,rINSTbl            # rINST<- A
+    GET_VREG_WORD %eax rINST 0         # eax<- v[AA+0]
+    sarl      $4,%ecx                 # ecx<- B
+    SPILL(rIBASE)
+    GET_VREG_WORD rIBASE rINST 1       # rIBASE<- v[AA+1]
+    GET_VREG_R  %ecx %ecx              # ecx<- vBB
+    shldl     %eax,rIBASE
+    sall      %cl,%eax
+    testb     $32,%cl
+    je        2f
+    movl      %eax,rIBASE
+    xorl      %eax,%eax
+2:
+    SET_VREG_WORD rIBASE rINST 1       # v[AA+1]<- rIBASE
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 1 %ecx
     SET_VREG_WORD %eax rINST 0         # v[AA+0]<- eax
     ADVANCE_PC 1
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
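The shldl/sall pair above handles shift counts 0-31; the testb $32 fixup covers counts 32-63, where the low word moves into the high word and the low word becomes zero (Dalvik uses only the low 6 bits of the count for long shifts). A C sketch of the same two cases on paired 32-bit halves, with toy names:

    #include <stdint.h>
    #include <stdio.h>

    /* shl-long with 32-bit halves.  For count < 32 the high word gets
     * (hi<<c | lo>>(32-c)); for count >= 32 the low word moves up and
     * the low half becomes zero. */
    static void shl_long(uint32_t v[2], unsigned count) {
        unsigned c = count & 63;
        if (c == 0)
            return;
        if (c < 32) {
            v[1] = (v[1] << c) | (v[0] >> (32 - c));
            v[0] <<= c;
        } else {
            v[1] = v[0] << (c - 32);
            v[0] = 0;
        }
    }

    int main(void) {
        uint32_t v[2] = { 0x00000001u, 0x0u };
        shl_long(v, 33);
        printf("0x%08x%08x\n", v[1], v[0]);   /* 0x0000000200000000 */
        return 0;
    }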
 
-/* continuation for OP_SHR_LONG_2ADDR */
-
-
-.LOP_SHR_LONG_2ADDR_finish:
-    FETCH_INST_OPCODE 1 %edx
+/* ------------------------------ */
+.L_OP_SHR_LONG_2ADDR: /* 0xc4 */
+/* File: x86/OP_SHR_LONG_2ADDR.S */
+    /*
+     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+     * 32-bit shift distance.
+     */
+    /* shr-long/2addr vA, vB */
+    /* ecx gets shift count */
+    /* Need to spill rIBASE */
+    /* rINSTw gets AA */
+    movzbl    rINSTbl,%ecx         # ecx<- BA
+    andb      $0xf,rINSTbl        # rINST<- A
+    GET_VREG_WORD %eax rINST 0     # eax<- v[AA+0]
+    sarl      $4,%ecx             # ecx<- B
+    SPILL(rIBASE)
+    GET_VREG_WORD rIBASE rINST 1   # rIBASE<- v[AA+1]
+    GET_VREG_R %ecx %ecx           # ecx<- vBB
+    shrdl     rIBASE,%eax
+    sarl      %cl,rIBASE
+    testb     $32,%cl
+    je        2f
+    movl      rIBASE,%eax
+    sarl      $31,rIBASE
+2:
+    SET_VREG_WORD rIBASE rINST 1   # v[AA+1]<- rIBASE
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 1 %ecx
     SET_VREG_WORD %eax rINST 0    # v[AA+0]<- eax
     ADVANCE_PC 1
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
-/* continuation for OP_USHR_LONG_2ADDR */
-
-
-.LOP_USHR_LONG_2ADDR_finish:
-    FETCH_INST_OPCODE 1 %edx
+/* ------------------------------ */
+.L_OP_USHR_LONG_2ADDR: /* 0xc5 */
+/* File: x86/OP_USHR_LONG_2ADDR.S */
+    /*
+     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+     * 32-bit shift distance.
+     */
+    /* ushr-long/2addr vA, vB */
+    /* ecx gets shift count */
+    /* Need to spill rIBASE */
+    /* rINSTw gets AA */
+    movzbl    rINSTbl,%ecx             # ecx<- BA
+    andb      $0xf,rINSTbl            # rINST<- A
+    GET_VREG_WORD %eax rINST 0         # eax<- v[AA+0]
+    sarl      $4,%ecx                 # ecx<- B
+    SPILL(rIBASE)
+    GET_VREG_WORD rIBASE rINST 1       # rIBASE<- v[AA+1]
+    GET_VREG_R %ecx %ecx               # ecx<- vBB
+    shrdl     rIBASE,%eax
+    shrl      %cl,rIBASE
+    testb     $32,%cl
+    je        2f
+    movl      rIBASE,%eax
+    xorl      rIBASE,rIBASE
+2:
+    SET_VREG_WORD rIBASE rINST 1       # v[AA+1]<- rIBASE
+    FETCH_INST_OPCODE 1 %ecx
+    UNSPILL(rIBASE)
     SET_VREG_WORD %eax rINST 0         # v[AA+0]<- eax
     ADVANCE_PC 1
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
-/* continuation for OP_DIV_INT_LIT16 */
+/* ------------------------------ */
+.L_OP_ADD_FLOAT_2ADDR: /* 0xc6 */
+/* File: x86/OP_ADD_FLOAT_2ADDR.S */
+/* File: x86/binflop2addr.S */
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     */
+
+    /* binop/2addr vA, vB */
+    movzx   rINSTbl,%ecx           # ecx<- A+
+    andb    $0xf,%cl              # ecx<- A
+    flds    (rFP,%ecx,4)          # vAA to fp stack
+    sarl    $4,rINST             # rINST<- B
+    fadds   (rFP,rINST,4)         # ex: faddp
+    FETCH_INST_OPCODE 1 %eax
+    ADVANCE_PC 1
+    fstps    (rFP,%ecx,4)         # %st to vA
+    GOTO_NEXT_R %eax
+
+
+/* ------------------------------ */
+.L_OP_SUB_FLOAT_2ADDR: /* 0xc7 */
+/* File: x86/OP_SUB_FLOAT_2ADDR.S */
+/* File: x86/binflop2addr.S */
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     */
+
+    /* binop/2addr vA, vB */
+    movzx   rINSTbl,%ecx           # ecx<- A+
+    andb    $0xf,%cl              # ecx<- A
+    flds    (rFP,%ecx,4)          # vAA to fp stack
+    sarl    $4,rINST             # rINST<- B
+    fsubs   (rFP,rINST,4)         # ex: faddp
+    FETCH_INST_OPCODE 1 %eax
+    ADVANCE_PC 1
+    fstps    (rFP,%ecx,4)         # %st to vA
+    GOTO_NEXT_R %eax
+
+
+/* ------------------------------ */
+.L_OP_MUL_FLOAT_2ADDR: /* 0xc8 */
+/* File: x86/OP_MUL_FLOAT_2ADDR.S */
+/* File: x86/binflop2addr.S */
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     */
+
+    /* binop/2addr vA, vB */
+    movzx   rINSTbl,%ecx           # ecx<- A+
+    andb    $0xf,%cl              # ecx<- A
+    flds    (rFP,%ecx,4)          # vAA to fp stack
+    sarl    $4,rINST             # rINST<- B
+    fmuls   (rFP,rINST,4)         # ex: faddp
+    FETCH_INST_OPCODE 1 %eax
+    ADVANCE_PC 1
+    fstps    (rFP,%ecx,4)         # %st to vA
+    GOTO_NEXT_R %eax
+
+
+/* ------------------------------ */
+.L_OP_DIV_FLOAT_2ADDR: /* 0xc9 */
+/* File: x86/OP_DIV_FLOAT_2ADDR.S */
+/* File: x86/binflop2addr.S */
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     */
+
+    /* binop/2addr vA, vB */
+    movzx   rINSTbl,%ecx           # ecx<- A+
+    andb    $0xf,%cl              # ecx<- A
+    flds    (rFP,%ecx,4)          # vAA to fp stack
+    sarl    $4,rINST             # rINST<- B
+    fdivs   (rFP,rINST,4)         # ex: faddp
+    FETCH_INST_OPCODE 1 %eax
+    ADVANCE_PC 1
+    fstps    (rFP,%ecx,4)         # %st to vA
+    GOTO_NEXT_R %eax
+
+
+/* ------------------------------ */
+.L_OP_REM_FLOAT_2ADDR: /* 0xca */
+/* File: x86/OP_REM_FLOAT_2ADDR.S */
+    /* rem_float/2addr vA, vB */
+    movzx   rINSTbl,%ecx                # ecx<- A+
+    sarl    $4,rINST                  # rINST<- B
+    flds     (rFP,rINST,4)              # vBB to fp stack
+    andb    $0xf,%cl                   # ecx<- A
+    flds     (rFP,%ecx,4)               # vAA to fp stack
+1:
+    fprem
+    fstsw     %ax
+    sahf
+    jp        1b
+    fstp      %st(1)
+    FETCH_INST_OPCODE 1 %eax
+    ADVANCE_PC 1
+    fstps    (rFP,%ecx,4)               # %st to vA
+    GOTO_NEXT_R %eax
+
+/* ------------------------------ */
+.L_OP_ADD_DOUBLE_2ADDR: /* 0xcb */
+/* File: x86/OP_ADD_DOUBLE_2ADDR.S */
+/* File: x86/binflop2addr.S */
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     */
+
+    /* binop/2addr vA, vB */
+    movzx   rINSTbl,%ecx           # ecx<- A+
+    andb    $0xf,%cl              # ecx<- A
+    fldl    (rFP,%ecx,4)          # vAA to fp stack
+    sarl    $4,rINST             # rINST<- B
+    faddl   (rFP,rINST,4)         # ex: faddp
+    FETCH_INST_OPCODE 1 %eax
+    ADVANCE_PC 1
+    fstpl    (rFP,%ecx,4)         # %st to vA
+    GOTO_NEXT_R %eax
+
+
+/* ------------------------------ */
+.L_OP_SUB_DOUBLE_2ADDR: /* 0xcc */
+/* File: x86/OP_SUB_DOUBLE_2ADDR.S */
+/* File: x86/binflop2addr.S */
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     */
+
+    /* binop/2addr vA, vB */
+    movzx   rINSTbl,%ecx           # ecx<- A+
+    andb    $0xf,%cl              # ecx<- A
+    fldl    (rFP,%ecx,4)          # vAA to fp stack
+    sarl    $4,rINST             # rINST<- B
+    fsubl   (rFP,rINST,4)         # ex: faddp
+    FETCH_INST_OPCODE 1 %eax
+    ADVANCE_PC 1
+    fstpl    (rFP,%ecx,4)         # %st to vA
+    GOTO_NEXT_R %eax
+
+
+/* ------------------------------ */
+.L_OP_MUL_DOUBLE_2ADDR: /* 0xcd */
+/* File: x86/OP_MUL_DOUBLE_2ADDR.S */
+/* File: x86/binflop2addr.S */
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     */
+
+    /* binop/2addr vA, vB */
+    movzx   rINSTbl,%ecx           # ecx<- A+
+    andb    $0xf,%cl              # ecx<- A
+    fldl    (rFP,%ecx,4)          # vAA to fp stack
+    sarl    $4,rINST             # rINST<- B
+    fmull   (rFP,rINST,4)         # ex: faddp
+    FETCH_INST_OPCODE 1 %eax
+    ADVANCE_PC 1
+    fstpl    (rFP,%ecx,4)         # %st to vA
+    GOTO_NEXT_R %eax
+
+
+/* ------------------------------ */
+.L_OP_DIV_DOUBLE_2ADDR: /* 0xce */
+/* File: x86/OP_DIV_DOUBLE_2ADDR.S */
+/* File: x86/binflop2addr.S */
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     */
+
+    /* binop/2addr vA, vB */
+    movzx   rINSTbl,%ecx           # ecx<- A+
+    andb    $0xf,%cl              # ecx<- A
+    fldl    (rFP,%ecx,4)          # vAA to fp stack
+    sarl    $4,rINST             # rINST<- B
+    fdivl   (rFP,rINST,4)         # ex: faddp
+    FETCH_INST_OPCODE 1 %eax
+    ADVANCE_PC 1
+    fstpl    (rFP,%ecx,4)         # %st to vA
+    GOTO_NEXT_R %eax
+
+
+/* ------------------------------ */
+.L_OP_REM_DOUBLE_2ADDR: /* 0xcf */
+/* File: x86/OP_REM_DOUBLE_2ADDR.S */
+    /* rem_double/2addr vA, vB */
+    movzx   rINSTbl,%ecx                # ecx<- A+
+    sarl    $4,rINST                  # rINST<- B
+    fldl     (rFP,rINST,4)              # vBB to fp stack
+    andb    $0xf,%cl                   # ecx<- A
+    fldl     (rFP,%ecx,4)               # vAA to fp stack
+1:
+    fprem
+    fstsw     %ax
+    sahf
+    jp        1b
+    fstp      %st(1)
+    FETCH_INST_OPCODE 1 %eax
+    ADVANCE_PC 1
+    fstpl    (rFP,%ecx,4)               # %st to vA
+    GOTO_NEXT_R %eax
+
+/* ------------------------------ */
+.L_OP_ADD_INT_LIT16: /* 0xd0 */
+/* File: x86/OP_ADD_INT_LIT16.S */
+/* File: x86/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = eax op ecx".
+     * This could be an x86 instruction or a function call.  (If the result
+     * comes back in a register other than eax, you can override "result".)
+     *
+     * For: add-int/lit16, rsub-int,
+     *      and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    /* binop/lit16 vA, vB, #+CCCC */
+    movzbl   rINSTbl,%eax               # eax<- 000000BA
+    sarl     $4,%eax                   # eax<- B
+    GET_VREG_R %eax %eax                # eax<- vB
+    movswl   2(rPC),%ecx                # ecx<- ssssCCCC
+    andb     $0xf,rINSTbl              # rINST<- A
+    addl %ecx,%eax                              # for example: addl %ecx, %eax
+    SET_VREG %eax rINST
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+
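The lit16 forms use format 22s: a B|A|op unit followed by a 16-bit literal that the handler sign-extends with movswl. A small C sketch of the decoding for add-int/lit16 (rsub-int simply reverses the operands: vA <- CCCC - vB); the array is a stand-in for the frame:

    #include <stdint.h>
    #include <stdio.h>

    /* Format 22s: unit0 = B|A|op, unit1 = CCCC (signed).
     * add-int/lit16 vA, vB, #+CCCC computes vA <- vB + sign-extended CCCC. */
    static void add_int_lit16(const uint16_t *insns, int32_t *vregs) {
        unsigned a = (insns[0] >> 8) & 0xf;
        unsigned b = (insns[0] >> 12) & 0xf;
        int32_t lit = (int16_t)insns[1];        /* sign-extend the literal */
        vregs[a] = vregs[b] + lit;
    }

    int main(void) {
        int32_t vregs[16] = {0};
        vregs[2] = 100;
        uint16_t insns[2] = { 0x21d0, 0xfffe }; /* add-int/lit16 v1, v2, #-2 */
        add_int_lit16(insns, vregs);
        printf("v1 = %d\n", vregs[1]);          /* 98 */
        return 0;
    }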
+/* ------------------------------ */
+.L_OP_RSUB_INT: /* 0xd1 */
+/* File: x86/OP_RSUB_INT.S */
+/* File: x86/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = eax op ecx".
+     * This could be an x86 instruction or a function call.  (If the result
+     * comes back in a register other than eax, you can override "result".)
+     *
+     * For: add-int/lit16, rsub-int,
+     *      and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    /* binop/lit16 vA, vB, #+CCCC */
+    movzbl   rINSTbl,%eax               # eax<- 000000BA
+    sarl     $4,%eax                   # eax<- B
+    GET_VREG_R %eax %eax                # eax<- vB
+    movswl   2(rPC),%ecx                # ecx<- ssssCCCC
+    andb     $0xf,rINSTbl              # rINST<- A
+    subl %eax,%ecx                              # for example: addl %ecx, %eax
+    SET_VREG %ecx rINST
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_MUL_INT_LIT16: /* 0xd2 */
+/* File: x86/OP_MUL_INT_LIT16.S */
+    /* mul/lit16 vA, vB, #+CCCC */
+    /* Need A in rINST, ssssCCCC in ecx, vB in eax */
+    movzbl   rINSTbl,%eax               # eax<- 000000BA
+    sarl     $4,%eax                   # eax<- B
+    GET_VREG_R %eax %eax                # eax<- vB
+    movswl   2(rPC),%ecx                # ecx<- ssssCCCC
+    andb     $0xf,rINSTbl              # rINST<- A
+    SPILL(rIBASE)
+    imull     %ecx,%eax                 # trashes rIBASE/edx
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    SET_VREG %eax rINST
+    GOTO_NEXT_R %ecx
+
+/* ------------------------------ */
+.L_OP_DIV_INT_LIT16: /* 0xd3 */
+/* File: x86/OP_DIV_INT_LIT16.S */
+/* File: x86/bindivLit16.S */
+    /*
+     * 32-bit binary div/rem operation.  Handles special case of op0=minint and
+     * op1=-1.
+     */
+    /* div/rem/lit16 vA, vB, #+CCCC */
+    /* Need A in rINST, ssssCCCC in ecx, vB in eax */
+    movzbl   rINSTbl,%eax         # eax<- 000000BA
+    SPILL(rIBASE)
+    sarl     $4,%eax             # eax<- B
+    GET_VREG_R %eax %eax          # eax<- vB
+    movswl   2(rPC),%ecx          # ecx<- ssssCCCC
+    andb     $0xf,rINSTbl        # rINST<- A
+    cmpl     $0,%ecx
+    je       common_errDivideByZero
+    cmpl     $-1,%ecx
+    jne      .LOP_DIV_INT_LIT16_continue_div
+    cmpl     $0x80000000,%eax
+    jne      .LOP_DIV_INT_LIT16_continue_div
+    movl     $0x80000000,%eax
+    SET_VREG %eax rINST
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
 .LOP_DIV_INT_LIT16_continue_div:
     cltd
     idivl   %ecx
-.LOP_DIV_INT_LIT16_finish_div:
     SET_VREG %eax rINST
-    FETCH_INST_OPCODE 2 %edx
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
-/* continuation for OP_REM_INT_LIT16 */
+
+/* ------------------------------ */
+.L_OP_REM_INT_LIT16: /* 0xd4 */
+/* File: x86/OP_REM_INT_LIT16.S */
+/* File: x86/bindivLit16.S */
+    /*
+     * 32-bit binary div/rem operation.  Handles special case of op0=minint and
+     * op1=-1.
+     */
+    /* div/rem/lit16 vA, vB, #+CCCC */
+    /* Need A in rINST, ssssCCCC in ecx, vB in eax */
+    movzbl   rINSTbl,%eax         # eax<- 000000BA
+    SPILL(rIBASE)
+    sarl     $4,%eax             # eax<- B
+    GET_VREG_R %eax %eax          # eax<- vB
+    movswl   2(rPC),%ecx          # ecx<- ssssCCCC
+    andb     $0xf,rINSTbl        # rINST<- A
+    cmpl     $0,%ecx
+    je       common_errDivideByZero
+    cmpl     $-1,%ecx
+    jne      .LOP_REM_INT_LIT16_continue_div
+    cmpl     $0x80000000,%eax
+    jne      .LOP_REM_INT_LIT16_continue_div
+    movl     $0,rIBASE
+    SET_VREG rIBASE rINST
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
 .LOP_REM_INT_LIT16_continue_div:
     cltd
     idivl   %ecx
-.LOP_REM_INT_LIT16_finish_div:
-    SET_VREG %edx rINST
-    FETCH_INST_OPCODE 2 %edx
+    SET_VREG rIBASE rINST
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
-/* continuation for OP_DIV_INT_LIT8 */
+
+/* ------------------------------ */
+.L_OP_AND_INT_LIT16: /* 0xd5 */
+/* File: x86/OP_AND_INT_LIT16.S */
+/* File: x86/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = eax op ecx".
+     * This could be an x86 instruction or a function call.  (If the result
+     * comes back in a register other than eax, you can override "result".)
+     *
+     * For: add-int/lit16, rsub-int,
+     *      and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    /* binop/lit16 vA, vB, #+CCCC */
+    movzbl   rINSTbl,%eax               # eax<- 000000BA
+    sarl     $4,%eax                   # eax<- B
+    GET_VREG_R %eax %eax                # eax<- vB
+    movswl   2(rPC),%ecx                # ecx<- ssssCCCC
+    andb     $0xf,rINSTbl              # rINST<- A
+    andl %ecx,%eax                              # for example: addl %ecx, %eax
+    SET_VREG %eax rINST
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_OR_INT_LIT16: /* 0xd6 */
+/* File: x86/OP_OR_INT_LIT16.S */
+/* File: x86/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = eax op ecx".
+     * This could be an x86 instruction or a function call.  (If the result
+     * comes back in a register other than eax, you can override "result".)
+     *
+     * For: add-int/lit16, rsub-int,
+     *      and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    /* binop/lit16 vA, vB, #+CCCC */
+    movzbl   rINSTbl,%eax               # eax<- 000000BA
+    sarl     $4,%eax                   # eax<- B
+    GET_VREG_R %eax %eax                # eax<- vB
+    movswl   2(rPC),%ecx                # ecx<- ssssCCCC
+    andb     $0xf,rINSTbl              # rINST<- A
+    orl     %ecx,%eax                              # for example: addl %ecx, %eax
+    SET_VREG %eax rINST
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_XOR_INT_LIT16: /* 0xd7 */
+/* File: x86/OP_XOR_INT_LIT16.S */
+/* File: x86/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = eax op ecx".
+     * This could be an x86 instruction or a function call.  (If the result
+     * comes back in a register other than eax, you can override "result".)
+     *
+     * For: add-int/lit16, rsub-int,
+     *      and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    /* binop/lit16 vA, vB, #+CCCC */
+    movzbl   rINSTbl,%eax               # eax<- 000000BA
+    sarl     $4,%eax                   # eax<- B
+    GET_VREG_R %eax %eax                # eax<- vB
+    movswl   2(rPC),%ecx                # ecx<- ssssCCCC
+    andb     $0xf,rINSTbl              # rINST<- A
+    xor    %ecx,%eax                              # for example: addl %ecx, %eax
+    SET_VREG %eax rINST
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_ADD_INT_LIT8: /* 0xd8 */
+/* File: x86/OP_ADD_INT_LIT8.S */
+/* File: x86/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = eax op ecx".
+     * This could be an x86 instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * For: add-int/lit8, rsub-int/lit8
+     *      and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    movzbl    2(rPC),%eax              # eax<- BB
+    movsbl    3(rPC),%ecx              # ecx<- ssssssCC
+    GET_VREG_R   %eax %eax             # eax<- rBB
+    addl %ecx,%eax                             # ex: addl %ecx,%eax
+    SET_VREG   %eax rINST
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
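The lit8 forms use format 22b instead: an AA|op unit followed by CC|BB, where BB is a full 8-bit register index and CC is a signed byte literal (hence movsbl). A short C sketch of that decoding for add-int/lit8, again on a toy register array:

    #include <stdint.h>
    #include <stdio.h>

    /* Format 22b: unit0 = AA|op, unit1 = CC|BB with CC a signed byte.
     * add-int/lit8 vAA, vBB, #+CC computes vAA <- vBB + sign-extended CC. */
    static void add_int_lit8(const uint16_t *insns, int32_t *vregs) {
        unsigned aa = insns[0] >> 8;
        unsigned bb = insns[1] & 0xff;
        int32_t lit = (int8_t)(insns[1] >> 8);  /* sign-extend the literal */
        vregs[aa] = vregs[bb] + lit;
    }

    int main(void) {
        int32_t vregs[256] = {0};
        vregs[7] = 10;
        uint16_t insns[2] = { 0x00d8, 0xff07 }; /* add-int/lit8 v0, v7, #-1 */
        add_int_lit8(insns, vregs);
        printf("v0 = %d\n", vregs[0]);          /* 9 */
        return 0;
    }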
+
+/* ------------------------------ */
+.L_OP_RSUB_INT_LIT8: /* 0xd9 */
+/* File: x86/OP_RSUB_INT_LIT8.S */
+/* File: x86/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = eax op ecx".
+     * This could be an x86 instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * For: add-int/lit8, rsub-int/lit8
+     *      and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    movzbl    2(rPC),%eax              # eax<- BB
+    movsbl    3(rPC),%ecx              # ecx<- ssssssCC
+    GET_VREG_R   %eax %eax             # eax<- rBB
+    subl  %eax,%ecx                             # ex: addl %ecx,%eax
+    SET_VREG   %ecx rINST
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_MUL_INT_LIT8: /* 0xda */
+/* File: x86/OP_MUL_INT_LIT8.S */
+    /* mul/lit8 vAA, vBB, #+CC */
+    movzbl    2(rPC),%eax              # eax<- BB
+    movsbl    3(rPC),%ecx              # ecx<- ssssssCC
+    GET_VREG_R   %eax %eax             # eax<- rBB
+    SPILL(rIBASE)
+    imull     %ecx,%eax                # trashes rIBASE/edx
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    SET_VREG  %eax rINST
+    GOTO_NEXT_R %ecx
+
+/* ------------------------------ */
+.L_OP_DIV_INT_LIT8: /* 0xdb */
+/* File: x86/OP_DIV_INT_LIT8.S */
+/* File: x86/bindivLit8.S */
+    /*
+     * 32-bit div/rem "lit8" binary operation.  Handles special case of
+     * op0=minint & op1=-1
+     */
+    /* div/rem/lit8 vAA, vBB, #+CC */
+    movzbl    2(rPC),%eax        # eax<- BB
+    movsbl    3(rPC),%ecx        # ecx<- ssssssCC
+    SPILL(rIBASE)
+    GET_VREG_R  %eax %eax        # eax<- rBB
+    cmpl     $0,%ecx
+    je       common_errDivideByZero
+    cmpl     $0x80000000,%eax
+    jne      .LOP_DIV_INT_LIT8_continue_div
+    cmpl     $-1,%ecx
+    jne      .LOP_DIV_INT_LIT8_continue_div
+    movl     $0x80000000,%eax
+    SET_VREG %eax rINST
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
 .LOP_DIV_INT_LIT8_continue_div:
     cltd
     idivl   %ecx
-.LOP_DIV_INT_LIT8_finish_div:
     SET_VREG %eax rINST
-    FETCH_INST_OPCODE 2 %edx
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
-/* continuation for OP_REM_INT_LIT8 */
+
+/* ------------------------------ */
+.L_OP_REM_INT_LIT8: /* 0xdc */
+/* File: x86/OP_REM_INT_LIT8.S */
+/* File: x86/bindivLit8.S */
+    /*
+     * 32-bit div/rem "lit8" binary operation.  Handles special case of
+     * op0=minint & op1=-1
+     */
+    /* div/rem/lit8 vAA, vBB, #+CC */
+    movzbl    2(rPC),%eax        # eax<- BB
+    movsbl    3(rPC),%ecx        # ecx<- ssssssCC
+    SPILL(rIBASE)
+    GET_VREG_R  %eax %eax        # eax<- rBB
+    cmpl     $0,%ecx
+    je       common_errDivideByZero
+    cmpl     $0x80000000,%eax
+    jne      .LOP_REM_INT_LIT8_continue_div
+    cmpl     $-1,%ecx
+    jne      .LOP_REM_INT_LIT8_continue_div
+    movl     $0,rIBASE
+    SET_VREG rIBASE rINST
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
 .LOP_REM_INT_LIT8_continue_div:
     cltd
     idivl   %ecx
-.LOP_REM_INT_LIT8_finish_div:
-    SET_VREG %edx rINST
-    FETCH_INST_OPCODE 2 %edx
+    SET_VREG rIBASE rINST
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-/* continuation for OP_IGET_VOLATILE */
+    GOTO_NEXT_R %ecx
 
 
-.LOP_IGET_VOLATILE_resolve:
+/* ------------------------------ */
+.L_OP_AND_INT_LIT8: /* 0xdd */
+/* File: x86/OP_AND_INT_LIT8.S */
+/* File: x86/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = eax op ecx".
+     * This could be an x86 instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * For: add-int/lit8, rsub-int/lit8
+     *      and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    movzbl    2(rPC),%eax              # eax<- BB
+    movsbl    3(rPC),%ecx              # ecx<- ssssssCC
+    GET_VREG_R   %eax %eax             # eax<- rBB
+    andl %ecx,%eax                             # ex: addl %ecx,%eax
+    SET_VREG   %eax rINST
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_OR_INT_LIT8: /* 0xde */
+/* File: x86/OP_OR_INT_LIT8.S */
+/* File: x86/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = eax op ecx".
+     * This could be an x86 instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * For: add-int/lit8, rsub-int/lit8
+     *      and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    movzbl    2(rPC),%eax              # eax<- BB
+    movsbl    3(rPC),%ecx              # ecx<- ssssssCC
+    GET_VREG_R   %eax %eax             # eax<- rBB
+    orl     %ecx,%eax                             # ex: addl %ecx,%eax
+    SET_VREG   %eax rINST
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_XOR_INT_LIT8: /* 0xdf */
+/* File: x86/OP_XOR_INT_LIT8.S */
+/* File: x86/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = eax op ecx".
+     * This could be an x86 instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * For: add-int/lit8, rsub-int/lit8
+     *      and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    movzbl    2(rPC),%eax              # eax<- BB
+    movsbl    3(rPC),%ecx              # ecx<- ssssssCC
+    GET_VREG_R   %eax %eax             # eax<- rBB
+    xor    %ecx,%eax                             # ex: addl %ecx,%eax
+    SET_VREG   %eax rINST
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_SHL_INT_LIT8: /* 0xe0 */
+/* File: x86/OP_SHL_INT_LIT8.S */
+/* File: x86/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = eax op ecx".
+     * This could be an x86 instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * For: add-int/lit8, rsub-int/lit8
+     *      and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    movzbl    2(rPC),%eax              # eax<- BB
+    movsbl    3(rPC),%ecx              # ecx<- ssssssCC
+    GET_VREG_R   %eax %eax             # eax<- rBB
+    sall  %cl,%eax                             # ex: addl %ecx,%eax
+    SET_VREG   %eax rINST
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_SHR_INT_LIT8: /* 0xe1 */
+/* File: x86/OP_SHR_INT_LIT8.S */
+/* File: x86/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = eax op ecx".
+     * This could be an x86 instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * For: add-int/lit8, rsub-int/lit8
+     *      and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    movzbl    2(rPC),%eax              # eax<- BB
+    movsbl    3(rPC),%ecx              # ecx<- ssssssCC
+    GET_VREG_R   %eax %eax             # eax<- rBB
+    sarl    %cl,%eax                             # ex: addl %ecx,%eax
+    SET_VREG   %eax rINST
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_USHR_INT_LIT8: /* 0xe2 */
+/* File: x86/OP_USHR_INT_LIT8.S */
+/* File: x86/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = eax op ecx".
+     * This could be an x86 instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * For: add-int/lit8, rsub-int/lit8
+     *      and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    movzbl    2(rPC),%eax              # eax<- BB
+    movsbl    3(rPC),%ecx              # ecx<- ssssssCC
+    GET_VREG_R   %eax %eax             # eax<- rBB
+    shrl     %cl,%eax                             # ex: addl %ecx,%eax
+    SET_VREG   %eax rINST
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_IGET_VOLATILE: /* 0xe3 */
+/* File: x86/OP_IGET_VOLATILE.S */
+/* File: x86/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    /* op vA, vB, field@CCCC */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)                               # preserve rIBASE
+    movzwl  2(rPC),rIBASE                       # rIBASE<- 0000CCCC
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzbl  rINSTbl,%ecx                        # ecx<- BA
+    sarl    $4,%ecx                            # ecx<- B
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    andb    $0xf,rINSTbl                       # rINST<- A
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IGET_VOLATILE_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)               # needed by dvmResolveInstField
+    movl    rSELF,rIBASE
     EXPORT_PC
-    movl    offGlue_method(%edx),%edx           # edx<- current method
-    movl    offMethod_clazz(%edx),%edx          # edx<- method->clazz
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
     SPILL_TMP1(%ecx)                            # save obj pointer across call
-    movl    %edx,OUT_ARG0(%esp)                  # pass in method->clazz
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
     call    dvmResolveInstField                 #  ... to dvmResolveInstField
     UNSPILL_TMP1(%ecx)
     testl   %eax,%eax                           #  returns InstrField ptr
@@ -8474,21 +7190,43 @@
     testl   %ecx,%ecx                           # object null?
     je      common_errNullObject                # object was null
     movl   (%ecx,%eax,1),%ecx                  # ecx<- obj.field (8/16/32 bits)
-    movl    rINST,%eax                          # eax<- A
-    FETCH_INST_OPCODE 2 %edx
-    SET_VREG %ecx %eax
+    FETCH_INST_OPCODE 2 %eax
+    UNSPILL(rIBASE)
+    SET_VREG %ecx rINST
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-
-/* continuation for OP_IPUT_VOLATILE */
+    GOTO_NEXT_R %eax
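The fast path above indexes pDvmDex->pResFields by the field index and only falls back to dvmResolveInstField when the cached entry is still null; once resolved, the InstField's byteOffset is all that is needed for the load. A hedged, self-contained C sketch of that resolve-and-cache pattern follows; the struct, table, and resolver names are stand-ins, not the actual Dalvik runtime types:

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in for the resolved-field cache the handler consults. */
    struct InstField { size_t byteOffset; };

    static struct InstField fieldTable[4] = { {0}, {4}, {8}, {12} };
    static struct InstField *resolvedFields[4];      /* pResFields analogue */

    static struct InstField *resolveInstField(unsigned idx) {
        return &fieldTable[idx];                     /* dvmResolveInstField stand-in */
    }

    static int iget32(const int *obj, unsigned fieldIdx) {
        struct InstField *f = resolvedFields[fieldIdx];
        if (f == NULL) {                             /* null entry: resolve, then cache */
            f = resolveInstField(fieldIdx);
            resolvedFields[fieldIdx] = f;
        }
        return obj[f->byteOffset / sizeof(int)];     /* obj.field, 32-bit load */
    }

    int main(void) {
        int obj[4] = { 0, 77, 0, 0 };                /* field #1 at byte offset 4 */
        printf("first:  %d\n", iget32(obj, 1));      /* resolves and caches */
        printf("cached: %d\n", iget32(obj, 1));      /* fast path */
        return 0;
    }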
 
 
-.LOP_IPUT_VOLATILE_resolve:
+/* ------------------------------ */
+.L_OP_IPUT_VOLATILE: /* 0xe4 */
+/* File: x86/OP_IPUT_VOLATILE.S */
+/* File: x86/OP_IPUT.S */
+
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    /* op vA, vB, field@CCCC */
+    movl    rSELF,%ecx
+    SPILL   (rIBASE)
+    movzwl  2(rPC),rIBASE                       # rIBASE<- 0000CCCC
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzbl  rINSTbl,%ecx                        # ecx<- BA
+    sarl    $4,%ecx                            # ecx<- B
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    andb    $0xf,rINSTbl                       # rINST<- A
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IPUT_VOLATILE_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)
+    movl    rSELF,rIBASE
     EXPORT_PC
-    movl    offGlue_method(%edx),%edx           # edx<- current method
-    movl    offMethod_clazz(%edx),%edx          # edx<- method->clazz
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
     SPILL_TMP1(%ecx)                            # save obj pointer across call
-    movl    %edx,OUT_ARG0(%esp)                 # pass in method->clazz
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
     call    dvmResolveInstField                 #  ... to dvmResolveInstField
     UNSPILL_TMP1(%ecx)
     testl   %eax,%eax                           # returns InstrField ptr
@@ -8506,56 +7244,128 @@
     movl    offInstField_byteOffset(%eax),%eax   # eax<- byte offset of field
     testl   %ecx,%ecx                            # object null?
     je      common_errNullObject                 # object was null
-    FETCH_INST_OPCODE 2 %edx
     movl   rINST,(%ecx,%eax,1)            # obj.field <- v[A](8/16/32 bits)
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
-/* continuation for OP_SGET_VOLATILE */
+
+/* ------------------------------ */
+.L_OP_SGET_VOLATILE: /* 0xe5 */
+/* File: x86/OP_SGET_VOLATILE.S */
+/* File: x86/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    /* op vAA, field@BBBB */
+    movl      rSELF,%ecx
+    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SGET_VOLATILE_resolve                # if not, make it so
+.LOP_SGET_VOLATILE_finish:     # field ptr in eax
+    movl      offStaticField_value(%eax),%eax
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    SET_VREG %eax rINST
+    GOTO_NEXT_R %ecx
 
     /*
      * Go resolve the field
      */
 .LOP_SGET_VOLATILE_resolve:
-    movl     rGLUE,%ecx
+    movl     rSELF,%ecx
     movzwl   2(rPC),%eax                        # eax<- field ref BBBB
-    movl     offGlue_method(%ecx),%ecx          # ecx<- current method
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
     EXPORT_PC                                   # could throw, need to export
     movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
     movl     %eax,OUT_ARG1(%esp)
     movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
     call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
     testl    %eax,%eax
     jne      .LOP_SGET_VOLATILE_finish                 # success, continue
     jmp      common_exceptionThrown             # no, handle exception
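
The static-field handlers are simpler: pResFields caches a StaticField pointer, and the value lives directly in the field structure, so the fast path is one indexed load plus one move from offStaticField_value. A rough sketch, again with stand-in types and a hypothetical resolve_static_field() in place of the dvmResolveStaticField() call:

    #include <stdint.h>
    #include <stddef.h>

    /* Stand-in for the real StaticField; only the 32-bit value slot is shown. */
    struct StaticFieldLite { uint32_t value; };

    /* Hypothetical helper in place of the dvmResolveStaticField() call. */
    struct StaticFieldLite *resolve_static_field(uint32_t fieldIdx);

    /* sget fast path: use the cached pointer, resolve only on a miss. */
    static int sget32(struct StaticFieldLite **pResFields, uint32_t fieldIdx,
                      uint32_t *dst)
    {
        struct StaticFieldLite *f = pResFields[fieldIdx];
        if (f == NULL && (f = resolve_static_field(fieldIdx)) == NULL)
            return -1;                      /* resolution threw an exception */
        *dst = f->value;                    /* offStaticField_value load */
        return 0;
    }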
 
-/* continuation for OP_SPUT_VOLATILE */
+
+/* ------------------------------ */
+.L_OP_SPUT_VOLATILE: /* 0xe6 */
+/* File: x86/OP_SPUT_VOLATILE.S */
+/* File: x86/OP_SPUT.S */
+    /*
+     * General 32-bit SPUT handler.
+     *
+     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    /* op vAA, field@BBBB */
+    movl      rSELF,%ecx
+    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SPUT_VOLATILE_resolve                # if not, make it so
+.LOP_SPUT_VOLATILE_finish:     # field ptr in eax
+    GET_VREG_R  rINST rINST
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    movl      rINST,offStaticField_value(%eax)
+    GOTO_NEXT_R %ecx
 
     /*
      * Go resolve the field
      */
 .LOP_SPUT_VOLATILE_resolve:
-    movl     rGLUE,%ecx
+    movl     rSELF,%ecx
     movzwl   2(rPC),%eax                        # eax<- field ref BBBB
-    movl     offGlue_method(%ecx),%ecx          # ecx<- current method
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
     EXPORT_PC                                   # could throw, need to export
     movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
     movl     %eax,OUT_ARG1(%esp)
     movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
     call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
     testl    %eax,%eax
     jne      .LOP_SPUT_VOLATILE_finish                 # success, continue
     jmp      common_exceptionThrown             # no, handle exception
 
-/* continuation for OP_IGET_OBJECT_VOLATILE */
 
-
-.LOP_IGET_OBJECT_VOLATILE_resolve:
+/* ------------------------------ */
+.L_OP_IGET_OBJECT_VOLATILE: /* 0xe7 */
+/* File: x86/OP_IGET_OBJECT_VOLATILE.S */
+/* File: x86/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    /* op vA, vB, field@CCCC */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)                               # preserve rIBASE
+    movzwl  2(rPC),rIBASE                       # rIBASE<- 0000CCCC
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzbl  rINSTbl,%ecx                        # ecx<- BA
+    sarl    $4,%ecx                            # ecx<- B
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    andb    $0xf,rINSTbl                       # rINST<- A
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IGET_OBJECT_VOLATILE_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)               # needed by dvmResolveInstField
+    movl    rSELF,rIBASE
     EXPORT_PC
-    movl    offGlue_method(%edx),%edx           # edx<- current method
-    movl    offMethod_clazz(%edx),%edx          # edx<- method->clazz
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
     SPILL_TMP1(%ecx)                            # save obj pointer across call
-    movl    %edx,OUT_ARG0(%esp)                  # pass in method->clazz
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
     call    dvmResolveInstField                 #  ... to dvmResolveInstField
     UNSPILL_TMP1(%ecx)
     testl   %eax,%eax                           #  returns InstField ptr
@@ -8573,13 +7383,110 @@
     testl   %ecx,%ecx                           # object null?
     je      common_errNullObject                # object was null
     movl   (%ecx,%eax,1),%ecx                  # ecx<- obj.field (8/16/32 bits)
-    movl    rINST,%eax                          # eax<- A
-    FETCH_INST_OPCODE 2 %edx
-    SET_VREG %ecx %eax
+    FETCH_INST_OPCODE 2 %eax
+    UNSPILL(rIBASE)
+    SET_VREG %ecx rINST
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %eax
 
-/* continuation for OP_EXECUTE_INLINE */
+
+/* ------------------------------ */
+.L_OP_IGET_WIDE_VOLATILE: /* 0xe8 */
+    /* (stub) */
+    SAVE_PC_FP_TO_SELF %ecx          # leaves rSELF in %ecx
+    movl %ecx,OUT_ARG0(%esp)         # self is first arg to function
+    call      dvmMterp_OP_IGET_WIDE_VOLATILE     # do the real work
+    movl      rSELF,%ecx
+    LOAD_PC_FP_FROM_SELF             # retrieve updated values
+    movl      offThread_curHandlerTable(%ecx),rIBASE  # set up rIBASE
+    FETCH_INST
+    GOTO_NEXT
+/* ------------------------------ */
+.L_OP_IPUT_WIDE_VOLATILE: /* 0xe9 */
+    /* (stub) */
+    SAVE_PC_FP_TO_SELF %ecx          # leaves rSELF in %ecx
+    movl %ecx,OUT_ARG0(%esp)         # self is first arg to function
+    call      dvmMterp_OP_IPUT_WIDE_VOLATILE     # do the real work
+    movl      rSELF,%ecx
+    LOAD_PC_FP_FROM_SELF             # retrieve updated values
+    movl      offThread_curHandlerTable(%ecx),rIBASE  # set up rIBASE
+    FETCH_INST
+    GOTO_NEXT
+/* ------------------------------ */
+.L_OP_SGET_WIDE_VOLATILE: /* 0xea */
+    /* (stub) */
+    SAVE_PC_FP_TO_SELF %ecx          # leaves rSELF in %ecx
+    movl %ecx,OUT_ARG0(%esp)         # self is first arg to function
+    call      dvmMterp_OP_SGET_WIDE_VOLATILE     # do the real work
+    movl      rSELF,%ecx
+    LOAD_PC_FP_FROM_SELF             # retrieve updated values
+    movl      offThread_curHandlerTable(%ecx),rIBASE  # set up rIBASE
+    FETCH_INST
+    GOTO_NEXT
+/* ------------------------------ */
+.L_OP_SPUT_WIDE_VOLATILE: /* 0xeb */
+    /* (stub) */
+    SAVE_PC_FP_TO_SELF %ecx          # leaves rSELF in %ecx
+    movl %ecx,OUT_ARG0(%esp)         # self is first arg to function
+    call      dvmMterp_OP_SPUT_WIDE_VOLATILE     # do the real work
+    movl      rSELF,%ecx
+    LOAD_PC_FP_FROM_SELF             # retrieve updated values
+    movl      offThread_curHandlerTable(%ecx),rIBASE  # set up rIBASE
+    FETCH_INST
+    GOTO_NEXT
+/* ------------------------------ */
+.L_OP_BREAKPOINT: /* 0xec */
+/* File: x86/OP_BREAKPOINT.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_THROW_VERIFICATION_ERROR: /* 0xed */
+/* File: x86/OP_THROW_VERIFICATION_ERROR.S */
+    /*
+     * Handle a throw-verification-error instruction.  This throws an
+     * exception for an error discovered during verification.  The
+     * exception is indicated by AA, with some detail provided by BBBB.
+     */
+    /* op AA, ref@BBBB */
+    movl     rSELF,%ecx
+    movzwl   2(rPC),%eax                     # eax<- BBBB
+    movl     offThread_method(%ecx),%ecx       # ecx<- self->method
+    EXPORT_PC
+    movl     %eax,OUT_ARG2(%esp)             # arg2<- BBBB
+    movl     rINST,OUT_ARG1(%esp)            # arg1<- AA
+    movl     %ecx,OUT_ARG0(%esp)             # arg0<- method
+    call     dvmThrowVerificationError       # call(method, kind, ref)
+    jmp      common_exceptionThrown          # handle exception
+
+/* ------------------------------ */
+.L_OP_EXECUTE_INLINE: /* 0xee */
+/* File: x86/OP_EXECUTE_INLINE.S */
+    /*
+     * Execute a "native inline" instruction.
+     *
+     * We will be calling through a function table:
+     *
+     * (*gDvmInlineOpsTable[opIndex].func)(arg0, arg1, arg2, arg3, pResult)
+     *
+     * Ignores argument count - always loads 4.
+     *
+     */
+    /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */
+    movl      rSELF,%ecx
+    EXPORT_PC
+    movzwl    2(rPC),%eax               # eax<- BBBB
+    leal      offThread_retval(%ecx),%ecx # ecx<- & self->retval
+    SPILL(rIBASE)                       # preserve rIBASE
+    movl      %ecx,OUT_ARG4(%esp)
+    call      .LOP_EXECUTE_INLINE_continue      # make call; will return after
+    UNSPILL(rIBASE)                     # restore rIBASE
+    testl     %eax,%eax                 # successful?
+    FETCH_INST_OPCODE 3 %ecx
+    je        common_exceptionThrown    # no, handle exception
+    ADVANCE_PC 3
+    GOTO_NEXT_R %ecx
 
 .LOP_EXECUTE_INLINE_continue:
     /*
@@ -8591,58 +7498,327 @@
      *
      *  Go ahead and load all 4 args, even if not used.
      */
-    movzwl    4(rPC),%edx
+    movzwl    4(rPC),rIBASE
 
     movl      $0xf,%ecx
-    andl      %edx,%ecx
+    andl      rIBASE,%ecx
     GET_VREG_R  %ecx %ecx
-    sarl      $4,%edx
+    sarl      $4,rIBASE
     movl      %ecx,4+OUT_ARG0(%esp)
 
     movl      $0xf,%ecx
-    andl      %edx,%ecx
+    andl      rIBASE,%ecx
     GET_VREG_R  %ecx %ecx
-    sarl      $4,%edx
+    sarl      $4,rIBASE
     movl      %ecx,4+OUT_ARG1(%esp)
 
     movl      $0xf,%ecx
-    andl      %edx,%ecx
+    andl      rIBASE,%ecx
     GET_VREG_R  %ecx %ecx
-    sarl      $4,%edx
+    sarl      $4,rIBASE
     movl      %ecx,4+OUT_ARG2(%esp)
 
     movl      $0xf,%ecx
-    andl      %edx,%ecx
+    andl      rIBASE,%ecx
     GET_VREG_R  %ecx %ecx
-    sarl      $4,%edx
+    sarl      $4,rIBASE
     movl      %ecx,4+OUT_ARG3(%esp)
 
     sall      $4,%eax      # index *= sizeof(table entry)
     jmp       *gDvmInlineOpsTable(%eax)
     # will return to caller of .LOP_EXECUTE_INLINE_continue
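
The inline-op dispatch above boils down to the indirect call spelled out in the handler comment: index gDvmInlineOpsTable by the inline index, then call the entry's function pointer with four register values and a result pointer. A sketch of that call with simplified stand-in types (the real table entry carries more fields than just the function pointer):

    #include <stdint.h>
    #include <stdbool.h>

    /* Simplified stand-ins for the inline-op table entry and result union. */
    typedef union { uint32_t i; uint64_t j; } RetvalLite;
    typedef bool (*InlineFuncLite)(uint32_t a0, uint32_t a1, uint32_t a2,
                                   uint32_t a3, RetvalLite *pResult);
    struct InlineOpLite { InlineFuncLite func; };
    extern struct InlineOpLite inline_ops_table[];  /* stands in for gDvmInlineOpsTable */

    /* Always passes all four args, matching the "ignores argument count" note;
     * a false return means an exception is pending. */
    static bool execute_inline(const uint32_t *fp, unsigned vC, unsigned vD,
                               unsigned vE, unsigned vF, uint32_t opIndex,
                               RetvalLite *retval)
    {
        return inline_ops_table[opIndex].func(fp[vC], fp[vD], fp[vE], fp[vF],
                                              retval);
    }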
 
-/* continuation for OP_IPUT_OBJECT_QUICK */
+/* ------------------------------ */
+.L_OP_EXECUTE_INLINE_RANGE: /* 0xef */
+    /* (stub) */
+    SAVE_PC_FP_TO_SELF %ecx          # leaves rSELF in %ecx
+    movl %ecx,OUT_ARG0(%esp)         # self is first arg to function
+    call      dvmMterp_OP_EXECUTE_INLINE_RANGE     # do the real work
+    movl      rSELF,%ecx
+    LOAD_PC_FP_FROM_SELF             # retrieve updated values
+    movl      offThread_curHandlerTable(%ecx),rIBASE  # set up rIBASE
+    FETCH_INST
+    GOTO_NEXT
+/* ------------------------------ */
+.L_OP_INVOKE_OBJECT_INIT_RANGE: /* 0xf0 */
+    /* (stub) */
+    SAVE_PC_FP_TO_SELF %ecx          # leaves rSELF in %ecx
+    movl %ecx,OUT_ARG0(%esp)         # self is first arg to function
+    call      dvmMterp_OP_INVOKE_OBJECT_INIT_RANGE     # do the real work
+    movl      rSELF,%ecx
+    LOAD_PC_FP_FROM_SELF             # retrieve updated values
+    movl      offThread_curHandlerTable(%ecx),rIBASE  # set up rIBASE
+    FETCH_INST
+    GOTO_NEXT
+/* ------------------------------ */
+.L_OP_RETURN_VOID_BARRIER: /* 0xf1 */
+    /* (stub) */
+    SAVE_PC_FP_TO_SELF %ecx          # leaves rSELF in %ecx
+    movl %ecx,OUT_ARG0(%esp)         # self is first arg to function
+    call      dvmMterp_OP_RETURN_VOID_BARRIER     # do the real work
+    movl      rSELF,%ecx
+    LOAD_PC_FP_FROM_SELF             # retrieve updated values
+    movl      offThread_curHandlerTable(%ecx),rIBASE  # set up rIBASE
+    FETCH_INST
+    GOTO_NEXT
+/* ------------------------------ */
+.L_OP_IGET_QUICK: /* 0xf2 */
+/* File: x86/OP_IGET_QUICK.S */
+    /* For: iget-quick, iget-object-quick */
+    /* op vA, vB, offset@CCCC */
+    movzbl    rINSTbl,%ecx              # ecx<- BA
+    sarl      $4,%ecx                  # ecx<- B
+    GET_VREG_R  %ecx %ecx               # vB (object we're operating on)
+    movzwl    2(rPC),%eax               # eax<- field byte offset
+    cmpl      $0,%ecx                  # is object null?
+    je        common_errNullObject
+    movl      (%ecx,%eax,1),%eax
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    andb      $0xf,rINSTbl             # rINST<- A
+    SET_VREG  %eax rINST                # fp[A]<- result
+    GOTO_NEXT_R %ecx
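
The -quick field opcodes skip resolution entirely: the optimizer has already rewritten the field reference into a raw byte offset, so the handler only needs a null check and one load. A minimal sketch:

    #include <stdint.h>
    #include <stddef.h>

    /* iget-quick: CCCC in the instruction is already a byte offset. */
    static int iget_quick(const char *obj, uint32_t byteOffset, uint32_t *dst)
    {
        if (obj == NULL)
            return -1;                      /* common_errNullObject */
        *dst = *(const uint32_t *)(obj + byteOffset);
        return 0;
    }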
 
-.LOP_IPUT_OBJECT_QUICK_finish:
+/* ------------------------------ */
+.L_OP_IGET_WIDE_QUICK: /* 0xf3 */
+/* File: x86/OP_IGET_WIDE_QUICK.S */
+    /* For: iget-wide-quick */
+    /* op vA, vB, offset@CCCC */
+    movzbl    rINSTbl,%ecx              # ecx<- BA
+    sarl      $4,%ecx                  # ecx<- B
+    GET_VREG_R  %ecx %ecx               # vB (object we're operating on)
+    movzwl    2(rPC),%eax               # eax<- field byte offset
+    cmpl      $0,%ecx                  # is object null?
+    je        common_errNullObject
+    leal      (%ecx,%eax,1),%eax        # eax<- address of 64-bit source
+    movl      (%eax),%ecx               # ecx<- lsw
+    movl      4(%eax),%eax              # eax<- msw
+    andb      $0xf,rINSTbl             # rINST<- A
+    SET_VREG_WORD %ecx rINST 0          # v[A+0]<- lsw
+    FETCH_INST_OPCODE 2 %ecx
+    SET_VREG_WORD %eax rINST 1          # v[A+1]<- msw
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+/* ------------------------------ */
+.L_OP_IGET_OBJECT_QUICK: /* 0xf4 */
+/* File: x86/OP_IGET_OBJECT_QUICK.S */
+/* File: x86/OP_IGET_QUICK.S */
+    /* For: iget-quick, iget-object-quick */
+    /* op vA, vB, offset@CCCC */
+    movzbl    rINSTbl,%ecx              # ecx<- BA
+    sarl      $4,%ecx                  # ecx<- B
+    GET_VREG_R  %ecx %ecx               # vB (object we're operating on)
+    movzwl    2(rPC),%eax               # eax<- field byte offset
+    cmpl      $0,%ecx                  # is object null?
+    je        common_errNullObject
+    movl      (%ecx,%eax,1),%eax
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    andb      $0xf,rINSTbl             # rINST<- A
+    SET_VREG  %eax rINST                # fp[A]<- result
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_IPUT_QUICK: /* 0xf5 */
+/* File: x86/OP_IPUT_QUICK.S */
+    /* For: iput-quick */
+    /* op vA, vB, offset@CCCC */
+    movzbl    rINSTbl,%ecx              # ecx<- BA
+    sarl      $4,%ecx                  # ecx<- B
+    GET_VREG_R  %ecx %ecx               # vB (object we're operating on)
+    andb      $0xf,rINSTbl             # rINST<- A
+    GET_VREG_R  rINST,rINST             # rINST<- v[A]
+    movzwl    2(rPC),%eax               # eax<- field byte offset
+    testl     %ecx,%ecx                 # is object null?
+    je        common_errNullObject
+    movl      rINST,(%ecx,%eax,1)
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+/* ------------------------------ */
+.L_OP_IPUT_WIDE_QUICK: /* 0xf6 */
+/* File: x86/OP_IPUT_WIDE_QUICK.S */
+    /* For: iput-wide-quick */
+    /* op vA, vB, offset@CCCC */
+    movzbl    rINSTbl,%ecx              # ecx<- BA
+    sarl      $4,%ecx                  # ecx<- B
+    GET_VREG_R  %ecx %ecx               # vB (object we're operating on)
+    movzwl    2(rPC),%eax               # eax<- field byte offset
+    testl      %ecx,%ecx                # is object null?
+    je        common_errNullObject
+    leal      (%ecx,%eax,1),%ecx        # ecx<- Address of 64-bit target
+    andb      $0xf,rINSTbl             # rINST<- A
+    GET_VREG_WORD %eax rINST 0          # eax<- lsw
+    GET_VREG_WORD rINST rINST 1         # rINST<- msw
+    movl      %eax,(%ecx)
+    movl      rINST,4(%ecx)
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
+/* ------------------------------ */
+.L_OP_IPUT_OBJECT_QUICK: /* 0xf7 */
+/* File: x86/OP_IPUT_OBJECT_QUICK.S */
+    /* For: iput-object-quick */
+    /* op vA, vB, offset@CCCC */
+    movzbl    rINSTbl,%ecx              # ecx<- BA
+    sarl      $4,%ecx                  # ecx<- B
+    GET_VREG_R  %ecx %ecx               # vB (object we're operating on)
+    andb      $0xf,rINSTbl             # rINST<- A
+    GET_VREG_R  rINST rINST             # rINST<- v[A]
+    movzwl    2(rPC),%eax               # eax<- field byte offset
+    testl     %ecx,%ecx                 # is object null?
+    je        common_errNullObject
+    movl      rINST,(%ecx,%eax,1)
+    movl      rSELF,%eax
     testl     rINST,rINST               # did we store null?
-    FETCH_INST_OPCODE 2 %edx
-    movl      offGlue_cardTable(%eax),%eax  # get card table base
+    movl      offThread_cardTable(%eax),%eax  # get card table base
     je        1f                            # skip card mark if null store
     shrl      $GC_CARD_SHIFT,%ecx          # object head to card number
     movb      %al,(%eax,%ecx)               # mark card based on object head
 1:
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
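
The only twist in iput-object-quick is the card-mark write barrier after the reference store. The assembly stores the low byte of the card-table base itself (%al), which only works if the base is biased so that byte equals the dirty value; treating that biasing as an assumption, the barrier is roughly:

    #include <stdint.h>
    #include <stddef.h>

    #define GC_CARD_SHIFT_LITE 7   /* stand-in; the real shift comes from the GC headers */

    /* Store the reference, then dirty the card covering the object header.
     * The object has already been null-checked by the handler. */
    static void iput_object_quick(uint8_t *cardBase, char *obj,
                                  uint32_t byteOffset, void *val)
    {
        *(void **)(obj + byteOffset) = val;
        if (val != NULL)                    /* null stores skip the card mark */
            cardBase[(uintptr_t)obj >> GC_CARD_SHIFT_LITE] =
                    (uint8_t)(uintptr_t)cardBase;
    }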
 
-/* continuation for OP_IPUT_OBJECT_VOLATILE */
+/* ------------------------------ */
+.L_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */
+/* File: x86/OP_INVOKE_VIRTUAL_QUICK.S */
+    /*
+     * Handle an optimized virtual method call.
+     *
+     * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    movzwl    4(rPC),%eax               # eax<- FEDC or CCCC
+    movzwl    2(rPC),%ecx               # ecx<- BBBB
+    .if     (!0)
+    andl      $0xf,%eax                # eax<- C (or stays CCCC)
+    .endif
+    GET_VREG_R  %eax %eax               # eax<- vC ("this" ptr)
+    testl     %eax,%eax                 # null?
+    je        common_errNullObject      # yep, throw exception
+    movl      offObject_clazz(%eax),%eax # eax<- thisPtr->clazz
+    movl      offClassObject_vtable(%eax),%eax # eax<- thisPtr->clazz->vtable
+    EXPORT_PC                           # might throw later - get ready
+    movl      (%eax,%ecx,4),%eax        # eax<- vtable[BBBB]
+    jmp       common_invokeMethodNoRange
+
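invoke-virtual-quick has already had its method reference rewritten into a vtable index, so locating the target is just two dependent loads off the receiver, as sketched below with stand-in types:

    #include <stdint.h>
    #include <stddef.h>

    /* Simplified stand-ins for Object/ClassObject/Method. */
    struct MethodLite;
    struct ClassLite  { struct MethodLite **vtable; };
    struct ObjectLite { struct ClassLite *clazz; };

    /* BBBB is a vtable index; a null receiver throws instead of dispatching. */
    static struct MethodLite *find_virtual_quick(struct ObjectLite *thisPtr,
                                                 uint32_t vtableIdx)
    {
        if (thisPtr == NULL)
            return NULL;                    /* common_errNullObject */
        return thisPtr->clazz->vtable[vtableIdx];
    }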
+/* ------------------------------ */
+.L_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */
+/* File: x86/OP_INVOKE_VIRTUAL_QUICK_RANGE.S */
+/* File: x86/OP_INVOKE_VIRTUAL_QUICK.S */
+    /*
+     * Handle an optimized virtual method call.
+     *
+     * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    movzwl    4(rPC),%eax               # eax<- FEDC or CCCC
+    movzwl    2(rPC),%ecx               # ecx<- BBBB
+    .if     (!1)
+    andl      $0xf,%eax                # eax<- C (or stays CCCC)
+    .endif
+    GET_VREG_R  %eax %eax               # eax<- vC ("this" ptr)
+    testl     %eax,%eax                 # null?
+    je        common_errNullObject      # yep, throw exception
+    movl      offObject_clazz(%eax),%eax # eax<- thisPtr->clazz
+    movl      offClassObject_vtable(%eax),%eax # eax<- thisPtr->clazz->vtable
+    EXPORT_PC                           # might throw later - get ready
+    movl      (%eax,%ecx,4),%eax        # eax<- vtable[BBBB]
+    jmp       common_invokeMethodRange
 
 
-.LOP_IPUT_OBJECT_VOLATILE_resolve:
+/* ------------------------------ */
+.L_OP_INVOKE_SUPER_QUICK: /* 0xfa */
+/* File: x86/OP_INVOKE_SUPER_QUICK.S */
+    /*
+     * Handle an optimized "super" method call.
+     *
+     * for: [opt] invoke-super-quick, invoke-super-quick/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    movl      rSELF,%ecx
+    movzwl    4(rPC),%eax               # eax<- GFED or CCCC
+    movl      offThread_method(%ecx),%ecx # ecx<- current method
+    .if       (!0)
+    andl      $0xf,%eax                # eax<- D (or stays CCCC)
+    .endif
+    movl      offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+    GET_VREG_R  %eax %eax               # eax<- "this"
+    movl      offClassObject_super(%ecx),%ecx # ecx<- method->clazz->super
+    testl     %eax,%eax                 # null "this"?
+    je        common_errNullObject      # "this" is null, throw exception
+    movzwl    2(rPC),%eax               # eax<- BBBB
+    movl      offClassObject_vtable(%ecx),%ecx # ecx<- vtable
     EXPORT_PC
-    movl    offGlue_method(%edx),%edx           # edx<- current method
-    movl    offMethod_clazz(%edx),%edx          # edx<- method->clazz
+    movl      (%ecx,%eax,4),%eax        # eax<- super->vtable[BBBB]
+    jmp       common_invokeMethodNoRange
+
+/* ------------------------------ */
+.L_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */
+/* File: x86/OP_INVOKE_SUPER_QUICK_RANGE.S */
+/* File: x86/OP_INVOKE_SUPER_QUICK.S */
+    /*
+     * Handle an optimized "super" method call.
+     *
+     * for: [opt] invoke-super-quick, invoke-super-quick/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    movl      rSELF,%ecx
+    movzwl    4(rPC),%eax               # eax<- GFED or CCCC
+    movl      offThread_method(%ecx),%ecx # ecx<- current method
+    .if       (!1)
+    andl      $0xf,%eax                # eax<- D (or stays CCCC)
+    .endif
+    movl      offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+    GET_VREG_R  %eax %eax               # eax<- "this"
+    movl      offClassObject_super(%ecx),%ecx # ecx<- method->clazz->super
+    testl     %eax,%eax                 # null "this"?
+    je        common_errNullObject      # "this" is null, throw exception
+    movzwl    2(rPC),%eax               # eax<- BBBB
+    movl      offClassObject_vtable(%ecx),%ecx # ecx<- vtable
+    EXPORT_PC
+    movl      (%ecx,%eax,4),%eax        # eax<- super->vtable[BBBB]
+    jmp       common_invokeMethodRange
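
invoke-super-quick differs from the virtual-quick form only in where it indexes: it uses the superclass vtable of the current method's declaring class, not the receiver's class. Roughly, with stand-in types:

    #include <stdint.h>

    struct SMethodLite;
    struct SClassLite { struct SClassLite *super; struct SMethodLite **vtable; };

    /* BBBB indexes method->clazz->super->vtable; "this" is only null-checked. */
    static struct SMethodLite *find_super_quick(const struct SClassLite *declaringClass,
                                                uint32_t vtableIdx)
    {
        return declaringClass->super->vtable[vtableIdx];
    }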
+
+
+/* ------------------------------ */
+.L_OP_IPUT_OBJECT_VOLATILE: /* 0xfc */
+/* File: x86/OP_IPUT_OBJECT_VOLATILE.S */
+/* File: x86/OP_IPUT_OBJECT.S */
+    /*
+     * Object field put.
+     *
+     * for: iput-object
+     */
+    /* op vA, vB, field@CCCC */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)
+    movzwl  2(rPC),rIBASE                       # rIBASE<- 0000CCCC
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzbl  rINSTbl,%ecx                        # ecx<- BA
+    sarl    $4,%ecx                            # ecx<- B
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    andb    $0xf,rINSTbl                       # rINST<- A
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
+    movl    (%eax,rIBASE,4),%eax                  # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IPUT_OBJECT_VOLATILE_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)
+    movl    rSELF,rIBASE
+    EXPORT_PC
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
     SPILL_TMP1(%ecx)                            # save obj pointer across call
-    movl    %edx,OUT_ARG0(%esp)                 # pass in method->clazz
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
     call    dvmResolveInstField                 #  ... to dvmResolveInstField
     UNSPILL_TMP1(%ecx)
     testl   %eax,%eax                           # returns InstField ptr
@@ -8654,7 +7830,7 @@
      * Currently:
      *   eax holds resolved field
      *   ecx holds object
-     *   %edx is scratch, but needs to be unspilled
+     *   rIBASE is scratch, but needs to be unspilled
      *   rINST holds A
      */
     GET_VREG_R rINST rINST                      # rINST<- v[A]
@@ -8662,69 +7838,13405 @@
     testl   %ecx,%ecx                           # object null?
     je      common_errNullObject                # object was null
     movl    rINST,(%ecx,%eax)      # obj.field <- v[A](8/16/32 bits)
-    movl    rGLUE,%eax
+    movl    rSELF,%eax
     testl   rINST,rINST                         # stored a NULL?
-    movl    offGlue_cardTable(%eax),%eax        # get card table base
-    FETCH_INST_OPCODE 2 %edx
+    movl    offThread_cardTable(%eax),%eax      # get card table base
     je      1f                                  # skip card mark if null store
     shrl    $GC_CARD_SHIFT,%ecx                # object head to card number
     movb    %al,(%eax,%ecx)                     # mark card using object head
 1:
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
-/* continuation for OP_SGET_OBJECT_VOLATILE */
+
+/* ------------------------------ */
+.L_OP_SGET_OBJECT_VOLATILE: /* 0xfd */
+/* File: x86/OP_SGET_OBJECT_VOLATILE.S */
+/* File: x86/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    /* op vAA, field@BBBB */
+    movl      rSELF,%ecx
+    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SGET_OBJECT_VOLATILE_resolve                # if not, make it so
+.LOP_SGET_OBJECT_VOLATILE_finish:     # field ptr in eax
+    movl      offStaticField_value(%eax),%eax
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    SET_VREG %eax rINST
+    GOTO_NEXT_R %ecx
 
     /*
      * Go resolve the field
      */
 .LOP_SGET_OBJECT_VOLATILE_resolve:
-    movl     rGLUE,%ecx
+    movl     rSELF,%ecx
     movzwl   2(rPC),%eax                        # eax<- field ref BBBB
-    movl     offGlue_method(%ecx),%ecx          # ecx<- current method
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
     EXPORT_PC                                   # could throw, need to export
     movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
     movl     %eax,OUT_ARG1(%esp)
     movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
     call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
     testl    %eax,%eax
     jne      .LOP_SGET_OBJECT_VOLATILE_finish                 # success, continue
     jmp      common_exceptionThrown             # no, handle exception
 
-/* continuation for OP_SPUT_OBJECT_VOLATILE */
 
-
-.LOP_SPUT_OBJECT_VOLATILE_continue:
+/* ------------------------------ */
+.L_OP_SPUT_OBJECT_VOLATILE: /* 0xfe */
+/* File: x86/OP_SPUT_OBJECT_VOLATILE.S */
+/* File: x86/OP_SPUT_OBJECT.S */
+    /*
+     * SPUT object handler.
+     */
+    /* op vAA, field@BBBB */
+    movl      rSELF,%ecx
+    movzwl    2(rPC),%eax                        # eax<- field ref BBBB
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SPUT_OBJECT_VOLATILE_resolve                # if not, make it so
+.LOP_SPUT_OBJECT_VOLATILE_finish:                              # field ptr in eax
+    movzbl    rINSTbl,%ecx                       # ecx<- AA
+    GET_VREG_R  %ecx %ecx
     movl      %ecx,offStaticField_value(%eax)    # do the store
     testl     %ecx,%ecx                          # stored null object ptr?
-    FETCH_INST_OPCODE 2 %edx
     je        1f                                 # skip card mark if null
-    movl      rGLUE,%ecx
+    movl      rSELF,%ecx
     movl      offField_clazz(%eax),%eax          # eax<- method->clazz
-    movl      offGlue_cardTable(%ecx),%ecx       # get card table base
+    movl      offThread_cardTable(%ecx),%ecx       # get card table base
     shrl      $GC_CARD_SHIFT,%eax               # head to card number
     movb      %cl,(%ecx,%eax)                    # mark card
 1:
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 .LOP_SPUT_OBJECT_VOLATILE_resolve:
-    movl     rGLUE,%ecx
+    movl     rSELF,%ecx
     movzwl   2(rPC),%eax                        # eax<- field ref BBBB
-    movl     offGlue_method(%ecx),%ecx          # ecx<- current method
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
     EXPORT_PC                                   # could throw, need to export
     movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
     movl     %eax,OUT_ARG1(%esp)
     movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
     call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
     testl    %eax,%eax
     jne      .LOP_SPUT_OBJECT_VOLATILE_finish                 # success, continue
     jmp      common_exceptionThrown             # no, handle exception
 
-    .size   dvmAsmSisterStart, .-dvmAsmSisterStart
-    .global dvmAsmSisterEnd
-dvmAsmSisterEnd:
 
+/* ------------------------------ */
+.L_OP_DISPATCH_FF: /* 0xff */
+/* File: x86/OP_DISPATCH_FF.S */
+    leal      256(rINST),%ecx
+    GOTO_NEXT_JUMBO_R %ecx
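
The 0xff dispatcher is how the jumbo opcodes that follow are reached: at this point rINST holds the high byte of the instruction unit, so adding 256 produces the extended-opcode index used for the second-level jump. In C terms:

    #include <stdint.h>

    /* Extended (jumbo) opcode = 0x100 + high byte of the 0x..ff instruction unit. */
    static unsigned extended_opcode(uint16_t instrUnit)
    {
        return 0x100u + (instrUnit >> 8);
    }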
+
+/* ------------------------------ */
+.L_OP_CONST_CLASS_JUMBO: /* 0x100 */
+/* File: x86/OP_CONST_CLASS_JUMBO.S */
+    /* const-class/jumbo vBBBB, Class@AAAAAAAA */
+    movl      rSELF,%ecx
+    movl      2(rPC),%eax              # eax<- AAAAAAAA
+    movl      offThread_methodClassDex(%ecx),%ecx# ecx<- self->methodClassDex
+    movl      offDvmDex_pResClasses(%ecx),%ecx # ecx<- dvmDex->pResClasses
+    movl      (%ecx,%eax,4),%eax       # eax<- pResClasses[AAAAAAAA]
+    FETCH_INST_OPCODE 4 %ecx
+    testl     %eax,%eax                # resolved yet?
+    je        .LOP_CONST_CLASS_JUMBO_resolve
+    SET_VREG  %eax rINST               # vBBBB<- pResClasses[AAAAAAAA]
+    ADVANCE_PC 4
+    GOTO_NEXT_R %ecx
+
+/* This is the less common path, so we'll redo some work
+   here rather than force spills on the common path */
+.LOP_CONST_CLASS_JUMBO_resolve:
+    movl     rSELF,%eax
+    EXPORT_PC
+    movl     offThread_method(%eax),%eax # eax<- self->method
+    movl     $1,OUT_ARG2(%esp)        # true
+    movl     2(rPC),%ecx               # ecx<- AAAAAAAA
+    movl     offMethod_clazz(%eax),%eax
+    movl     %ecx,OUT_ARG1(%esp)
+    movl     %eax,OUT_ARG0(%esp)
+    SPILL(rIBASE)
+    call     dvmResolveClass           # go resolve
+    UNSPILL(rIBASE)
+    testl    %eax,%eax                 # failed?
+    je       common_exceptionThrown
+    FETCH_INST_OPCODE 4 %ecx
+    SET_VREG %eax rINST
+    ADVANCE_PC 4
+    GOTO_NEXT_R %ecx
+
+/* ------------------------------ */
+.L_OP_CHECK_CAST_JUMBO: /* 0x101 */
+/* File: x86/OP_CHECK_CAST_JUMBO.S */
+    /*
+     * Check to see if a cast from one class to another is allowed.
+     */
+    /* check-cast/jumbo vBBBB, class@AAAAAAAA */
+    movl      rSELF,%ecx
+    GET_VREG_R  rINST,rINST             # rINST<- vBBBB (object)
+    movl      2(rPC),%eax               # eax<- AAAAAAAA
+    movl      offThread_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
+    testl     rINST,rINST               # is object null?
+    movl      offDvmDex_pResClasses(%ecx),%ecx # ecx<- pDvmDex->pResClasses
+    je        .LOP_CHECK_CAST_JUMBO_okay          # null obj, cast always succeeds
+    movl      (%ecx,%eax,4),%eax        # eax<- resolved class
+    movl      offObject_clazz(rINST),%ecx # ecx<- obj->clazz
+    testl     %eax,%eax                 # have we resolved this before?
+    je        .LOP_CHECK_CAST_JUMBO_resolve       # no, go do it now
+.LOP_CHECK_CAST_JUMBO_resolved:
+    cmpl      %eax,%ecx                 # same class (trivial success)?
+    jne       .LOP_CHECK_CAST_JUMBO_fullcheck     # no, do full check
+.LOP_CHECK_CAST_JUMBO_okay:
+    FETCH_INST_OPCODE 4 %ecx
+    ADVANCE_PC 4
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  ecx holds obj->clazz
+     *  eax holds class resolved from AAAAAAAA
+     *  rINST holds object
+     */
+.LOP_CHECK_CAST_JUMBO_fullcheck:
+    movl    %eax,sReg0                 # we'll need the desired class on failure
+    movl    %eax,OUT_ARG1(%esp)
+    movl    %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
+    call    dvmInstanceofNonTrivial    # eax<- boolean result
+    UNSPILL(rIBASE)
+    testl   %eax,%eax                  # failed?
+    jne     .LOP_CHECK_CAST_JUMBO_okay           # no, success
+
+    # A cast has failed.  We need to throw a ClassCastException.
+    EXPORT_PC
+    movl    offObject_clazz(rINST),%eax
+    movl    %eax,OUT_ARG0(%esp)                 # arg0<- obj->clazz
+    movl    sReg0,%ecx
+    movl    %ecx,OUT_ARG1(%esp)                 # arg1<- desired class
+    call    dvmThrowClassCastException
+    jmp     common_exceptionThrown
+
+    /*
+     * Resolution required.  This is the least-likely path, and we're
+     * going to have to recreate some data.
+     *
+     *  rINST holds object
+     */
+.LOP_CHECK_CAST_JUMBO_resolve:
+    movl    rSELF,%ecx
+    EXPORT_PC
+    movl    2(rPC),%eax                # eax<- AAAAAAAA
+    movl    offThread_method(%ecx),%ecx  # ecx<- self->method
+    movl    %eax,OUT_ARG1(%esp)        # arg1<- AAAAAAAA
+    movl    offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+    movl    $0,OUT_ARG2(%esp)         # arg2<- false
+    movl    %ecx,OUT_ARG0(%esp)        # arg0<- method->clazz
+    SPILL(rIBASE)
+    call    dvmResolveClass            # eax<- resolved ClassObject ptr
+    UNSPILL(rIBASE)
+    testl   %eax,%eax                  # got null?
+    je      common_exceptionThrown     # yes, handle exception
+    movl    offObject_clazz(rINST),%ecx  # ecx<- obj->clazz
+    jmp     .LOP_CHECK_CAST_JUMBO_resolved       # pick up where we left off
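
The check-cast flow above has three tiers: a null reference always passes, an exact class match passes trivially, and everything else goes through dvmInstanceofNonTrivial, with a ClassCastException thrown on failure. A compact sketch, using stand-in types and a hypothetical instanceof_nontrivial() helper:

    #include <stdbool.h>
    #include <stddef.h>

    struct CClassLite;
    struct CObjectLite { struct CClassLite *clazz; };

    /* Hypothetical stand-in for the dvmInstanceofNonTrivial() call. */
    bool instanceof_nontrivial(const struct CClassLite *instClazz,
                               const struct CClassLite *wantedClazz);

    /* Returns false when the cast fails (the handler then throws). */
    static bool check_cast(const struct CObjectLite *obj,
                           const struct CClassLite *wanted)
    {
        if (obj == NULL || obj->clazz == wanted)
            return true;
        return instanceof_nontrivial(obj->clazz, wanted);
    }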
+
+/* ------------------------------ */
+.L_OP_INSTANCE_OF_JUMBO: /* 0x102 */
+/* File: x86/OP_INSTANCE_OF_JUMBO.S */
+    /*
+     * Check to see if an object reference is an instance of a class.
+     *
+     * Most common situation is a non-null object, being compared against
+     * an already-resolved class.
+     */
+    /* instance-of/jumbo vBBBB, vCCCC, class@AAAAAAAA */
+    movzwl  8(rPC),%eax                 # eax<- CCCC
+    GET_VREG_R %eax %eax                # eax<- vCCCC (obj)
+    movl    rSELF,%ecx
+    testl   %eax,%eax                   # object null?
+    movl    offThread_methodClassDex(%ecx),%ecx  # ecx<- pDvmDex
+    SPILL(rIBASE)                       # preserve rIBASE
+    je      .LOP_INSTANCE_OF_JUMBO_store           # null obj, not instance, store it
+    movl    2(rPC),rIBASE               # rIBASE<- AAAAAAAA
+    movl    offDvmDex_pResClasses(%ecx),%ecx # ecx<- pDvmDex->pResClasses
+    movl    (%ecx,rIBASE,4),%ecx        # ecx<- resolved class
+    movl    offObject_clazz(%eax),%eax  # eax<- obj->clazz
+    testl   %ecx,%ecx                   # have we resolved this before?
+    je      .LOP_INSTANCE_OF_JUMBO_resolve         # not resolved, do it now
+.LOP_INSTANCE_OF_JUMBO_resolved:  # eax<- obj->clazz, ecx<- resolved class
+    cmpl    %eax,%ecx                   # same class (trivial success)?
+    je      .LOP_INSTANCE_OF_JUMBO_trivial         # yes, trivial finish
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  eax holds obj->clazz
+     *  ecx holds class resolved from AAAAAAAA
+     *  rINST holds BBBB
+     */
+    movl    %eax,OUT_ARG0(%esp)
+    movl    %ecx,OUT_ARG1(%esp)
+    call    dvmInstanceofNonTrivial     # eax<- boolean result
+    # fall through to OP_INSTANCE_OF_JUMBO_store
+
+    /*
+     * eax holds boolean result
+     * rINST holds BBBB
+     */
+.LOP_INSTANCE_OF_JUMBO_store:
+    FETCH_INST_OPCODE 5 %ecx
+    UNSPILL(rIBASE)
+    ADVANCE_PC 5
+    SET_VREG %eax rINST                 # vBBBB<- eax
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Trivial test succeeded, save and bail.
+     *  rINST holds BBBB
+     */
+.LOP_INSTANCE_OF_JUMBO_trivial:
+    FETCH_INST_OPCODE 5 %ecx
+    UNSPILL(rIBASE)
+    ADVANCE_PC 5
+    movl    $1,%eax
+    SET_VREG %eax rINST                 # vBBBB<- true
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  rIBASE holds AAAAAAAA
+     */
+.LOP_INSTANCE_OF_JUMBO_resolve:
+    movl    rIBASE,OUT_ARG1(%esp)       # arg1<- AAAAAAAA
+    movl    rSELF,%ecx
+    movl    offThread_method(%ecx),%ecx
+    movl    $1,OUT_ARG2(%esp)          # arg2<- true
+    movl    offMethod_clazz(%ecx),%ecx  # ecx<- method->clazz
+    EXPORT_PC
+    movl    %ecx,OUT_ARG0(%esp)         # arg0<- method->clazz
+    call    dvmResolveClass             # eax<- resolved ClassObject ptr
+    testl   %eax,%eax                   # success?
+    je      common_exceptionThrown      # no, handle exception
+/* Now, we need to sync up with fast path.  We need eax to
+ * hold the obj->clazz, and ecx to hold the resolved class
+ */
+    movl    %eax,%ecx                   # ecx<- resolved class
+    movzwl  8(rPC),%eax                 # eax<- CCCC
+    GET_VREG_R %eax %eax                # eax<- vCCCC (obj)
+    movl    offObject_clazz(%eax),%eax  # eax<- obj->clazz
+    jmp     .LOP_INSTANCE_OF_JUMBO_resolved
+
+/* ------------------------------ */
+.L_OP_NEW_INSTANCE_JUMBO: /* 0x103 */
+/* File: x86/OP_NEW_INSTANCE_JUMBO.S */
+    /*
+     * Create a new instance of a class.
+     */
+    /* new-instance/jumbo vBBBB, class@AAAAAAAA */
+    movl      rSELF,%ecx
+    movl      2(rPC),%eax               # eax<- AAAAAAAA
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- pDvmDex
+    movl      offDvmDex_pResClasses(%ecx),%ecx # ecx<- pDvmDex->pResClasses
+    EXPORT_PC
+    movl      (%ecx,%eax,4),%ecx        # ecx<- resolved class
+    SPILL(rIBASE)
+    testl     %ecx,%ecx                 # resolved?
+    je        .LOP_NEW_INSTANCE_JUMBO_resolve       # no, go do it
+.LOP_NEW_INSTANCE_JUMBO_resolved:  # on entry, ecx<- class
+    cmpb      $CLASS_INITIALIZED,offClassObject_status(%ecx)
+    jne       .LOP_NEW_INSTANCE_JUMBO_needinit
+.LOP_NEW_INSTANCE_JUMBO_initialized:  # on entry, ecx<- class
+    movl      $ALLOC_DONT_TRACK,OUT_ARG1(%esp)
+    movl     %ecx,OUT_ARG0(%esp)
+    call     dvmAllocObject             # eax<- new object
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 4 %ecx
+    testl    %eax,%eax                  # success?
+    je       common_exceptionThrown     # no, bail out
+    SET_VREG %eax rINST
+    ADVANCE_PC 4
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Class initialization required.
+     *
+     *  ecx holds class object
+     */
+.LOP_NEW_INSTANCE_JUMBO_needinit:
+    SPILL_TMP1(%ecx)                    # save object
+    movl    %ecx,OUT_ARG0(%esp)
+    call    dvmInitClass                # initialize class
+    UNSPILL_TMP1(%ecx)                  # restore object
+    testl   %eax,%eax                   # success?
+    jne     .LOP_NEW_INSTANCE_JUMBO_initialized     # success, continue
+    jmp     common_exceptionThrown      # go deal with init exception
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     */
+.LOP_NEW_INSTANCE_JUMBO_resolve:
+    movl    rSELF,%ecx
+    movl    2(rPC),%eax                 # eax<- AAAAAAAA
+    movl    offThread_method(%ecx),%ecx   # ecx<- self->method
+    movl    %eax,OUT_ARG1(%esp)
+    movl    offMethod_clazz(%ecx),%ecx  # ecx<- method->clazz
+    movl    $0,OUT_ARG2(%esp)
+    movl    %ecx,OUT_ARG0(%esp)
+    call    dvmResolveClass             # call(clazz,off,flags)
+    movl    %eax,%ecx                   # ecx<- resolved ClassObject ptr
+    testl   %ecx,%ecx                   # success?
+    jne     .LOP_NEW_INSTANCE_JUMBO_resolved        # good to go
+    jmp     common_exceptionThrown      # no, handle exception
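
new-instance adds one more gate before allocation: the resolved class must already be marked initialized, otherwise the handler calls out to initialize it first. Sketched with stand-in types and hypothetical init_class()/alloc_object() helpers in place of the dvmInitClass()/dvmAllocObject() calls:

    #include <stddef.h>
    #include <stdbool.h>

    #define CLASS_INITIALIZED_LITE 7        /* stand-in for the real status value */
    struct NClassLite { int status; };

    /* Hypothetical helpers in place of dvmInitClass()/dvmAllocObject(). */
    bool  init_class(struct NClassLite *clazz);
    void *alloc_object(struct NClassLite *clazz, int allocFlags);

    /* Initialize if needed, then allocate; NULL means an exception is pending. */
    static void *new_instance(struct NClassLite *clazz, int allocFlags)
    {
        if (clazz->status != CLASS_INITIALIZED_LITE && !init_class(clazz))
            return NULL;
        return alloc_object(clazz, allocFlags);
    }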
+
+/* ------------------------------ */
+.L_OP_NEW_ARRAY_JUMBO: /* 0x104 */
+/* File: x86/OP_NEW_ARRAY_JUMBO.S */
+    /*
+     * Allocate an array of objects, specified with the array class
+     * and a count.
+     *
+     * The verifier guarantees that this is an array class, so we don't
+     * check for it here.
+     */
+    /* new-array/jumbo vBBBB, vCCCC, class@AAAAAAAA */
+    movl    rSELF,%ecx
+    EXPORT_PC
+    movl    offThread_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
+    movl    2(rPC),%eax                       # eax<- AAAAAAAA
+    movl    offDvmDex_pResClasses(%ecx),%ecx  # ecx<- pDvmDex->pResClasses
+    SPILL(rIBASE)
+    movl    (%ecx,%eax,4),%ecx                # ecx<- resolved class
+    movzwl  8(rPC),%eax                       # eax<- CCCC
+    GET_VREG_R %eax %eax                      # eax<- vCCCC (array length)
+    testl   %eax,%eax
+    js      common_errNegativeArraySize       # bail, passing len in eax
+    testl   %ecx,%ecx                         # already resolved?
+    jne     .LOP_NEW_ARRAY_JUMBO_finish                # yes, fast path
+    /*
+     * Resolve class.  (This is an uncommon case.)
+     *  ecx holds class (null here)
+     *  eax holds array length (vCCCC)
+     */
+    movl    rSELF,%ecx
+    SPILL_TMP1(%eax)                   # save array length
+    movl    offThread_method(%ecx),%ecx  # ecx<- self->method
+    movl    2(rPC),%eax                # eax<- AAAAAAAA
+    movl    offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+    movl    %eax,OUT_ARG1(%esp)
+    movl    $0,OUT_ARG2(%esp)
+    movl    %ecx,OUT_ARG0(%esp)
+    call    dvmResolveClass            # eax<- call(clazz,ref,flag)
+    movl    %eax,%ecx
+    UNSPILL_TMP1(%eax)
+    testl   %ecx,%ecx                  # successful resolution?
+    je      common_exceptionThrown     # no, bail.
+# fall through to OP_NEW_ARRAY_JUMBO_finish
+
+    /*
+     * Finish allocation
+     *
+     * ecx holds class
+     * eax holds array length (vCCCC)
+     */
+.LOP_NEW_ARRAY_JUMBO_finish:
+    movl    %ecx,OUT_ARG0(%esp)
+    movl    %eax,OUT_ARG1(%esp)
+    movl    $ALLOC_DONT_TRACK,OUT_ARG2(%esp)
+    call    dvmAllocArrayByClass    # eax<- call(clazz,length,flags)
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 5 %ecx
+    testl   %eax,%eax               # failed?
+    je      common_exceptionThrown  # yup - go handle
+    SET_VREG %eax rINST
+    ADVANCE_PC 5
+    GOTO_NEXT_R %ecx
+
+/* ------------------------------ */
+.L_OP_FILLED_NEW_ARRAY_JUMBO: /* 0x105 */
+/* File: x86/OP_FILLED_NEW_ARRAY_JUMBO.S */
+    /*
+     * Create a new array with elements filled from registers.
+     */
+    /* filled-new-array/jumbo {vCCCC..v(CCCC+BBBB-1)}, type@AAAAAAAA */
+    movl    rSELF,%eax
+    movl    offThread_methodClassDex(%eax),%eax # eax<- pDvmDex
+    movl    2(rPC),%ecx                       # ecx<- AAAAAAAA
+    movl    offDvmDex_pResClasses(%eax),%eax  # eax<- pDvmDex->pResClasses
+    movl    (%eax,%ecx,4),%eax                # eax<- resolved class
+    EXPORT_PC
+    testl   %eax,%eax                         # already resolved?
+    jne     .LOP_FILLED_NEW_ARRAY_JUMBO_continue              # yes, continue
+    # less frequent path, so we'll redo some work
+    movl    rSELF,%eax
+    movl    $0,OUT_ARG2(%esp)                # arg2<- false
+    movl    %ecx,OUT_ARG1(%esp)               # arg1<- AAAAAAAA
+    movl    offThread_method(%eax),%eax         # eax<- self->method
+    movl    offMethod_clazz(%eax),%eax        # eax<- method->clazz
+    movl    %eax,OUT_ARG0(%esp)               # arg0<- clazz
+    SPILL(rIBASE)
+    call    dvmResolveClass                   # eax<- call(clazz,ref,flag)
+    UNSPILL(rIBASE)
+    testl   %eax,%eax                         # null?
+    je      common_exceptionThrown            # yes, handle it
+
+       # note: fall through to .LOP_FILLED_NEW_ARRAY_JUMBO_continue
+
+    /*
+     * On entry:
+     *    eax holds array class
+     *    ecx is scratch
+     */
+.LOP_FILLED_NEW_ARRAY_JUMBO_continue:
+    movl    offClassObject_descriptor(%eax),%ecx  # ecx<- arrayClass->descriptor
+    movl    $ALLOC_DONT_TRACK,OUT_ARG2(%esp)     # arg2<- flags
+    movzbl  1(%ecx),%ecx                          # ecx<- descriptor[1]
+    movl    %eax,OUT_ARG0(%esp)                   # arg0<- arrayClass
+    movl    rSELF,%eax
+    cmpb    $'I',%cl                             # supported?
+    je      1f
+    cmpb    $'L',%cl
+    je      1f
+    cmpb    $'[',%cl
+    jne      .LOP_FILLED_NEW_ARRAY_JUMBO_notimpl                  # no, not handled yet
+1:
+    movl    %ecx,offThread_retval+4(%eax)           # save type
+    movl    rINST,OUT_ARG1(%esp)                  # arg1<- BBBB (length)
+    SPILL(rIBASE)
+    call    dvmAllocArrayByClass     # eax<- call(arrayClass, length, flags)
+    UNSPILL(rIBASE)
+    movl    rSELF,%ecx
+    testl   %eax,%eax                             # alloc successful?
+    je      common_exceptionThrown                # no, handle exception
+    movl    %eax,offThread_retval(%ecx)             # retval.l<- new array
+    movzwl  8(rPC),%ecx                           # ecx<- CCCC
+    leal    offArrayObject_contents(%eax),%eax    # eax<- newArray->contents
+
+/* at this point:
+ *     eax is pointer to tgt
+ *     rINST is length
+ *     ecx is CCCC
+ *  We now need to copy values from registers into the array
+ */
+
+    # set up src pointer
+    SPILL_TMP2(%esi)
+    SPILL_TMP3(%edi)
+    leal    (rFP,%ecx,4),%esi # set up src ptr
+    movl    %eax,%edi         # set up dst ptr
+    movl    rINST,%ecx        # load count register
+    rep
+    movsd
+    UNSPILL_TMP2(%esi)
+    UNSPILL_TMP3(%edi)
+    movl    rSELF,%ecx
+    movl    offThread_retval+4(%ecx),%eax      # eax<- type
+
+    cmpb    $'I',%al                        # Int array?
+    je      5f                               # skip card mark if so
+    movl    offThread_retval(%ecx),%eax        # eax<- object head
+    movl    offThread_cardTable(%ecx),%ecx     # card table base
+    shrl    $GC_CARD_SHIFT,%eax             # convert to card num
+    movb    %cl,(%ecx,%eax)                  # mark card based on object head
+5:
+    FETCH_INST_OPCODE 5 %ecx
+    ADVANCE_PC 5
+    GOTO_NEXT_R %ecx
+
+
+    /*
+     * Throw an exception indicating that we have not implemented this
+     * mode of filled-new-array.
+     */
+.LOP_FILLED_NEW_ARRAY_JUMBO_notimpl:
+    movl    $.LstrFilledNewArrayNotImplA,%eax
+    movl    %eax,OUT_ARG0(%esp)
+    call    dvmThrowInternalError
+    jmp     common_exceptionThrown
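
Once the array is allocated, the rep/movsd block above is nothing more than a word-for-word copy of vCCCC..v(CCCC+length-1) from the frame into the array's contents area, followed by the same card mark used elsewhere for reference stores. The copy itself is equivalent to:

    #include <stdint.h>
    #include <string.h>

    /* Copy `length` 32-bit registers starting at vCCCC into the new array. */
    static void fill_new_array(uint32_t *contents, const uint32_t *fp,
                               uint32_t firstReg, uint32_t length)
    {
        memcpy(contents, fp + firstReg, (size_t)length * sizeof(uint32_t));
    }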
+
+/* ------------------------------ */
+.L_OP_IGET_JUMBO: /* 0x106 */
+/* File: x86/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)                               # preserve rIBASE
+    movl    2(rPC),rIBASE                       # rIBASE<- AAAAAAAA
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzwl  8(rPC),%ecx                         # ecx<- CCCC
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[CCCC], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IGET_JUMBO_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)               # needed by dvmResolveInstField
+    movl    rSELF,rIBASE
+    EXPORT_PC
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
+    SPILL_TMP1(%ecx)                            # save obj pointer across call
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
+    call    dvmResolveInstField                 #  ... to dvmResolveInstField
+    UNSPILL_TMP1(%ecx)
+    testl   %eax,%eax                           #  returns InstField ptr
+    jne     .LOP_IGET_JUMBO_finish
+    jmp     common_exceptionThrown
+
+.LOP_IGET_JUMBO_finish:
+    /*
+     * Currently:
+     *   eax holds resolved field
+     *   ecx holds object
+     *   rINST holds BBBB
+     */
+    movl    offInstField_byteOffset(%eax),%eax  # eax<- byte offset of field
+    testl   %ecx,%ecx                           # object null?
+    je      common_errNullObject                # object was null
+    movl   (%ecx,%eax,1),%ecx                  # ecx<- obj.field (8/16/32 bits)
+    FETCH_INST_OPCODE 5 %eax
+    UNSPILL(rIBASE)                             # restore rIBASE
+    SET_VREG %ecx rINST
+    ADVANCE_PC 5
+    GOTO_NEXT_R %eax
+
+/* ------------------------------ */
+.L_OP_IGET_WIDE_JUMBO: /* 0x107 */
+/* File: x86/OP_IGET_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit instance field get.
+     */
+    /* iget-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)                               # preserve rIBASE
+    movl    2(rPC),rIBASE                       # rIBASE<- AAAAAAAA
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzwl  8(rPC),%ecx                         # ecx<- CCCC
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[CCCC], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IGET_WIDE_JUMBO_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)               # for dvmResolveInstField
+    movl    rSELF,rIBASE
+    EXPORT_PC
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
+    SPILL_TMP1(%ecx)                            # save obj pointer across call
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
+    call    dvmResolveInstField                 #  ... to dvmResolveInstField
+    UNSPILL_TMP1(%ecx)
+    testl   %eax,%eax                           # returns InstField ptr
+    jne     .LOP_IGET_WIDE_JUMBO_finish
+    jmp     common_exceptionThrown
+
+.LOP_IGET_WIDE_JUMBO_finish:
+    /*
+     * Currently:
+     *   eax holds resolved field
+     *   ecx holds object
+     *   rINST holds BBBB
+     */
+    movl    offInstField_byteOffset(%eax),%eax  # eax<- byte offset of field
+    testl   %ecx,%ecx                           # object null?
+    je      common_errNullObject                # object was null
+    leal    (%ecx,%eax,1),%eax                  # eax<- address of field
+    movl    (%eax),%ecx                         # ecx<- lsw
+    movl    4(%eax),%eax                        # eax<- msw
+    SET_VREG_WORD %ecx rINST 0
+    FETCH_INST_OPCODE 5 %ecx
+    UNSPILL(rIBASE)                             # restore rIBASE
+    SET_VREG_WORD %eax rINST 1
+    ADVANCE_PC 5
+    GOTO_NEXT_R %ecx
+
+/* ------------------------------ */
+.L_OP_IGET_OBJECT_JUMBO: /* 0x108 */
+/* File: x86/OP_IGET_OBJECT_JUMBO.S */
+/* File: x86/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)                               # preserve rIBASE
+    movl    2(rPC),rIBASE                       # rIBASE<- AAAAAAAA
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzwl  8(rPC),%ecx                         # ecx<- CCCC
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[CCCC], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IGET_OBJECT_JUMBO_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)               # needed by dvmResolveInstField
+    movl    rSELF,rIBASE
+    EXPORT_PC
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
+    SPILL_TMP1(%ecx)                            # save obj pointer across call
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
+    call    dvmResolveInstField                 #  ... to dvmResolveInstField
+    UNSPILL_TMP1(%ecx)
+    testl   %eax,%eax                           #  returns InstField ptr
+    jne     .LOP_IGET_OBJECT_JUMBO_finish
+    jmp     common_exceptionThrown
+
+.LOP_IGET_OBJECT_JUMBO_finish:
+    /*
+     * Currently:
+     *   eax holds resolved field
+     *   ecx holds object
+     *   rINST holds BBBB
+     */
+    movl    offInstField_byteOffset(%eax),%eax  # eax<- byte offset of field
+    testl   %ecx,%ecx                           # object null?
+    je      common_errNullObject                # object was null
+    movl   (%ecx,%eax,1),%ecx                  # ecx<- obj.field (8/16/32 bits)
+    FETCH_INST_OPCODE 5 %eax
+    UNSPILL(rIBASE)                             # restore rIBASE
+    SET_VREG %ecx rINST
+    ADVANCE_PC 5
+    GOTO_NEXT_R %eax
+
+
+/* ------------------------------ */
+.L_OP_IGET_BOOLEAN_JUMBO: /* 0x109 */
+/* File: x86/OP_IGET_BOOLEAN_JUMBO.S */
+/* File: x86/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)                               # preserve rIBASE
+    movl    2(rPC),rIBASE                       # rIBASE<- AAAAAAAA
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzwl  8(rPC),%ecx                         # ecx<- CCCC
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[CCCC], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IGET_BOOLEAN_JUMBO_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)               # needed by dvmResolveInstField
+    movl    rSELF,rIBASE
+    EXPORT_PC
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
+    SPILL_TMP1(%ecx)                            # save obj pointer across call
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
+    call    dvmResolveInstField                 #  ... to dvmResolveInstField
+    UNSPILL_TMP1(%ecx)
+    testl   %eax,%eax                           #  returns InstField ptr
+    jne     .LOP_IGET_BOOLEAN_JUMBO_finish
+    jmp     common_exceptionThrown
+
+.LOP_IGET_BOOLEAN_JUMBO_finish:
+    /*
+     * Currently:
+     *   eax holds resolved field
+     *   ecx holds object
+     *   rINST holds BBBB
+     */
+    movl    offInstField_byteOffset(%eax),%eax  # eax<- byte offset of field
+    testl   %ecx,%ecx                           # object null?
+    je      common_errNullObject                # object was null
+    movzbl   (%ecx,%eax,1),%ecx                  # ecx<- obj.field (8/16/32 bits)
+    FETCH_INST_OPCODE 5 %eax
+    UNSPILL(rIBASE)                             # restore rIBASE
+    SET_VREG %ecx rINST
+    ADVANCE_PC 5
+    GOTO_NEXT_R %eax
+
+
+/* ------------------------------ */
+.L_OP_IGET_BYTE_JUMBO: /* 0x10a */
+/* File: x86/OP_IGET_BYTE_JUMBO.S */
+/* File: x86/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)                               # preserve rIBASE
+    movl    2(rPC),rIBASE                       # rIBASE<- AAAAAAAA
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzwl  8(rPC),%ecx                         # ecx<- CCCC
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[CCCC], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IGET_BYTE_JUMBO_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)               # needed by dvmResolveInstField
+    movl    rSELF,rIBASE
+    EXPORT_PC
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
+    SPILL_TMP1(%ecx)                            # save obj pointer across call
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
+    call    dvmResolveInstField                 #  ... to dvmResolveInstField
+    UNSPILL_TMP1(%ecx)
+    testl   %eax,%eax                           #  returns InstField ptr
+    jne     .LOP_IGET_BYTE_JUMBO_finish
+    jmp     common_exceptionThrown
+
+.LOP_IGET_BYTE_JUMBO_finish:
+    /*
+     * Currently:
+     *   eax holds resolved field
+     *   ecx holds object
+     *   rINST holds BBBB
+     */
+    movl    offInstField_byteOffset(%eax),%eax  # eax<- byte offset of field
+    testl   %ecx,%ecx                           # object null?
+    je      common_errNullObject                # object was null
+    movsbl   (%ecx,%eax,1),%ecx                  # ecx<- obj.field (8/16/32 bits)
+    FETCH_INST_OPCODE 5 %eax
+    UNSPILL(rIBASE)                             # restore rIBASE
+    SET_VREG %ecx rINST
+    ADVANCE_PC 5
+    GOTO_NEXT_R %eax
+
+
+/* ------------------------------ */
+.L_OP_IGET_CHAR_JUMBO: /* 0x10b */
+/* File: x86/OP_IGET_CHAR_JUMBO.S */
+/* File: x86/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)                               # preserve rIBASE
+    movl    2(rPC),rIBASE                       # rIBASE<- AAAAAAAA
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzwl  8(rPC),%ecx                         # ecx<- CCCC
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[CCCC], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IGET_CHAR_JUMBO_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)               # needed by dvmResolveInstField
+    movl    rSELF,rIBASE
+    EXPORT_PC
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
+    SPILL_TMP1(%ecx)                            # save obj pointer across call
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
+    call    dvmResolveInstField                 #  ... to dvmResolveInstField
+    UNSPILL_TMP1(%ecx)
+    testl   %eax,%eax                           #  returns InstField ptr
+    jne     .LOP_IGET_CHAR_JUMBO_finish
+    jmp     common_exceptionThrown
+
+.LOP_IGET_CHAR_JUMBO_finish:
+    /*
+     * Currently:
+     *   eax holds resolved field
+     *   ecx holds object
+     *   rINST holds BBBB
+     */
+    movl    offInstField_byteOffset(%eax),%eax  # eax<- byte offset of field
+    testl   %ecx,%ecx                           # object null?
+    je      common_errNullObject                # object was null
+    movzwl   (%ecx,%eax,1),%ecx                  # ecx<- obj.field (8/16/32 bits)
+    FETCH_INST_OPCODE 5 %eax
+    UNSPILL(rIBASE)                             # restore rIBASE
+    SET_VREG %ecx rINST
+    ADVANCE_PC 5
+    GOTO_NEXT_R %eax
+
+
+/* ------------------------------ */
+.L_OP_IGET_SHORT_JUMBO: /* 0x10c */
+/* File: x86/OP_IGET_SHORT_JUMBO.S */
+/* File: x86/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)                               # preserve rIBASE
+    movl    2(rPC),rIBASE                       # rIBASE<- AAAAAAAA
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzwl  8(rPC),%ecx                         # ecx<- CCCC
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[CCCC], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IGET_SHORT_JUMBO_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)               # needed by dvmResolveInstField
+    movl    rSELF,rIBASE
+    EXPORT_PC
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
+    SPILL_TMP1(%ecx)                            # save obj pointer across call
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
+    call    dvmResolveInstField                 #  ... to dvmResolveInstField
+    UNSPILL_TMP1(%ecx)
+    testl   %eax,%eax                           #  returns InstField ptr
+    jne     .LOP_IGET_SHORT_JUMBO_finish
+    jmp     common_exceptionThrown
+
+.LOP_IGET_SHORT_JUMBO_finish:
+    /*
+     * Currently:
+     *   eax holds resolved field
+     *   ecx holds object
+     *   rINST holds BBBB
+     */
+    movl    offInstField_byteOffset(%eax),%eax  # eax<- byte offset of field
+    testl   %ecx,%ecx                           # object null?
+    je      common_errNullObject                # object was null
+    movswl   (%ecx,%eax,1),%ecx                  # ecx<- obj.field (8/16/32 bits)
+    FETCH_INST_OPCODE 5 %eax
+    UNSPILL(rIBASE)                             # restore rIBASE
+    SET_VREG %ecx rINST
+    ADVANCE_PC 5
+    GOTO_NEXT_R %eax
+
+
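All six jumbo IGET variants above are stamped from the same template: read the 32-bit field index AAAAAAAA out of the instruction stream, probe the per-DEX resolved-field cache, fall back to dvmResolveInstField on a miss, null-check the object in vCCCC, then load the value at the field's byte offset into vBBBB with the width/extension chosen per type (movzbl for boolean, movsbl for byte, and so on, as seen above). A minimal C sketch of that flow follows; the struct layouts and every name other than dvmResolveInstField are simplified stand-ins rather than the real Dalvik headers, and the two-argument resolver signature is inferred from the argument setup in the asm.

    #include <stddef.h>

    typedef struct { int byteOffset; } InstField;         /* stand-in */
    typedef struct { InstField **pResFields; } DvmDex;     /* stand-in */

    /* Resolver called on the slow path; signature assumed from the asm. */
    extern InstField *dvmResolveInstField(void *referrerClazz, unsigned fieldIdx);

    /* Sketch of iget-boolean/jumbo vBBBB, vCCCC, field@AAAAAAAA.
     * Returns 0 on success, -1 when the handler should throw. */
    static int igetBooleanJumboSketch(DvmDex *pDvmDex, void *referrerClazz,
                                      unsigned fieldIdx, const char *obj,
                                      unsigned *dstVreg)
    {
        InstField *field = pDvmDex->pResFields[fieldIdx];   /* cache probe  */
        if (field == NULL) {
            field = dvmResolveInstField(referrerClazz, fieldIdx);
            if (field == NULL)
                return -1;                                   /* unresolvable */
        }
        if (obj == NULL)
            return -1;                                       /* null object  */
        *dstVreg = *(const unsigned char *)(obj + field->byteOffset); /* movzbl */
        return 0;
    }

The IPUT variants below are the mirror image of this flow, storing the vreg value instead of loading it.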
+/* ------------------------------ */
+.L_OP_IPUT_JUMBO: /* 0x10d */
+/* File: x86/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-object/jumbo, iput-boolean/jumbo, iput-byte/jumbo,
+     *      iput-char/jumbo, iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)
+    movl    2(rPC),rIBASE                       # rIBASE<- AAAAAAAA
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzwl  8(rPC),%ecx                         # ecx<- CCCC
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[CCCC], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IPUT_JUMBO_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)
+    movl    rSELF,rIBASE
+    EXPORT_PC
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
+    SPILL_TMP1(%ecx)                            # save obj pointer across call
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
+    call    dvmResolveInstField                 #  ... to dvmResolveInstField
+    UNSPILL_TMP1(%ecx)
+    testl   %eax,%eax                           # returns InstField ptr
+    jne     .LOP_IPUT_JUMBO_finish
+    jmp     common_exceptionThrown
+
+.LOP_IPUT_JUMBO_finish:
+    /*
+     * Currently:
+     *   eax holds resolved field
+     *   ecx holds object
+     *   rINST holds BBBB
+     */
+    GET_VREG_R rINST rINST                       # rINST<- v[BBBB]
+    movl    offInstField_byteOffset(%eax),%eax   # eax<- byte offset of field
+    testl   %ecx,%ecx                            # object null?
+    je      common_errNullObject                 # object was null
+    movl   rINST,(%ecx,%eax,1)            # obj.field <- v[BBBB](8/16/32 bits)
+    FETCH_INST_OPCODE 5 %ecx
+    UNSPILL(rIBASE)
+    ADVANCE_PC 5
+    GOTO_NEXT_R %ecx
+
+/* ------------------------------ */
+.L_OP_IPUT_WIDE_JUMBO: /* 0x10e */
+/* File: x86/OP_IPUT_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit instance field put.
+     */
+    /* iput-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)
+    movl    2(rPC),rIBASE                       # rIBASE<- AAAAAAAA
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzwl  8(rPC),%ecx                         # ecx<- CCCC
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[CCCC], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IPUT_WIDE_JUMBO_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)
+    movl    rSELF,rIBASE
+    EXPORT_PC
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
+    SPILL_TMP1(%ecx)                            # save obj pointer across call
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
+    call    dvmResolveInstField                 #  ... to dvmResolveInstField
+    UNSPILL_TMP1(%ecx)
+    testl   %eax,%eax                           #  ... which returns InstField ptr
+    jne     .LOP_IPUT_WIDE_JUMBO_finish
+    jmp     common_exceptionThrown
+
+.LOP_IPUT_WIDE_JUMBO_finish:
+    /*
+     * Currently:
+     *   eax holds resolved field
+     *   ecx holds object
+     *   rIBASE is scratch, but needs to be unspilled
+     *   rINST holds BBBB
+     */
+    movl    offInstField_byteOffset(%eax),%eax  # eax<- byte offset of field
+    testl   %ecx,%ecx                           # object null?
+    je      common_errNullObject                # object was null
+    leal    (%ecx,%eax,1),%eax                  # eax<- address of field
+    GET_VREG_WORD %ecx rINST 0                  # ecx<- lsw
+    GET_VREG_WORD rINST rINST 1                 # rINST<- msw
+    movl    rINST,4(%eax)
+    movl    %ecx,(%eax)
+    FETCH_INST_OPCODE 5 %ecx
+    UNSPILL(rIBASE)
+    ADVANCE_PC 5
+    GOTO_NEXT_R %ecx
+
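The wide put just above stores a 64-bit register pair as two separate 32-bit words, low word at the field offset and high word at offset +4, which is why it reads the vreg pair with two GET_VREG_WORD invocations. A tiny sketch of only that split (names illustrative; little-endian layout as on x86):

    #include <stdint.h>
    #include <string.h>

    /* Sketch of the store step of iput-wide/jumbo: write a 64-bit vreg
     * pair into the object at the resolved field's byte offset. */
    static void iputWideStoreSketch(char *obj, int byteOffset,
                                    uint32_t lsw, uint32_t msw)
    {
        memcpy(obj + byteOffset,     &lsw, sizeof lsw);  /* movl %ecx,(%eax)   */
        memcpy(obj + byteOffset + 4, &msw, sizeof msw);  /* movl rINST,4(%eax) */
    }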
+/* ------------------------------ */
+.L_OP_IPUT_OBJECT_JUMBO: /* 0x10f */
+/* File: x86/OP_IPUT_OBJECT_JUMBO.S */
+    /*
+     * Jumbo object field put.
+     */
+    /* iput-object/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)
+    movl    2(rPC),rIBASE                       # rIBASE<- AAAAAAAA
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzwl  8(rPC),%ecx                         # ecx<- CCCC
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[CCCC], the object ptr
+    movl    (%eax,rIBASE,4),%eax                  # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IPUT_OBJECT_JUMBO_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)
+    movl    rSELF,rIBASE
+    EXPORT_PC
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
+    SPILL_TMP1(%ecx)                            # save obj pointer across call
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
+    call    dvmResolveInstField                 #  ... to dvmResolveInstField
+    UNSPILL_TMP1(%ecx)
+    testl   %eax,%eax                           # returns InstField ptr
+    jne     .LOP_IPUT_OBJECT_JUMBO_finish
+    jmp     common_exceptionThrown
+
+.LOP_IPUT_OBJECT_JUMBO_finish:
+    /*
+     * Currently:
+     *   eax holds resolved field
+     *   ecx holds object
+     *   rIBASE is scratch, but needs to be unspilled
+     *   rINST holds BBBB
+     */
+    GET_VREG_R rINST rINST                      # rINST<- v[BBBB]
+    movl    offInstField_byteOffset(%eax),%eax  # eax<- byte offset of field
+    testl   %ecx,%ecx                           # object null?
+    je      common_errNullObject                # object was null
+    movl    rINST,(%ecx,%eax)      # obj.field <- v[BBBB](8/16/32 bits)
+    movl    rSELF,%eax
+    testl   rINST,rINST                         # stored a NULL?
+    movl    offThread_cardTable(%eax),%eax      # get card table base
+    je      1f                                  # skip card mark if null store
+    shrl    $GC_CARD_SHIFT,%ecx                # object head to card number
+    movb    %al,(%eax,%ecx)                     # mark card using object head
+1:
+    FETCH_INST_OPCODE 5 %ecx
+    UNSPILL(rIBASE)
+    ADVANCE_PC 5
+    GOTO_NEXT_R %ecx
+
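iput-object/jumbo is the only field put above that needs a GC write barrier: after storing a non-null reference it shifts the object address right by GC_CARD_SHIFT and writes one byte into the card table, dirtying the card that covers the object header. The asm stores %al, i.e. the low byte of the card-table base register, as the dirty value, and the sketch below mirrors that literally. GC_CARD_SHIFT of 7 (128-byte cards) is an assumption here, and the types are placeholders.

    #include <stddef.h>
    #include <stdint.h>

    #define GC_CARD_SHIFT 7   /* assumed card size of 128 bytes */

    /* Sketch of the store-plus-card-mark sequence in iput-object/jumbo. */
    static void iputObjectSketch(uint8_t *cardTableBase, char *obj,
                                 int byteOffset, void *ref)
    {
        *(void **)(obj + byteOffset) = ref;        /* the reference store   */
        if (ref != NULL) {                         /* null stores skip mark */
            uintptr_t card = (uintptr_t)obj >> GC_CARD_SHIFT;
            cardTableBase[card] = (uint8_t)(uintptr_t)cardTableBase;  /* movb %al */
        }
    }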
+/* ------------------------------ */
+.L_OP_IPUT_BOOLEAN_JUMBO: /* 0x110 */
+/* File: x86/OP_IPUT_BOOLEAN_JUMBO.S */
+/* File: x86/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-object/jumbo, iput-boolean/jumbo, iput-byte/jumbo,
+     *      iput-char/jumbo, iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)
+    movl    2(rPC),rIBASE                       # rIBASE<- AAAAAAAA
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzwl  8(rPC),%ecx                         # ecx<- CCCC
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[CCCC], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IPUT_BOOLEAN_JUMBO_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)
+    movl    rSELF,rIBASE
+    EXPORT_PC
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
+    SPILL_TMP1(%ecx)                            # save obj pointer across call
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
+    call    dvmResolveInstField                 #  ... to dvmResolveInstField
+    UNSPILL_TMP1(%ecx)
+    testl   %eax,%eax                           # returns InstField ptr
+    jne     .LOP_IPUT_BOOLEAN_JUMBO_finish
+    jmp     common_exceptionThrown
+
+.LOP_IPUT_BOOLEAN_JUMBO_finish:
+    /*
+     * Currently:
+     *   eax holds resolved field
+     *   ecx holds object
+     *   rINST holds BBBB
+     */
+    GET_VREG_R rINST rINST                       # rINST<- v[BBBB]
+    movl    offInstField_byteOffset(%eax),%eax   # eax<- byte offset of field
+    testl   %ecx,%ecx                            # object null?
+    je      common_errNullObject                 # object was null
+    movb   rINSTbl,(%ecx,%eax,1)            # obj.field <- v[BBBB](8/16/32 bits)
+    FETCH_INST_OPCODE 5 %ecx
+    UNSPILL(rIBASE)
+    ADVANCE_PC 5
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_IPUT_BYTE_JUMBO: /* 0x111 */
+/* File: x86/OP_IPUT_BYTE_JUMBO.S */
+/* File: x86/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-object/jumbo, iput-boolean/jumbo, iput-byte/jumbo,
+     *      iput-char/jumbo, iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)
+    movl    2(rPC),rIBASE                       # rIBASE<- AAAAAAAA
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzwl  8(rPC),%ecx                         # ecx<- CCCC
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[CCCC], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IPUT_BYTE_JUMBO_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)
+    movl    rSELF,rIBASE
+    EXPORT_PC
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
+    SPILL_TMP1(%ecx)                            # save obj pointer across call
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
+    call    dvmResolveInstField                 #  ... to dvmResolveInstField
+    UNSPILL_TMP1(%ecx)
+    testl   %eax,%eax                           # returns InstField ptr
+    jne     .LOP_IPUT_BYTE_JUMBO_finish
+    jmp     common_exceptionThrown
+
+.LOP_IPUT_BYTE_JUMBO_finish:
+    /*
+     * Currently:
+     *   eax holds resolved field
+     *   ecx holds object
+     *   rINST holds BBBB
+     */
+    GET_VREG_R rINST rINST                       # rINST<- v[BBBB]
+    movl    offInstField_byteOffset(%eax),%eax   # eax<- byte offset of field
+    testl   %ecx,%ecx                            # object null?
+    je      common_errNullObject                 # object was null
+    movb   rINSTbl,(%ecx,%eax,1)            # obj.field <- v[BBBB](8/16/32 bits)
+    FETCH_INST_OPCODE 5 %ecx
+    UNSPILL(rIBASE)
+    ADVANCE_PC 5
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_IPUT_CHAR_JUMBO: /* 0x112 */
+/* File: x86/OP_IPUT_CHAR_JUMBO.S */
+/* File: x86/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-object/jumbo, iput-boolean/jumbo, iput-byte/jumbo,
+     *      iput-char/jumbo, iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)
+    movl    2(rPC),rIBASE                       # rIBASE<- AAAAAAAA
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzwl  8(rPC),%ecx                         # ecx<- CCCC
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[CCCC], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IPUT_CHAR_JUMBO_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)
+    movl    rSELF,rIBASE
+    EXPORT_PC
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
+    SPILL_TMP1(%ecx)                            # save obj pointer across call
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
+    call    dvmResolveInstField                 #  ... to dvmResolveInstField
+    UNSPILL_TMP1(%ecx)
+    testl   %eax,%eax                           # returns InstField ptr
+    jne     .LOP_IPUT_CHAR_JUMBO_finish
+    jmp     common_exceptionThrown
+
+.LOP_IPUT_CHAR_JUMBO_finish:
+    /*
+     * Currently:
+     *   eax holds resolved field
+     *   ecx holds object
+     *   rINST holds BBBB
+     */
+    GET_VREG_R rINST rINST                       # rINST<- v[BBBB]
+    movl    offInstField_byteOffset(%eax),%eax   # eax<- byte offset of field
+    testl   %ecx,%ecx                            # object null?
+    je      common_errNullObject                 # object was null
+    movw   rINSTw,(%ecx,%eax,1)            # obj.field <- v[BBBB](8/16/32 bits)
+    FETCH_INST_OPCODE 5 %ecx
+    UNSPILL(rIBASE)
+    ADVANCE_PC 5
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_IPUT_SHORT_JUMBO: /* 0x113 */
+/* File: x86/OP_IPUT_SHORT_JUMBO.S */
+/* File: x86/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-object/jumbo, iput-boolean/jumbo, iput-byte/jumbo,
+     *      iput-char/jumbo, iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)
+    movl    2(rPC),rIBASE                       # rIBASE<- AAAAAAAA
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzwl  8(rPC),%ecx                         # ecx<- CCCC
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[CCCC], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .LOP_IPUT_SHORT_JUMBO_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)
+    movl    rSELF,rIBASE
+    EXPORT_PC
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
+    SPILL_TMP1(%ecx)                            # save obj pointer across call
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
+    call    dvmResolveInstField                 #  ... to dvmResolveInstField
+    UNSPILL_TMP1(%ecx)
+    testl   %eax,%eax                           # returns InstField ptr
+    jne     .LOP_IPUT_SHORT_JUMBO_finish
+    jmp     common_exceptionThrown
+
+.LOP_IPUT_SHORT_JUMBO_finish:
+    /*
+     * Currently:
+     *   eax holds resolved field
+     *   ecx holds object
+     *   rINST holds BBBB
+     */
+    GET_VREG_R rINST rINST                       # rINST<- v[BBBB]
+    movl    offInstField_byteOffset(%eax),%eax   # eax<- byte offset of field
+    testl   %ecx,%ecx                            # object null?
+    je      common_errNullObject                 # object was null
+    movw   rINSTw,(%ecx,%eax,1)            # obj.field <- v[BBBB](8/16/32 bits)
+    FETCH_INST_OPCODE 5 %ecx
+    UNSPILL(rIBASE)
+    ADVANCE_PC 5
+    GOTO_NEXT_R %ecx
+
+
+/* ------------------------------ */
+.L_OP_SGET_JUMBO: /* 0x114 */
+/* File: x86/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    movl      rSELF,%ecx
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SGET_JUMBO_resolve                # if not, make it so
+.LOP_SGET_JUMBO_finish:     # field ptr in eax
+    movl      offStaticField_value(%eax),%eax
+    FETCH_INST_OPCODE 4 %ecx
+    ADVANCE_PC 4
+    SET_VREG %eax rINST
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Go resolve the field
+     */
+.LOP_SGET_JUMBO_resolve:
+    movl     rSELF,%ecx
+    movl     2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
+    EXPORT_PC                                   # could throw, need to export
+    movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
+    movl     %eax,OUT_ARG1(%esp)
+    movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
+    call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
+    testl    %eax,%eax
+    jne      .LOP_SGET_JUMBO_finish                 # success, continue
+    jmp      common_exceptionThrown             # no, handle exception
+
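The static-field getters all share this shape: probe pDvmDex->pResFields with the 32-bit index AAAAAAAA, call dvmResolveStaticField on a miss (after exporting the PC, since resolution can throw), then read the value slot of the resolved StaticField into vBBBB. A rough C rendering with stand-in types; only dvmResolveStaticField is a real entry point, and its two-argument form is inferred from the call setup above.

    #include <stddef.h>

    typedef struct { unsigned value; } StaticField;         /* stand-in */
    typedef struct { StaticField **pResFields; } DvmDex;    /* stand-in */

    extern StaticField *dvmResolveStaticField(void *referrerClazz,
                                              unsigned fieldIdx);

    /* Sketch of sget/jumbo vBBBB, field@AAAAAAAA. */
    static int sgetJumboSketch(DvmDex *pDvmDex, void *referrerClazz,
                               unsigned fieldIdx, unsigned *dstVreg)
    {
        StaticField *field = pDvmDex->pResFields[fieldIdx];
        if (field == NULL) {
            field = dvmResolveStaticField(referrerClazz, fieldIdx);
            if (field == NULL)
                return -1;            /* exception pending */
        }
        *dstVreg = field->value;      /* offStaticField_value load */
        return 0;
    }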
+/* ------------------------------ */
+.L_OP_SGET_WIDE_JUMBO: /* 0x115 */
+/* File: x86/OP_SGET_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit SGET handler.
+     */
+    /* sget-wide/jumbo vBBBB, field@AAAAAAAA */
+    movl      rSELF,%ecx
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SGET_WIDE_JUMBO_resolve                # if not, make it so
+.LOP_SGET_WIDE_JUMBO_finish:     # field ptr in eax
+    movl      offStaticField_value(%eax),%ecx    # ecx<- lsw
+    movl      4+offStaticField_value(%eax),%eax  # eax<- msw
+    SET_VREG_WORD %ecx rINST 0
+    FETCH_INST_OPCODE 4 %ecx
+    SET_VREG_WORD %eax rINST 1
+    ADVANCE_PC 4
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Go resolve the field
+     */
+.LOP_SGET_WIDE_JUMBO_resolve:
+    movl     rSELF,%ecx
+    movl     2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
+    EXPORT_PC                                   # could throw, need to export
+    movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
+    movl     %eax,OUT_ARG1(%esp)
+    movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
+    call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
+    testl    %eax,%eax
+    jne      .LOP_SGET_WIDE_JUMBO_finish                 # success, continue
+    jmp      common_exceptionThrown             # no, handle exception
+
+/* ------------------------------ */
+.L_OP_SGET_OBJECT_JUMBO: /* 0x116 */
+/* File: x86/OP_SGET_OBJECT_JUMBO.S */
+/* File: x86/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    movl      rSELF,%ecx
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SGET_OBJECT_JUMBO_resolve                # if not, make it so
+.LOP_SGET_OBJECT_JUMBO_finish:     # field ptr in eax
+    movl      offStaticField_value(%eax),%eax
+    FETCH_INST_OPCODE 4 %ecx
+    ADVANCE_PC 4
+    SET_VREG %eax rINST
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Go resolve the field
+     */
+.LOP_SGET_OBJECT_JUMBO_resolve:
+    movl     rSELF,%ecx
+    movl     2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
+    EXPORT_PC                                   # could throw, need to export
+    movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
+    movl     %eax,OUT_ARG1(%esp)
+    movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
+    call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
+    testl    %eax,%eax
+    jne      .LOP_SGET_OBJECT_JUMBO_finish                 # success, continue
+    jmp      common_exceptionThrown             # no, handle exception
+
+
+/* ------------------------------ */
+.L_OP_SGET_BOOLEAN_JUMBO: /* 0x117 */
+/* File: x86/OP_SGET_BOOLEAN_JUMBO.S */
+/* File: x86/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    movl      rSELF,%ecx
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SGET_BOOLEAN_JUMBO_resolve                # if not, make it so
+.LOP_SGET_BOOLEAN_JUMBO_finish:     # field ptr in eax
+    movl      offStaticField_value(%eax),%eax
+    FETCH_INST_OPCODE 4 %ecx
+    ADVANCE_PC 4
+    SET_VREG %eax rINST
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Go resolve the field
+     */
+.LOP_SGET_BOOLEAN_JUMBO_resolve:
+    movl     rSELF,%ecx
+    movl     2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
+    EXPORT_PC                                   # could throw, need to export
+    movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
+    movl     %eax,OUT_ARG1(%esp)
+    movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
+    call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
+    testl    %eax,%eax
+    jne      .LOP_SGET_BOOLEAN_JUMBO_finish                 # success, continue
+    jmp      common_exceptionThrown             # no, handle exception
+
+
+/* ------------------------------ */
+.L_OP_SGET_BYTE_JUMBO: /* 0x118 */
+/* File: x86/OP_SGET_BYTE_JUMBO.S */
+/* File: x86/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    movl      rSELF,%ecx
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SGET_BYTE_JUMBO_resolve                # if not, make it so
+.LOP_SGET_BYTE_JUMBO_finish:     # field ptr in eax
+    movl      offStaticField_value(%eax),%eax
+    FETCH_INST_OPCODE 4 %ecx
+    ADVANCE_PC 4
+    SET_VREG %eax rINST
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Go resolve the field
+     */
+.LOP_SGET_BYTE_JUMBO_resolve:
+    movl     rSELF,%ecx
+    movl     2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
+    EXPORT_PC                                   # could throw, need to export
+    movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
+    movl     %eax,OUT_ARG1(%esp)
+    movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
+    call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
+    testl    %eax,%eax
+    jne      .LOP_SGET_BYTE_JUMBO_finish                 # success, continue
+    jmp      common_exceptionThrown             # no, handle exception
+
+
+/* ------------------------------ */
+.L_OP_SGET_CHAR_JUMBO: /* 0x119 */
+/* File: x86/OP_SGET_CHAR_JUMBO.S */
+/* File: x86/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    movl      rSELF,%ecx
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SGET_CHAR_JUMBO_resolve                # if not, make it so
+.LOP_SGET_CHAR_JUMBO_finish:     # field ptr in eax
+    movl      offStaticField_value(%eax),%eax
+    FETCH_INST_OPCODE 4 %ecx
+    ADVANCE_PC 4
+    SET_VREG %eax rINST
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Go resolve the field
+     */
+.LOP_SGET_CHAR_JUMBO_resolve:
+    movl     rSELF,%ecx
+    movl     2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
+    EXPORT_PC                                   # could throw, need to export
+    movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
+    movl     %eax,OUT_ARG1(%esp)
+    movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
+    call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
+    testl    %eax,%eax
+    jne      .LOP_SGET_CHAR_JUMBO_finish                 # success, continue
+    jmp      common_exceptionThrown             # no, handle exception
+
+
+/* ------------------------------ */
+.L_OP_SGET_SHORT_JUMBO: /* 0x11a */
+/* File: x86/OP_SGET_SHORT_JUMBO.S */
+/* File: x86/OP_SGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    movl      rSELF,%ecx
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SGET_SHORT_JUMBO_resolve                # if not, make it so
+.LOP_SGET_SHORT_JUMBO_finish:     # field ptr in eax
+    movl      offStaticField_value(%eax),%eax
+    FETCH_INST_OPCODE 4 %ecx
+    ADVANCE_PC 4
+    SET_VREG %eax rINST
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Go resolve the field
+     */
+.LOP_SGET_SHORT_JUMBO_resolve:
+    movl     rSELF,%ecx
+    movl     2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
+    EXPORT_PC                                   # could throw, need to export
+    movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
+    movl     %eax,OUT_ARG1(%esp)
+    movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
+    call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
+    testl    %eax,%eax
+    jne      .LOP_SGET_SHORT_JUMBO_finish                 # success, continue
+    jmp      common_exceptionThrown             # no, handle exception
+
+
+/* ------------------------------ */
+.L_OP_SPUT_JUMBO: /* 0x11b */
+/* File: x86/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    movl      rSELF,%ecx
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SPUT_JUMBO_resolve                # if not, make it so
+.LOP_SPUT_JUMBO_finish:     # field ptr in eax
+    GET_VREG_R  rINST rINST
+    FETCH_INST_OPCODE 4 %ecx
+    ADVANCE_PC 4
+    movl      rINST,offStaticField_value(%eax)
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Go resolve the field
+     */
+.LOP_SPUT_JUMBO_resolve:
+    movl     rSELF,%ecx
+    movl     2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
+    EXPORT_PC                                   # could throw, need to export
+    movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
+    movl     %eax,OUT_ARG1(%esp)
+    movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
+    call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
+    testl    %eax,%eax
+    jne      .LOP_SPUT_JUMBO_finish                 # success, continue
+    jmp      common_exceptionThrown             # no, handle exception
+
+/* ------------------------------ */
+.L_OP_SPUT_WIDE_JUMBO: /* 0x11c */
+/* File: x86/OP_SPUT_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit SPUT handler.
+     */
+    /* sput-wide/jumbo vBBBB, field@AAAAAAAA */
+    movl      rSELF,%ecx
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SPUT_WIDE_JUMBO_resolve                # if not, make it so
+.LOP_SPUT_WIDE_JUMBO_finish:     # field ptr in eax
+    GET_VREG_WORD %ecx rINST 0                  # ecx<- lsw
+    GET_VREG_WORD rINST rINST 1                 # rINST<- msw
+    movl      %ecx,offStaticField_value(%eax)
+    FETCH_INST_OPCODE 4 %ecx
+    movl      rINST,4+offStaticField_value(%eax)
+    ADVANCE_PC 4
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Go resolve the field
+     */
+.LOP_SPUT_WIDE_JUMBO_resolve:
+    movl     rSELF,%ecx
+    movl     2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
+    EXPORT_PC                                   # could throw, need to export
+    movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
+    movl     %eax,OUT_ARG1(%esp)
+    movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
+    call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
+    testl    %eax,%eax
+    jne      .LOP_SPUT_WIDE_JUMBO_finish                 # success, continue
+    jmp      common_exceptionThrown             # no, handle exception
+
+/* ------------------------------ */
+.L_OP_SPUT_OBJECT_JUMBO: /* 0x11d */
+/* File: x86/OP_SPUT_OBJECT_JUMBO.S */
+    /*
+     * Jumbo SPUT object handler.
+     */
+    /* sput-object/jumbo vBBBB, field@AAAAAAAA */
+    movl      rSELF,%ecx
+    movl      2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SPUT_OBJECT_JUMBO_resolve                # if not, make it so
+.LOP_SPUT_OBJECT_JUMBO_finish:                              # field ptr in eax
+    GET_VREG_R  %ecx rINST
+    movl      %ecx,offStaticField_value(%eax)    # do the store
+    testl     %ecx,%ecx                          # stored null object ptr?
+    je        1f                                 # skip card mark if null
+    movl      rSELF,%ecx
+    movl      offField_clazz(%eax),%eax          # eax<- field->clazz
+    movl      offThread_cardTable(%ecx),%ecx       # get card table base
+    shrl      $GC_CARD_SHIFT,%eax               # head to card number
+    movb      %cl,(%ecx,%eax)                    # mark card
+1:
+    FETCH_INST_OPCODE 4 %ecx
+    ADVANCE_PC 4
+    GOTO_NEXT_R %ecx
+
+.LOP_SPUT_OBJECT_JUMBO_resolve:
+    movl     rSELF,%ecx
+    movl     2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
+    EXPORT_PC                                   # could throw, need to export
+    movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
+    movl     %eax,OUT_ARG1(%esp)
+    movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
+    call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
+    testl    %eax,%eax
+    jne      .LOP_SPUT_OBJECT_JUMBO_finish                 # success, continue
+    jmp      common_exceptionThrown             # no, handle exception
+
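sput-object/jumbo also card-marks, but the address it shifts is the field's declaring class object (offField_clazz above), because a static reference slot lives inside that class object rather than in a heap instance. Sketch, under the same GC_CARD_SHIFT and dirty-value assumptions as the iput-object note:

    #include <stddef.h>
    #include <stdint.h>

    #define GC_CARD_SHIFT 7                  /* assumed 128-byte cards */

    typedef struct {
        void *clazz;                         /* declaring class (stand-in) */
        void *value;                         /* the static reference slot  */
    } StaticObjectField;

    /* Sketch of the store-plus-card-mark sequence in sput-object/jumbo. */
    static void sputObjectSketch(uint8_t *cardTableBase,
                                 StaticObjectField *field, void *ref)
    {
        field->value = ref;
        if (ref != NULL) {
            uintptr_t card = (uintptr_t)field->clazz >> GC_CARD_SHIFT;
            cardTableBase[card] = (uint8_t)(uintptr_t)cardTableBase;
        }
    }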
+/* ------------------------------ */
+.L_OP_SPUT_BOOLEAN_JUMBO: /* 0x11e */
+/* File: x86/OP_SPUT_BOOLEAN_JUMBO.S */
+/* File: x86/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    movl      rSELF,%ecx
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SPUT_BOOLEAN_JUMBO_resolve                # if not, make it so
+.LOP_SPUT_BOOLEAN_JUMBO_finish:     # field ptr in eax
+    GET_VREG_R  rINST rINST
+    FETCH_INST_OPCODE 4 %ecx
+    ADVANCE_PC 4
+    movl      rINST,offStaticField_value(%eax)
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Go resolve the field
+     */
+.LOP_SPUT_BOOLEAN_JUMBO_resolve:
+    movl     rSELF,%ecx
+    movl     2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
+    EXPORT_PC                                   # could throw, need to export
+    movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
+    movl     %eax,OUT_ARG1(%esp)
+    movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
+    call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
+    testl    %eax,%eax
+    jne      .LOP_SPUT_BOOLEAN_JUMBO_finish                 # success, continue
+    jmp      common_exceptionThrown             # no, handle exception
+
+
+/* ------------------------------ */
+.L_OP_SPUT_BYTE_JUMBO: /* 0x11f */
+/* File: x86/OP_SPUT_BYTE_JUMBO.S */
+/* File: x86/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    movl      rSELF,%ecx
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SPUT_BYTE_JUMBO_resolve                # if not, make it so
+.LOP_SPUT_BYTE_JUMBO_finish:     # field ptr in eax
+    GET_VREG_R  rINST rINST
+    FETCH_INST_OPCODE 4 %ecx
+    ADVANCE_PC 4
+    movl      rINST,offStaticField_value(%eax)
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Go resolve the field
+     */
+.LOP_SPUT_BYTE_JUMBO_resolve:
+    movl     rSELF,%ecx
+    movl     2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
+    EXPORT_PC                                   # could throw, need to export
+    movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
+    movl     %eax,OUT_ARG1(%esp)
+    movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
+    call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
+    testl    %eax,%eax
+    jne      .LOP_SPUT_BYTE_JUMBO_finish                 # success, continue
+    jmp      common_exceptionThrown             # no, handle exception
+
+
+/* ------------------------------ */
+.L_OP_SPUT_CHAR_JUMBO: /* 0x120 */
+/* File: x86/OP_SPUT_CHAR_JUMBO.S */
+/* File: x86/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    movl      rSELF,%ecx
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SPUT_CHAR_JUMBO_resolve                # if not, make it so
+.LOP_SPUT_CHAR_JUMBO_finish:     # field ptr in eax
+    GET_VREG_R  rINST rINST
+    FETCH_INST_OPCODE 4 %ecx
+    ADVANCE_PC 4
+    movl      rINST,offStaticField_value(%eax)
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Go resolve the field
+     */
+.LOP_SPUT_CHAR_JUMBO_resolve:
+    movl     rSELF,%ecx
+    movl     2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
+    EXPORT_PC                                   # could throw, need to export
+    movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
+    movl     %eax,OUT_ARG1(%esp)
+    movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
+    call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
+    testl    %eax,%eax
+    jne      .LOP_SPUT_CHAR_JUMBO_finish                 # success, continue
+    jmp      common_exceptionThrown             # no, handle exception
+
+
+/* ------------------------------ */
+.L_OP_SPUT_SHORT_JUMBO: /* 0x121 */
+/* File: x86/OP_SPUT_SHORT_JUMBO.S */
+/* File: x86/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    movl      rSELF,%ecx
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .LOP_SPUT_SHORT_JUMBO_resolve                # if not, make it so
+.LOP_SPUT_SHORT_JUMBO_finish:     # field ptr in eax
+    GET_VREG_R  rINST rINST
+    FETCH_INST_OPCODE 4 %ecx
+    ADVANCE_PC 4
+    movl      rINST,offStaticField_value(%eax)
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Go resolve the field
+     */
+.LOP_SPUT_SHORT_JUMBO_resolve:
+    movl     rSELF,%ecx
+    movl     2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
+    EXPORT_PC                                   # could throw, need to export
+    movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
+    movl     %eax,OUT_ARG1(%esp)
+    movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
+    call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
+    testl    %eax,%eax
+    jne      .LOP_SPUT_SHORT_JUMBO_finish                 # success, continue
+    jmp      common_exceptionThrown             # no, handle exception
+
+
+/* ------------------------------ */
+.L_OP_INVOKE_VIRTUAL_JUMBO: /* 0x122 */
+/* File: x86/OP_INVOKE_VIRTUAL_JUMBO.S */
+    /*
+     * Handle a jumbo virtual method call.
+     */
+    /* invoke-virtual/jumbo vBBBB, {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    movl      rSELF,%eax
+    movl      2(rPC),%ecx                 # ecx<- AAAAAAAA
+    movl      offThread_methodClassDex(%eax),%eax  # eax<- pDvmDex
+    EXPORT_PC
+    movl      offDvmDex_pResMethods(%eax),%eax   # eax<- pDvmDex->pResMethods
+    movl      (%eax,%ecx,4),%eax          # eax<- resolved baseMethod
+    testl     %eax,%eax                   # already resolved?
+    jne       .LOP_INVOKE_VIRTUAL_JUMBO_continue        # yes, continue
+    movl      rSELF,%eax
+    movl      %ecx,OUT_ARG1(%esp)         # arg1<- ref
+    movl      offThread_method(%eax),%eax   # eax<- self->method
+    movl      offMethod_clazz(%eax),%eax  # eax<- method->clazz
+    movl      %eax,OUT_ARG0(%esp)         # arg0<- clazz
+    movl      $METHOD_VIRTUAL,OUT_ARG2(%esp) # arg2<- flags
+    call      dvmResolveMethod            # eax<- call(clazz, ref, flags)
+    testl     %eax,%eax                   # got null?
+    jne       .LOP_INVOKE_VIRTUAL_JUMBO_continue        # no, continue
+    jmp       common_exceptionThrown      # yes, handle exception
+
+    /* At this point:
+     *   eax = resolved base method
+     *   ecx = scratch
+     */
+.LOP_INVOKE_VIRTUAL_JUMBO_continue:
+    movzwl    8(rPC),%ecx               # ecx<- CCCC
+    GET_VREG_R  %ecx %ecx               # ecx<- "this"
+    movzwl    offMethod_methodIndex(%eax),%eax  # eax<- baseMethod->methodIndex
+    testl     %ecx,%ecx                 # null this?
+    je        common_errNullObject      # go if so
+    movl      offObject_clazz(%ecx),%ecx  # ecx<- thisPtr->clazz
+    movl      offClassObject_vtable(%ecx),%ecx # ecx<- thisPtr->clazz->vtable
+    movl      (%ecx,%eax,4),%eax        # eax<- vtable[methodIndex]
+    jmp       common_invokeMethodJumbo
+
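Virtual jumbo dispatch resolves the base method (cache hit, or dvmResolveMethod with METHOD_VIRTUAL on a miss), then selects the concrete target through the receiver's vtable: vtable[baseMethod->methodIndex] of this->clazz. That one indexing step is the whole point of the _continue block above; a compact sketch with illustrative types:

    #include <stddef.h>

    typedef struct Method Method;
    struct Method { unsigned short methodIndex; };   /* stand-in */
    typedef struct { Method **vtable; } ClassObject; /* stand-in */
    typedef struct { ClassObject *clazz; } Object;   /* stand-in */

    /* Sketch of the dispatch step in invoke-virtual/jumbo; NULL means the
     * handler should raise NullPointerException for a null receiver. */
    static Method *virtualDispatchSketch(Object *thisPtr, const Method *baseMethod)
    {
        if (thisPtr == NULL)
            return NULL;
        return thisPtr->clazz->vtable[baseMethod->methodIndex];
    }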
+/* ------------------------------ */
+.L_OP_INVOKE_SUPER_JUMBO: /* 0x123 */
+/* File: x86/OP_INVOKE_SUPER_JUMBO.S */
+    /*
+     * Handle a jumbo "super" method call.
+     */
+    /* invoke-super/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    movl      rSELF,rINST
+    movl      2(rPC),%eax               # eax<- AAAAAAAA
+    movl      offThread_methodClassDex(rINST),%ecx # ecx<- pDvmDex
+    EXPORT_PC
+    movl      offDvmDex_pResMethods(%ecx),%ecx # ecx<- pDvmDex->pResMethods
+    movl      (%ecx,%eax,4),%ecx        # ecx<- resolved baseMethod
+    movl      offThread_method(rINST),%eax # eax<- method
+    movzwl    8(rPC),rINST              # rINST<- CCCC
+    GET_VREG_R  rINST rINST             # rINST<- "this" ptr
+    testl     rINST,rINST               # null "this"?
+    je        common_errNullObject      # yes, throw
+    movl      offMethod_clazz(%eax),%eax # eax<- method->clazz
+    testl     %ecx,%ecx                 # already resolved?
+    je       .LOP_INVOKE_SUPER_JUMBO_resolve
+    /*
+     * At this point:
+     *  ecx = resolved base method [r0]
+     *  eax = method->clazz [r9]
+     */
+.LOP_INVOKE_SUPER_JUMBO_continue:
+    movl    offClassObject_super(%eax),%eax   # eax<- method->clazz->super
+    movzwl  offMethod_methodIndex(%ecx),%ecx  # ecx<- baseMethod->methodIndex
+    cmpl    offClassObject_vtableCount(%eax),%ecx # compare(methodIndex,vtableCount)
+    jae     .LOP_INVOKE_SUPER_JUMBO_nsm           # method not present in superclass
+    movl    offClassObject_vtable(%eax),%eax   # eax<- ...clazz->super->vtable
+    movl    (%eax,%ecx,4),%eax        # eax<- vtable[methodIndex]
+    jmp     common_invokeMethodJumbo
+
+
+    /* At this point:
+     * ecx = null (needs to be resolved base method)
+     * eax = method->clazz
+    */
+.LOP_INVOKE_SUPER_JUMBO_resolve:
+    SPILL_TMP1(%eax)                    # method->clazz
+    movl    %eax,OUT_ARG0(%esp)         # arg0<- method->clazz
+    movl    2(rPC),%ecx                 # ecx<- AAAAAAAA
+    movl    $METHOD_VIRTUAL,OUT_ARG2(%esp)  # arg2<- resolver method type
+    movl    %ecx,OUT_ARG1(%esp)         # arg1<- ref
+    call    dvmResolveMethod            # eax<- call(clazz, ref, flags)
+    testl   %eax,%eax                   # got null?
+    movl    %eax,%ecx                   # ecx<- resolved base method
+    UNSPILL_TMP1(%eax)                  # restore method->clazz
+    jne     .LOP_INVOKE_SUPER_JUMBO_continue        # good to go - continue
+    jmp     common_exceptionThrown      # handle exception
+
+    /*
+     * Throw a NoSuchMethodError with the method name as the message.
+     *  ecx = resolved base method
+     */
+.LOP_INVOKE_SUPER_JUMBO_nsm:
+    movl    offMethod_name(%ecx),%eax
+    jmp     common_errNoSuchMethod
+
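invoke-super/jumbo differs from the virtual case in two ways that the _continue and _nsm blocks above make explicit: the vtable consulted belongs to the calling method's class's superclass (not the receiver's class), and the method index is bounds-checked against that vtable's count, raising NoSuchMethodError on overflow. Sketch, again with illustrative types:

    #include <stddef.h>

    typedef struct Method Method;
    struct Method { unsigned short methodIndex; };
    typedef struct ClassObject ClassObject;
    struct ClassObject {
        ClassObject *super;
        int vtableCount;
        Method **vtable;
    };

    /* Sketch of the dispatch step in invoke-super/jumbo; NULL means the
     * handler should raise NoSuchMethodError. */
    static Method *superDispatchSketch(const ClassObject *callerClazz,
                                       const Method *baseMethod)
    {
        const ClassObject *super = callerClazz->super;
        if (baseMethod->methodIndex >= super->vtableCount)
            return NULL;                 /* not present in superclass */
        return super->vtable[baseMethod->methodIndex];
    }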
+/* ------------------------------ */
+.L_OP_INVOKE_DIRECT_JUMBO: /* 0x124 */
+/* File: x86/OP_INVOKE_DIRECT_JUMBO.S */
+    /*
+     * Handle a jumbo direct method call.
+     *
+     * (We could defer the "is 'this' pointer null" test to the common
+     * method invocation code, and use a flag to indicate that static
+     * calls don't count.  If we do this as part of copying the arguments
+     * out we could avoid loading the first arg twice.)
+     */
+    /* invoke-direct/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    movl      rSELF,%ecx
+    movl      2(rPC),%eax              # eax<- AAAAAAAA
+    movl      offThread_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
+    EXPORT_PC
+    movl      offDvmDex_pResMethods(%ecx),%ecx  # ecx<- pDvmDex->pResMethods
+    movzwl    8(rPC),rIBASE            # rIBASE<- CCCC
+    movl      (%ecx,%eax,4),%eax       # eax<- resolved methodToCall
+    testl     %eax,%eax                # already resolved?
+    GET_VREG_R  %ecx rIBASE            # ecx<- "this" ptr
+    je        .LOP_INVOKE_DIRECT_JUMBO_resolve      # not resolved, do it now
+.LOP_INVOKE_DIRECT_JUMBO_finish:
+    testl     %ecx,%ecx                # null "this"?
+    jne       common_invokeMethodJumbo # no, continue on
+    jmp       common_errNullObject
+
+    /*
+     * On entry:
+     *   TMP_SPILL  <- "this" register
+     * Things are a bit ugly on this path, but it's the less
+     * frequent one.  We'll have to do some reloading.
+     */
+.LOP_INVOKE_DIRECT_JUMBO_resolve:
+     SPILL_TMP1(%ecx)
+     movl     rSELF,%ecx
+     movl     offThread_method(%ecx),%ecx  # ecx<- self->method
+     movl     2(rPC),%eax      # reference AAAAAAAA
+     movl     offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+     movl     $METHOD_DIRECT,OUT_ARG2(%esp)
+     movl     %eax,OUT_ARG1(%esp)
+     movl     %ecx,OUT_ARG0(%esp)
+     call     dvmResolveMethod # eax<- call(clazz, ref, flags)
+     UNSPILL_TMP1(%ecx)
+     testl    %eax,%eax
+     jne      .LOP_INVOKE_DIRECT_JUMBO_finish
+     jmp      common_exceptionThrown
+
+/* ------------------------------ */
+.L_OP_INVOKE_STATIC_JUMBO: /* 0x125 */
+/* File: x86/OP_INVOKE_STATIC_JUMBO.S */
+    /*
+     * Handle a jumbo static method call.
+     */
+    /* invoke-static/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    movl      rSELF,%ecx
+    movl      2(rPC),%eax               # eax<- AAAAAAAA
+    movl      offThread_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
+    EXPORT_PC
+    movl      offDvmDex_pResMethods(%ecx),%ecx  # ecx<- pDvmDex->pResMethods
+    movl      (%ecx,%eax,4),%eax        # eax<- resolved methodToCall
+    testl     %eax,%eax
+    jne       common_invokeMethodJumbo
+    movl      rSELF,%ecx
+    movl      offThread_method(%ecx),%ecx # ecx<- self->method
+    movl      2(rPC),%eax               # eax<- AAAAAAAA
+    movl      offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+    movl      %eax,OUT_ARG1(%esp)       # arg1<- AAAAAAAA
+    movl      %ecx,OUT_ARG0(%esp)       # arg0<- clazz
+    movl      $METHOD_STATIC,%eax
+    movl      %eax,OUT_ARG2(%esp)       # arg2<- flags
+    call      dvmResolveMethod          # call(clazz,ref,flags)
+    testl     %eax,%eax                 # got null?
+    jne       common_invokeMethodJumbo
+    jmp       common_exceptionThrown
+
+/* ------------------------------ */
+.L_OP_INVOKE_INTERFACE_JUMBO: /* 0x126 */
+/* File: x86/OP_INVOKE_INTERFACE_JUMBO.S */
+    /*
+     * Handle a jumbo interface method call.
+     */
+    /* invoke-interface/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    movzwl     8(rPC),%eax              # eax<- CCCC
+    movl       rSELF,%ecx
+    GET_VREG_R   %eax %eax              # eax<- "this"
+    EXPORT_PC
+    testl      %eax,%eax                # null this?
+    je         common_errNullObject     # yes, fail
+    movl       offObject_clazz(%eax),%eax # eax<- thisPtr->clazz
+    movl       %eax,OUT_ARG0(%esp)                 # arg0<- class
+    movl       offThread_methodClassDex(%ecx),%eax   # eax<- methodClassDex
+    movl       offThread_method(%ecx),%ecx           # ecx<- method
+    movl       %eax,OUT_ARG3(%esp)                 # arg3<- dex
+    movl       2(rPC),%eax                         # eax<- AAAAAAAA
+    movl       %ecx,OUT_ARG2(%esp)                 # arg2<- method
+    movl       %eax,OUT_ARG1(%esp)                 # arg1<- AAAAAAAA
+    call       dvmFindInterfaceMethodInCache # eax<- call(class, ref, method, dex)
+    testl      %eax,%eax
+    je         common_exceptionThrown
+    jmp        common_invokeMethodJumbo
+
+/* ------------------------------ */
+.L_OP_UNUSED_27FF: /* 0x127 */
+/* File: x86/OP_UNUSED_27FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_28FF: /* 0x128 */
+/* File: x86/OP_UNUSED_28FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_29FF: /* 0x129 */
+/* File: x86/OP_UNUSED_29FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_2AFF: /* 0x12a */
+/* File: x86/OP_UNUSED_2AFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_2BFF: /* 0x12b */
+/* File: x86/OP_UNUSED_2BFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_2CFF: /* 0x12c */
+/* File: x86/OP_UNUSED_2CFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_2DFF: /* 0x12d */
+/* File: x86/OP_UNUSED_2DFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_2EFF: /* 0x12e */
+/* File: x86/OP_UNUSED_2EFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_2FFF: /* 0x12f */
+/* File: x86/OP_UNUSED_2FFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_30FF: /* 0x130 */
+/* File: x86/OP_UNUSED_30FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_31FF: /* 0x131 */
+/* File: x86/OP_UNUSED_31FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_32FF: /* 0x132 */
+/* File: x86/OP_UNUSED_32FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_33FF: /* 0x133 */
+/* File: x86/OP_UNUSED_33FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_34FF: /* 0x134 */
+/* File: x86/OP_UNUSED_34FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_35FF: /* 0x135 */
+/* File: x86/OP_UNUSED_35FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_36FF: /* 0x136 */
+/* File: x86/OP_UNUSED_36FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_37FF: /* 0x137 */
+/* File: x86/OP_UNUSED_37FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_38FF: /* 0x138 */
+/* File: x86/OP_UNUSED_38FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_39FF: /* 0x139 */
+/* File: x86/OP_UNUSED_39FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_3AFF: /* 0x13a */
+/* File: x86/OP_UNUSED_3AFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_3BFF: /* 0x13b */
+/* File: x86/OP_UNUSED_3BFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_3CFF: /* 0x13c */
+/* File: x86/OP_UNUSED_3CFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_3DFF: /* 0x13d */
+/* File: x86/OP_UNUSED_3DFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_3EFF: /* 0x13e */
+/* File: x86/OP_UNUSED_3EFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_3FFF: /* 0x13f */
+/* File: x86/OP_UNUSED_3FFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_40FF: /* 0x140 */
+/* File: x86/OP_UNUSED_40FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_41FF: /* 0x141 */
+/* File: x86/OP_UNUSED_41FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_42FF: /* 0x142 */
+/* File: x86/OP_UNUSED_42FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_43FF: /* 0x143 */
+/* File: x86/OP_UNUSED_43FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_44FF: /* 0x144 */
+/* File: x86/OP_UNUSED_44FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_45FF: /* 0x145 */
+/* File: x86/OP_UNUSED_45FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_46FF: /* 0x146 */
+/* File: x86/OP_UNUSED_46FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_47FF: /* 0x147 */
+/* File: x86/OP_UNUSED_47FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_48FF: /* 0x148 */
+/* File: x86/OP_UNUSED_48FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_49FF: /* 0x149 */
+/* File: x86/OP_UNUSED_49FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_4AFF: /* 0x14a */
+/* File: x86/OP_UNUSED_4AFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_4BFF: /* 0x14b */
+/* File: x86/OP_UNUSED_4BFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_4CFF: /* 0x14c */
+/* File: x86/OP_UNUSED_4CFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_4DFF: /* 0x14d */
+/* File: x86/OP_UNUSED_4DFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_4EFF: /* 0x14e */
+/* File: x86/OP_UNUSED_4EFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_4FFF: /* 0x14f */
+/* File: x86/OP_UNUSED_4FFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_50FF: /* 0x150 */
+/* File: x86/OP_UNUSED_50FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_51FF: /* 0x151 */
+/* File: x86/OP_UNUSED_51FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_52FF: /* 0x152 */
+/* File: x86/OP_UNUSED_52FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_53FF: /* 0x153 */
+/* File: x86/OP_UNUSED_53FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_54FF: /* 0x154 */
+/* File: x86/OP_UNUSED_54FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_55FF: /* 0x155 */
+/* File: x86/OP_UNUSED_55FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_56FF: /* 0x156 */
+/* File: x86/OP_UNUSED_56FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_57FF: /* 0x157 */
+/* File: x86/OP_UNUSED_57FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_58FF: /* 0x158 */
+/* File: x86/OP_UNUSED_58FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_59FF: /* 0x159 */
+/* File: x86/OP_UNUSED_59FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_5AFF: /* 0x15a */
+/* File: x86/OP_UNUSED_5AFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_5BFF: /* 0x15b */
+/* File: x86/OP_UNUSED_5BFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_5CFF: /* 0x15c */
+/* File: x86/OP_UNUSED_5CFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_5DFF: /* 0x15d */
+/* File: x86/OP_UNUSED_5DFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_5EFF: /* 0x15e */
+/* File: x86/OP_UNUSED_5EFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_5FFF: /* 0x15f */
+/* File: x86/OP_UNUSED_5FFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_60FF: /* 0x160 */
+/* File: x86/OP_UNUSED_60FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_61FF: /* 0x161 */
+/* File: x86/OP_UNUSED_61FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_62FF: /* 0x162 */
+/* File: x86/OP_UNUSED_62FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_63FF: /* 0x163 */
+/* File: x86/OP_UNUSED_63FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_64FF: /* 0x164 */
+/* File: x86/OP_UNUSED_64FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_65FF: /* 0x165 */
+/* File: x86/OP_UNUSED_65FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_66FF: /* 0x166 */
+/* File: x86/OP_UNUSED_66FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_67FF: /* 0x167 */
+/* File: x86/OP_UNUSED_67FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_68FF: /* 0x168 */
+/* File: x86/OP_UNUSED_68FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_69FF: /* 0x169 */
+/* File: x86/OP_UNUSED_69FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_6AFF: /* 0x16a */
+/* File: x86/OP_UNUSED_6AFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_6BFF: /* 0x16b */
+/* File: x86/OP_UNUSED_6BFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_6CFF: /* 0x16c */
+/* File: x86/OP_UNUSED_6CFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_6DFF: /* 0x16d */
+/* File: x86/OP_UNUSED_6DFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_6EFF: /* 0x16e */
+/* File: x86/OP_UNUSED_6EFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_6FFF: /* 0x16f */
+/* File: x86/OP_UNUSED_6FFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_70FF: /* 0x170 */
+/* File: x86/OP_UNUSED_70FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_71FF: /* 0x171 */
+/* File: x86/OP_UNUSED_71FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_72FF: /* 0x172 */
+/* File: x86/OP_UNUSED_72FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_73FF: /* 0x173 */
+/* File: x86/OP_UNUSED_73FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_74FF: /* 0x174 */
+/* File: x86/OP_UNUSED_74FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_75FF: /* 0x175 */
+/* File: x86/OP_UNUSED_75FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_76FF: /* 0x176 */
+/* File: x86/OP_UNUSED_76FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_77FF: /* 0x177 */
+/* File: x86/OP_UNUSED_77FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_78FF: /* 0x178 */
+/* File: x86/OP_UNUSED_78FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_79FF: /* 0x179 */
+/* File: x86/OP_UNUSED_79FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_7AFF: /* 0x17a */
+/* File: x86/OP_UNUSED_7AFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_7BFF: /* 0x17b */
+/* File: x86/OP_UNUSED_7BFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_7CFF: /* 0x17c */
+/* File: x86/OP_UNUSED_7CFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_7DFF: /* 0x17d */
+/* File: x86/OP_UNUSED_7DFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_7EFF: /* 0x17e */
+/* File: x86/OP_UNUSED_7EFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_7FFF: /* 0x17f */
+/* File: x86/OP_UNUSED_7FFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_80FF: /* 0x180 */
+/* File: x86/OP_UNUSED_80FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_81FF: /* 0x181 */
+/* File: x86/OP_UNUSED_81FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_82FF: /* 0x182 */
+/* File: x86/OP_UNUSED_82FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_83FF: /* 0x183 */
+/* File: x86/OP_UNUSED_83FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_84FF: /* 0x184 */
+/* File: x86/OP_UNUSED_84FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_85FF: /* 0x185 */
+/* File: x86/OP_UNUSED_85FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_86FF: /* 0x186 */
+/* File: x86/OP_UNUSED_86FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_87FF: /* 0x187 */
+/* File: x86/OP_UNUSED_87FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_88FF: /* 0x188 */
+/* File: x86/OP_UNUSED_88FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_89FF: /* 0x189 */
+/* File: x86/OP_UNUSED_89FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_8AFF: /* 0x18a */
+/* File: x86/OP_UNUSED_8AFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_8BFF: /* 0x18b */
+/* File: x86/OP_UNUSED_8BFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_8CFF: /* 0x18c */
+/* File: x86/OP_UNUSED_8CFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_8DFF: /* 0x18d */
+/* File: x86/OP_UNUSED_8DFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_8EFF: /* 0x18e */
+/* File: x86/OP_UNUSED_8EFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_8FFF: /* 0x18f */
+/* File: x86/OP_UNUSED_8FFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_90FF: /* 0x190 */
+/* File: x86/OP_UNUSED_90FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_91FF: /* 0x191 */
+/* File: x86/OP_UNUSED_91FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_92FF: /* 0x192 */
+/* File: x86/OP_UNUSED_92FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_93FF: /* 0x193 */
+/* File: x86/OP_UNUSED_93FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_94FF: /* 0x194 */
+/* File: x86/OP_UNUSED_94FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_95FF: /* 0x195 */
+/* File: x86/OP_UNUSED_95FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_96FF: /* 0x196 */
+/* File: x86/OP_UNUSED_96FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_97FF: /* 0x197 */
+/* File: x86/OP_UNUSED_97FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_98FF: /* 0x198 */
+/* File: x86/OP_UNUSED_98FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_99FF: /* 0x199 */
+/* File: x86/OP_UNUSED_99FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_9AFF: /* 0x19a */
+/* File: x86/OP_UNUSED_9AFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_9BFF: /* 0x19b */
+/* File: x86/OP_UNUSED_9BFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_9CFF: /* 0x19c */
+/* File: x86/OP_UNUSED_9CFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_9DFF: /* 0x19d */
+/* File: x86/OP_UNUSED_9DFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_9EFF: /* 0x19e */
+/* File: x86/OP_UNUSED_9EFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_9FFF: /* 0x19f */
+/* File: x86/OP_UNUSED_9FFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_A0FF: /* 0x1a0 */
+/* File: x86/OP_UNUSED_A0FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_A1FF: /* 0x1a1 */
+/* File: x86/OP_UNUSED_A1FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_A2FF: /* 0x1a2 */
+/* File: x86/OP_UNUSED_A2FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_A3FF: /* 0x1a3 */
+/* File: x86/OP_UNUSED_A3FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_A4FF: /* 0x1a4 */
+/* File: x86/OP_UNUSED_A4FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_A5FF: /* 0x1a5 */
+/* File: x86/OP_UNUSED_A5FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_A6FF: /* 0x1a6 */
+/* File: x86/OP_UNUSED_A6FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_A7FF: /* 0x1a7 */
+/* File: x86/OP_UNUSED_A7FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_A8FF: /* 0x1a8 */
+/* File: x86/OP_UNUSED_A8FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_A9FF: /* 0x1a9 */
+/* File: x86/OP_UNUSED_A9FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_AAFF: /* 0x1aa */
+/* File: x86/OP_UNUSED_AAFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_ABFF: /* 0x1ab */
+/* File: x86/OP_UNUSED_ABFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_ACFF: /* 0x1ac */
+/* File: x86/OP_UNUSED_ACFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_ADFF: /* 0x1ad */
+/* File: x86/OP_UNUSED_ADFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_AEFF: /* 0x1ae */
+/* File: x86/OP_UNUSED_AEFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_AFFF: /* 0x1af */
+/* File: x86/OP_UNUSED_AFFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_B0FF: /* 0x1b0 */
+/* File: x86/OP_UNUSED_B0FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_B1FF: /* 0x1b1 */
+/* File: x86/OP_UNUSED_B1FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_B2FF: /* 0x1b2 */
+/* File: x86/OP_UNUSED_B2FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_B3FF: /* 0x1b3 */
+/* File: x86/OP_UNUSED_B3FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_B4FF: /* 0x1b4 */
+/* File: x86/OP_UNUSED_B4FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_B5FF: /* 0x1b5 */
+/* File: x86/OP_UNUSED_B5FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_B6FF: /* 0x1b6 */
+/* File: x86/OP_UNUSED_B6FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_B7FF: /* 0x1b7 */
+/* File: x86/OP_UNUSED_B7FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_B8FF: /* 0x1b8 */
+/* File: x86/OP_UNUSED_B8FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_B9FF: /* 0x1b9 */
+/* File: x86/OP_UNUSED_B9FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_BAFF: /* 0x1ba */
+/* File: x86/OP_UNUSED_BAFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_BBFF: /* 0x1bb */
+/* File: x86/OP_UNUSED_BBFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_BCFF: /* 0x1bc */
+/* File: x86/OP_UNUSED_BCFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_BDFF: /* 0x1bd */
+/* File: x86/OP_UNUSED_BDFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_BEFF: /* 0x1be */
+/* File: x86/OP_UNUSED_BEFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_BFFF: /* 0x1bf */
+/* File: x86/OP_UNUSED_BFFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_C0FF: /* 0x1c0 */
+/* File: x86/OP_UNUSED_C0FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_C1FF: /* 0x1c1 */
+/* File: x86/OP_UNUSED_C1FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_C2FF: /* 0x1c2 */
+/* File: x86/OP_UNUSED_C2FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_C3FF: /* 0x1c3 */
+/* File: x86/OP_UNUSED_C3FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_C4FF: /* 0x1c4 */
+/* File: x86/OP_UNUSED_C4FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_C5FF: /* 0x1c5 */
+/* File: x86/OP_UNUSED_C5FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_C6FF: /* 0x1c6 */
+/* File: x86/OP_UNUSED_C6FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_C7FF: /* 0x1c7 */
+/* File: x86/OP_UNUSED_C7FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_C8FF: /* 0x1c8 */
+/* File: x86/OP_UNUSED_C8FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_C9FF: /* 0x1c9 */
+/* File: x86/OP_UNUSED_C9FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_CAFF: /* 0x1ca */
+/* File: x86/OP_UNUSED_CAFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_CBFF: /* 0x1cb */
+/* File: x86/OP_UNUSED_CBFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_CCFF: /* 0x1cc */
+/* File: x86/OP_UNUSED_CCFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_CDFF: /* 0x1cd */
+/* File: x86/OP_UNUSED_CDFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_CEFF: /* 0x1ce */
+/* File: x86/OP_UNUSED_CEFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_CFFF: /* 0x1cf */
+/* File: x86/OP_UNUSED_CFFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_D0FF: /* 0x1d0 */
+/* File: x86/OP_UNUSED_D0FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_D1FF: /* 0x1d1 */
+/* File: x86/OP_UNUSED_D1FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_D2FF: /* 0x1d2 */
+/* File: x86/OP_UNUSED_D2FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_D3FF: /* 0x1d3 */
+/* File: x86/OP_UNUSED_D3FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_D4FF: /* 0x1d4 */
+/* File: x86/OP_UNUSED_D4FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_D5FF: /* 0x1d5 */
+/* File: x86/OP_UNUSED_D5FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_D6FF: /* 0x1d6 */
+/* File: x86/OP_UNUSED_D6FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_D7FF: /* 0x1d7 */
+/* File: x86/OP_UNUSED_D7FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_D8FF: /* 0x1d8 */
+/* File: x86/OP_UNUSED_D8FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_D9FF: /* 0x1d9 */
+/* File: x86/OP_UNUSED_D9FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_DAFF: /* 0x1da */
+/* File: x86/OP_UNUSED_DAFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_DBFF: /* 0x1db */
+/* File: x86/OP_UNUSED_DBFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_DCFF: /* 0x1dc */
+/* File: x86/OP_UNUSED_DCFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_DDFF: /* 0x1dd */
+/* File: x86/OP_UNUSED_DDFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_DEFF: /* 0x1de */
+/* File: x86/OP_UNUSED_DEFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_DFFF: /* 0x1df */
+/* File: x86/OP_UNUSED_DFFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_E0FF: /* 0x1e0 */
+/* File: x86/OP_UNUSED_E0FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_E1FF: /* 0x1e1 */
+/* File: x86/OP_UNUSED_E1FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_E2FF: /* 0x1e2 */
+/* File: x86/OP_UNUSED_E2FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_E3FF: /* 0x1e3 */
+/* File: x86/OP_UNUSED_E3FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_E4FF: /* 0x1e4 */
+/* File: x86/OP_UNUSED_E4FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_E5FF: /* 0x1e5 */
+/* File: x86/OP_UNUSED_E5FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_E6FF: /* 0x1e6 */
+/* File: x86/OP_UNUSED_E6FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_E7FF: /* 0x1e7 */
+/* File: x86/OP_UNUSED_E7FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_E8FF: /* 0x1e8 */
+/* File: x86/OP_UNUSED_E8FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_E9FF: /* 0x1e9 */
+/* File: x86/OP_UNUSED_E9FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_EAFF: /* 0x1ea */
+/* File: x86/OP_UNUSED_EAFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_EBFF: /* 0x1eb */
+/* File: x86/OP_UNUSED_EBFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_ECFF: /* 0x1ec */
+/* File: x86/OP_UNUSED_ECFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_EDFF: /* 0x1ed */
+/* File: x86/OP_UNUSED_EDFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_EEFF: /* 0x1ee */
+/* File: x86/OP_UNUSED_EEFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_EFFF: /* 0x1ef */
+/* File: x86/OP_UNUSED_EFFF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_F0FF: /* 0x1f0 */
+/* File: x86/OP_UNUSED_F0FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_UNUSED_F1FF: /* 0x1f1 */
+/* File: x86/OP_UNUSED_F1FF.S */
+/* File: x86/unused.S */
+    jmp     common_abort
+
+
+/* ------------------------------ */
+.L_OP_INVOKE_OBJECT_INIT_JUMBO: /* 0x1f2 */
+    /* (stub) */
+    SAVE_PC_FP_TO_SELF %ecx          # leaves rSELF in %ecx
+    movl %ecx,OUT_ARG0(%esp)         # self is first arg to function
+    call      dvmMterp_OP_INVOKE_OBJECT_INIT_JUMBO     # do the real work
+    movl      rSELF,%ecx
+    LOAD_PC_FP_FROM_SELF             # retrieve updated values
+    movl      offThread_curHandlerTable(%ecx),rIBASE  # set up rIBASE
+    FETCH_INST
+    GOTO_NEXT
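+    /*
+     * The "(stub)" handlers in this range all follow the same pattern:
+     * save rPC/rFP into the Thread, call the C implementation
+     * (dvmMterp_OP_*), then reload rPC/rFP and rIBASE from the Thread
+     * before fetching and dispatching the next instruction.
+     */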
+/* ------------------------------ */
+.L_OP_IGET_VOLATILE_JUMBO: /* 0x1f3 */
+    /* (stub) */
+    SAVE_PC_FP_TO_SELF %ecx          # leaves rSELF in %ecx
+    movl %ecx,OUT_ARG0(%esp)         # self is first arg to function
+    call      dvmMterp_OP_IGET_VOLATILE_JUMBO     # do the real work
+    movl      rSELF,%ecx
+    LOAD_PC_FP_FROM_SELF             # retrieve updated values
+    movl      offThread_curHandlerTable(%ecx),rIBASE  # set up rIBASE
+    FETCH_INST
+    GOTO_NEXT
+/* ------------------------------ */
+.L_OP_IGET_WIDE_VOLATILE_JUMBO: /* 0x1f4 */
+    /* (stub) */
+    SAVE_PC_FP_TO_SELF %ecx          # leaves rSELF in %ecx
+    movl %ecx,OUT_ARG0(%esp)         # self is first arg to function
+    call      dvmMterp_OP_IGET_WIDE_VOLATILE_JUMBO     # do the real work
+    movl      rSELF,%ecx
+    LOAD_PC_FP_FROM_SELF             # retrieve updated values
+    movl      offThread_curHandlerTable(%ecx),rIBASE  # set up rIBASE
+    FETCH_INST
+    GOTO_NEXT
+/* ------------------------------ */
+.L_OP_IGET_OBJECT_VOLATILE_JUMBO: /* 0x1f5 */
+    /* (stub) */
+    SAVE_PC_FP_TO_SELF %ecx          # leaves rSELF in %ecx
+    movl %ecx,OUT_ARG0(%esp)         # self is first arg to function
+    call      dvmMterp_OP_IGET_OBJECT_VOLATILE_JUMBO     # do the real work
+    movl      rSELF,%ecx
+    LOAD_PC_FP_FROM_SELF             # retrieve updated values
+    movl      offThread_curHandlerTable(%ecx),rIBASE  # set up rIBASE
+    FETCH_INST
+    GOTO_NEXT
+/* ------------------------------ */
+.L_OP_IPUT_VOLATILE_JUMBO: /* 0x1f6 */
+    /* (stub) */
+    SAVE_PC_FP_TO_SELF %ecx          # leaves rSELF in %ecx
+    movl %ecx,OUT_ARG0(%esp)         # self is first arg to function
+    call      dvmMterp_OP_IPUT_VOLATILE_JUMBO     # do the real work
+    movl      rSELF,%ecx
+    LOAD_PC_FP_FROM_SELF             # retrieve updated values
+    movl      offThread_curHandlerTable(%ecx),rIBASE  # set up rIBASE
+    FETCH_INST
+    GOTO_NEXT
+/* ------------------------------ */
+.L_OP_IPUT_WIDE_VOLATILE_JUMBO: /* 0x1f7 */
+    /* (stub) */
+    SAVE_PC_FP_TO_SELF %ecx          # leaves rSELF in %ecx
+    movl %ecx,OUT_ARG0(%esp)         # self is first arg to function
+    call      dvmMterp_OP_IPUT_WIDE_VOLATILE_JUMBO     # do the real work
+    movl      rSELF,%ecx
+    LOAD_PC_FP_FROM_SELF             # retrieve updated values
+    movl      offThread_curHandlerTable(%ecx),rIBASE  # set up rIBASE
+    FETCH_INST
+    GOTO_NEXT
+/* ------------------------------ */
+.L_OP_IPUT_OBJECT_VOLATILE_JUMBO: /* 0x1f8 */
+    /* (stub) */
+    SAVE_PC_FP_TO_SELF %ecx          # leaves rSELF in %ecx
+    movl %ecx,OUT_ARG0(%esp)         # self is first arg to function
+    call      dvmMterp_OP_IPUT_OBJECT_VOLATILE_JUMBO     # do the real work
+    movl      rSELF,%ecx
+    LOAD_PC_FP_FROM_SELF             # retrieve updated values
+    movl      offThread_curHandlerTable(%ecx),rIBASE  # set up rIBASE
+    FETCH_INST
+    GOTO_NEXT
+/* ------------------------------ */
+.L_OP_SGET_VOLATILE_JUMBO: /* 0x1f9 */
+    /* (stub) */
+    SAVE_PC_FP_TO_SELF %ecx          # leaves rSELF in %ecx
+    movl %ecx,OUT_ARG0(%esp)         # self is first arg to function
+    call      dvmMterp_OP_SGET_VOLATILE_JUMBO     # do the real work
+    movl      rSELF,%ecx
+    LOAD_PC_FP_FROM_SELF             # retrieve updated values
+    movl      offThread_curHandlerTable(%ecx),rIBASE  # set up rIBASE
+    FETCH_INST
+    GOTO_NEXT
+/* ------------------------------ */
+.L_OP_SGET_WIDE_VOLATILE_JUMBO: /* 0x1fa */
+    /* (stub) */
+    SAVE_PC_FP_TO_SELF %ecx          # leaves rSELF in %ecx
+    movl %ecx,OUT_ARG0(%esp)         # self is first arg to function
+    call      dvmMterp_OP_SGET_WIDE_VOLATILE_JUMBO     # do the real work
+    movl      rSELF,%ecx
+    LOAD_PC_FP_FROM_SELF             # retrieve updated values
+    movl      offThread_curHandlerTable(%ecx),rIBASE  # set up rIBASE
+    FETCH_INST
+    GOTO_NEXT
+/* ------------------------------ */
+.L_OP_SGET_OBJECT_VOLATILE_JUMBO: /* 0x1fb */
+    /* (stub) */
+    SAVE_PC_FP_TO_SELF %ecx          # leaves rSELF in %ecx
+    movl %ecx,OUT_ARG0(%esp)         # self is first arg to function
+    call      dvmMterp_OP_SGET_OBJECT_VOLATILE_JUMBO     # do the real work
+    movl      rSELF,%ecx
+    LOAD_PC_FP_FROM_SELF             # retrieve updated values
+    movl      offThread_curHandlerTable(%ecx),rIBASE  # set up rIBASE
+    FETCH_INST
+    GOTO_NEXT
+/* ------------------------------ */
+.L_OP_SPUT_VOLATILE_JUMBO: /* 0x1fc */
+    /* (stub) */
+    SAVE_PC_FP_TO_SELF %ecx          # leaves rSELF in %ecx
+    movl %ecx,OUT_ARG0(%esp)         # self is first arg to function
+    call      dvmMterp_OP_SPUT_VOLATILE_JUMBO     # do the real work
+    movl      rSELF,%ecx
+    LOAD_PC_FP_FROM_SELF             # retrieve updated values
+    movl      offThread_curHandlerTable(%ecx),rIBASE  # set up rIBASE
+    FETCH_INST
+    GOTO_NEXT
+/* ------------------------------ */
+.L_OP_SPUT_WIDE_VOLATILE_JUMBO: /* 0x1fd */
+    /* (stub) */
+    SAVE_PC_FP_TO_SELF %ecx          # leaves rSELF in %ecx
+    movl %ecx,OUT_ARG0(%esp)         # self is first arg to function
+    call      dvmMterp_OP_SPUT_WIDE_VOLATILE_JUMBO     # do the real work
+    movl      rSELF,%ecx
+    LOAD_PC_FP_FROM_SELF             # retrieve updated values
+    movl      offThread_curHandlerTable(%ecx),rIBASE  # set up rIBASE
+    FETCH_INST
+    GOTO_NEXT
+/* ------------------------------ */
+.L_OP_SPUT_OBJECT_VOLATILE_JUMBO: /* 0x1fe */
+    /* (stub) */
+    SAVE_PC_FP_TO_SELF %ecx          # leaves rSELF in %ecx
+    movl %ecx,OUT_ARG0(%esp)         # self is first arg to function
+    call      dvmMterp_OP_SPUT_OBJECT_VOLATILE_JUMBO     # do the real work
+    movl      rSELF,%ecx
+    LOAD_PC_FP_FROM_SELF             # retrieve updated values
+    movl      offThread_curHandlerTable(%ecx),rIBASE  # set up rIBASE
+    FETCH_INST
+    GOTO_NEXT
+/* ------------------------------ */
+.L_OP_THROW_VERIFICATION_ERROR_JUMBO: /* 0x1ff */
+/* File: x86/OP_THROW_VERIFICATION_ERROR_JUMBO.S */
+    /*
+     * Handle a jumbo throw-verification-error instruction.  This throws an
+     * exception for an error discovered during verification.  The
+     * exception is indicated by BBBB, with some detail provided by AAAAAAAA.
+     */
+    /* exop BBBB, ref@AAAAAAAA */
+    movl     rSELF,%ecx
+    movl     2(rPC),%eax                     # eax<- AAAAAAAA
+    movl     offThread_method(%ecx),%ecx       # ecx<- self->method
+    EXPORT_PC
+    movl     %eax,OUT_ARG2(%esp)             # arg2<- AAAAAAAA
+    movl     rINST,OUT_ARG1(%esp)            # arg1<- BBBB
+    movl     %ecx,OUT_ARG0(%esp)             # arg0<- method
+    call     dvmThrowVerificationError       # call(method, kind, ref)
+    jmp      common_exceptionThrown          # handle exception
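+    /* In the call above, BBBB (still in rINST) supplies the error kind and
+     * AAAAAAAA the offending reference, matching the
+     * dvmThrowVerificationError(method, kind, ref) ordering noted above. */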
+
+    .size   dvmAsmInstructionStartCode, .-dvmAsmInstructionStartCode
+    .global dvmAsmInstructionEndCode
+dvmAsmInstructionEndCode:
+
+    .global dvmAsmAltInstructionStartCode
+    .type   dvmAsmAltInstructionStartCode, %function
+dvmAsmAltInstructionStartCode:
+    .text
+
+/* ------------------------------ */
+.L_ALT_OP_NOP: /* 0x00 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(0*4)
+
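+/*
+ * Each .L_ALT_OP_* stub below repeats this pattern: report the instruction
+ * to dvmCheckInst, reload rIBASE (it is caller-save across the call), and
+ * then jump indirectly through that opcode's entry in the
+ * dvmAsmInstructionStart table to reach the regular handler.
+ */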
+/* ------------------------------ */
+.L_ALT_OP_MOVE: /* 0x01 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(1*4)
+
+/* ------------------------------ */
+.L_ALT_OP_MOVE_FROM16: /* 0x02 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(2*4)
+
+/* ------------------------------ */
+.L_ALT_OP_MOVE_16: /* 0x03 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(3*4)
+
+/* ------------------------------ */
+.L_ALT_OP_MOVE_WIDE: /* 0x04 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(4*4)
+
+/* ------------------------------ */
+.L_ALT_OP_MOVE_WIDE_FROM16: /* 0x05 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(5*4)
+
+/* ------------------------------ */
+.L_ALT_OP_MOVE_WIDE_16: /* 0x06 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(6*4)
+
+/* ------------------------------ */
+.L_ALT_OP_MOVE_OBJECT: /* 0x07 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(7*4)
+
+/* ------------------------------ */
+.L_ALT_OP_MOVE_OBJECT_FROM16: /* 0x08 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(8*4)
+
+/* ------------------------------ */
+.L_ALT_OP_MOVE_OBJECT_16: /* 0x09 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(9*4)
+
+/* ------------------------------ */
+.L_ALT_OP_MOVE_RESULT: /* 0x0a */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(10*4)
+
+/* ------------------------------ */
+.L_ALT_OP_MOVE_RESULT_WIDE: /* 0x0b */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(11*4)
+
+/* ------------------------------ */
+.L_ALT_OP_MOVE_RESULT_OBJECT: /* 0x0c */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(12*4)
+
+/* ------------------------------ */
+.L_ALT_OP_MOVE_EXCEPTION: /* 0x0d */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(13*4)
+
+/* ------------------------------ */
+.L_ALT_OP_RETURN_VOID: /* 0x0e */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(14*4)
+
+/* ------------------------------ */
+.L_ALT_OP_RETURN: /* 0x0f */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(15*4)
+
+/* ------------------------------ */
+.L_ALT_OP_RETURN_WIDE: /* 0x10 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(16*4)
+
+/* ------------------------------ */
+.L_ALT_OP_RETURN_OBJECT: /* 0x11 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(17*4)
+
+/* ------------------------------ */
+.L_ALT_OP_CONST_4: /* 0x12 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(18*4)
+
+/* ------------------------------ */
+.L_ALT_OP_CONST_16: /* 0x13 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(19*4)
+
+/* ------------------------------ */
+.L_ALT_OP_CONST: /* 0x14 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(20*4)
+
+/* ------------------------------ */
+.L_ALT_OP_CONST_HIGH16: /* 0x15 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(21*4)
+
+/* ------------------------------ */
+.L_ALT_OP_CONST_WIDE_16: /* 0x16 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(22*4)
+
+/* ------------------------------ */
+.L_ALT_OP_CONST_WIDE_32: /* 0x17 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(23*4)
+
+/* ------------------------------ */
+.L_ALT_OP_CONST_WIDE: /* 0x18 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(24*4)
+
+/* ------------------------------ */
+.L_ALT_OP_CONST_WIDE_HIGH16: /* 0x19 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(25*4)
+
+/* ------------------------------ */
+.L_ALT_OP_CONST_STRING: /* 0x1a */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(26*4)
+
+/* ------------------------------ */
+.L_ALT_OP_CONST_STRING_JUMBO: /* 0x1b */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(27*4)
+
+/* ------------------------------ */
+.L_ALT_OP_CONST_CLASS: /* 0x1c */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(28*4)
+
+/* ------------------------------ */
+.L_ALT_OP_MONITOR_ENTER: /* 0x1d */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(29*4)
+
+/* ------------------------------ */
+.L_ALT_OP_MONITOR_EXIT: /* 0x1e */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(30*4)
+
+/* ------------------------------ */
+.L_ALT_OP_CHECK_CAST: /* 0x1f */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(31*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INSTANCE_OF: /* 0x20 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(32*4)
+
+/* ------------------------------ */
+.L_ALT_OP_ARRAY_LENGTH: /* 0x21 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(33*4)
+
+/* ------------------------------ */
+.L_ALT_OP_NEW_INSTANCE: /* 0x22 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(34*4)
+
+/* ------------------------------ */
+.L_ALT_OP_NEW_ARRAY: /* 0x23 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(35*4)
+
+/* ------------------------------ */
+.L_ALT_OP_FILLED_NEW_ARRAY: /* 0x24 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(36*4)
+
+/* ------------------------------ */
+.L_ALT_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(37*4)
+
+/* ------------------------------ */
+.L_ALT_OP_FILL_ARRAY_DATA: /* 0x26 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(38*4)
+
+/* ------------------------------ */
+.L_ALT_OP_THROW: /* 0x27 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(39*4)
+
+/* ------------------------------ */
+.L_ALT_OP_GOTO: /* 0x28 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(40*4)
+
+/* ------------------------------ */
+.L_ALT_OP_GOTO_16: /* 0x29 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(41*4)
+
+/* ------------------------------ */
+.L_ALT_OP_GOTO_32: /* 0x2a */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(42*4)
+
+/* ------------------------------ */
+.L_ALT_OP_PACKED_SWITCH: /* 0x2b */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(43*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SPARSE_SWITCH: /* 0x2c */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(44*4)
+
+/* ------------------------------ */
+.L_ALT_OP_CMPL_FLOAT: /* 0x2d */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(45*4)
+
+/* ------------------------------ */
+.L_ALT_OP_CMPG_FLOAT: /* 0x2e */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(46*4)
+
+/* ------------------------------ */
+.L_ALT_OP_CMPL_DOUBLE: /* 0x2f */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(47*4)
+
+/* ------------------------------ */
+.L_ALT_OP_CMPG_DOUBLE: /* 0x30 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(48*4)
+
+/* ------------------------------ */
+.L_ALT_OP_CMP_LONG: /* 0x31 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(49*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IF_EQ: /* 0x32 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(50*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IF_NE: /* 0x33 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(51*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IF_LT: /* 0x34 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(52*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IF_GE: /* 0x35 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(53*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IF_GT: /* 0x36 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(54*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IF_LE: /* 0x37 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(55*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IF_EQZ: /* 0x38 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(56*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IF_NEZ: /* 0x39 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(57*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IF_LTZ: /* 0x3a */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(58*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IF_GEZ: /* 0x3b */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(59*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IF_GTZ: /* 0x3c */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(60*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IF_LEZ: /* 0x3d */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(61*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_3E: /* 0x3e */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(62*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_3F: /* 0x3f */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(63*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_40: /* 0x40 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(64*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_41: /* 0x41 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(65*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_42: /* 0x42 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(66*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_43: /* 0x43 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(67*4)
+
+/* ------------------------------ */
+.L_ALT_OP_AGET: /* 0x44 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(68*4)
+
+/* ------------------------------ */
+.L_ALT_OP_AGET_WIDE: /* 0x45 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(69*4)
+
+/* ------------------------------ */
+.L_ALT_OP_AGET_OBJECT: /* 0x46 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(70*4)
+
+/* ------------------------------ */
+.L_ALT_OP_AGET_BOOLEAN: /* 0x47 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(71*4)
+
+/* ------------------------------ */
+.L_ALT_OP_AGET_BYTE: /* 0x48 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(72*4)
+
+/* ------------------------------ */
+.L_ALT_OP_AGET_CHAR: /* 0x49 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(73*4)
+
+/* ------------------------------ */
+.L_ALT_OP_AGET_SHORT: /* 0x4a */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(74*4)
+
+/* ------------------------------ */
+.L_ALT_OP_APUT: /* 0x4b */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(75*4)
+
+/* ------------------------------ */
+.L_ALT_OP_APUT_WIDE: /* 0x4c */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(76*4)
+
+/* ------------------------------ */
+.L_ALT_OP_APUT_OBJECT: /* 0x4d */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(77*4)
+
+/* ------------------------------ */
+.L_ALT_OP_APUT_BOOLEAN: /* 0x4e */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(78*4)
+
+/* ------------------------------ */
+.L_ALT_OP_APUT_BYTE: /* 0x4f */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(79*4)
+
+/* ------------------------------ */
+.L_ALT_OP_APUT_CHAR: /* 0x50 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(80*4)
+
+/* ------------------------------ */
+.L_ALT_OP_APUT_SHORT: /* 0x51 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(81*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IGET: /* 0x52 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(82*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IGET_WIDE: /* 0x53 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(83*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IGET_OBJECT: /* 0x54 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(84*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IGET_BOOLEAN: /* 0x55 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(85*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IGET_BYTE: /* 0x56 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(86*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IGET_CHAR: /* 0x57 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(87*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IGET_SHORT: /* 0x58 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(88*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IPUT: /* 0x59 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(89*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IPUT_WIDE: /* 0x5a */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(90*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IPUT_OBJECT: /* 0x5b */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(91*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IPUT_BOOLEAN: /* 0x5c */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(92*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IPUT_BYTE: /* 0x5d */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(93*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IPUT_CHAR: /* 0x5e */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(94*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IPUT_SHORT: /* 0x5f */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(95*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SGET: /* 0x60 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(96*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SGET_WIDE: /* 0x61 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(97*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SGET_OBJECT: /* 0x62 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(98*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SGET_BOOLEAN: /* 0x63 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(99*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SGET_BYTE: /* 0x64 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(100*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SGET_CHAR: /* 0x65 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(101*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SGET_SHORT: /* 0x66 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(102*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SPUT: /* 0x67 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(103*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SPUT_WIDE: /* 0x68 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(104*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SPUT_OBJECT: /* 0x69 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(105*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SPUT_BOOLEAN: /* 0x6a */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(106*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SPUT_BYTE: /* 0x6b */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(107*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SPUT_CHAR: /* 0x6c */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(108*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SPUT_SHORT: /* 0x6d */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(109*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INVOKE_VIRTUAL: /* 0x6e */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(110*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INVOKE_SUPER: /* 0x6f */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(111*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INVOKE_DIRECT: /* 0x70 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(112*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INVOKE_STATIC: /* 0x71 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(113*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INVOKE_INTERFACE: /* 0x72 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(114*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_73: /* 0x73 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(115*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(116*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INVOKE_SUPER_RANGE: /* 0x75 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(117*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INVOKE_DIRECT_RANGE: /* 0x76 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(118*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INVOKE_STATIC_RANGE: /* 0x77 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(119*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(120*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_79: /* 0x79 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(121*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_7A: /* 0x7a */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(122*4)
+
+/* ------------------------------ */
+.L_ALT_OP_NEG_INT: /* 0x7b */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(123*4)
+
+/* ------------------------------ */
+.L_ALT_OP_NOT_INT: /* 0x7c */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(124*4)
+
+/* ------------------------------ */
+.L_ALT_OP_NEG_LONG: /* 0x7d */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(125*4)
+
+/* ------------------------------ */
+.L_ALT_OP_NOT_LONG: /* 0x7e */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(126*4)
+
+/* ------------------------------ */
+.L_ALT_OP_NEG_FLOAT: /* 0x7f */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(127*4)
+
+/* ------------------------------ */
+.L_ALT_OP_NEG_DOUBLE: /* 0x80 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(128*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INT_TO_LONG: /* 0x81 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(129*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INT_TO_FLOAT: /* 0x82 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(130*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INT_TO_DOUBLE: /* 0x83 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(131*4)
+
+/* ------------------------------ */
+.L_ALT_OP_LONG_TO_INT: /* 0x84 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(132*4)
+
+/* ------------------------------ */
+.L_ALT_OP_LONG_TO_FLOAT: /* 0x85 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(133*4)
+
+/* ------------------------------ */
+.L_ALT_OP_LONG_TO_DOUBLE: /* 0x86 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(134*4)
+
+/* ------------------------------ */
+.L_ALT_OP_FLOAT_TO_INT: /* 0x87 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(135*4)
+
+/* ------------------------------ */
+.L_ALT_OP_FLOAT_TO_LONG: /* 0x88 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(136*4)
+
+/* ------------------------------ */
+.L_ALT_OP_FLOAT_TO_DOUBLE: /* 0x89 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(137*4)
+
+/* ------------------------------ */
+.L_ALT_OP_DOUBLE_TO_INT: /* 0x8a */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(138*4)
+
+/* ------------------------------ */
+.L_ALT_OP_DOUBLE_TO_LONG: /* 0x8b */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(139*4)
+
+/* ------------------------------ */
+.L_ALT_OP_DOUBLE_TO_FLOAT: /* 0x8c */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(140*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INT_TO_BYTE: /* 0x8d */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(141*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INT_TO_CHAR: /* 0x8e */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(142*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INT_TO_SHORT: /* 0x8f */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(143*4)
+
+/* ------------------------------ */
+.L_ALT_OP_ADD_INT: /* 0x90 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(144*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SUB_INT: /* 0x91 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(145*4)
+
+/* ------------------------------ */
+.L_ALT_OP_MUL_INT: /* 0x92 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(146*4)
+
+/* ------------------------------ */
+.L_ALT_OP_DIV_INT: /* 0x93 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(147*4)
+
+/* ------------------------------ */
+.L_ALT_OP_REM_INT: /* 0x94 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(148*4)
+
+/* ------------------------------ */
+.L_ALT_OP_AND_INT: /* 0x95 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(149*4)
+
+/* ------------------------------ */
+.L_ALT_OP_OR_INT: /* 0x96 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(150*4)
+
+/* ------------------------------ */
+.L_ALT_OP_XOR_INT: /* 0x97 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(151*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SHL_INT: /* 0x98 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(152*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SHR_INT: /* 0x99 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(153*4)
+
+/* ------------------------------ */
+.L_ALT_OP_USHR_INT: /* 0x9a */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(154*4)
+
+/* ------------------------------ */
+.L_ALT_OP_ADD_LONG: /* 0x9b */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(155*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SUB_LONG: /* 0x9c */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(156*4)
+
+/* ------------------------------ */
+.L_ALT_OP_MUL_LONG: /* 0x9d */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(157*4)
+
+/* ------------------------------ */
+.L_ALT_OP_DIV_LONG: /* 0x9e */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(158*4)
+
+/* ------------------------------ */
+.L_ALT_OP_REM_LONG: /* 0x9f */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(159*4)
+
+/* ------------------------------ */
+.L_ALT_OP_AND_LONG: /* 0xa0 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(160*4)
+
+/* ------------------------------ */
+.L_ALT_OP_OR_LONG: /* 0xa1 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(161*4)
+
+/* ------------------------------ */
+.L_ALT_OP_XOR_LONG: /* 0xa2 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(162*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SHL_LONG: /* 0xa3 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(163*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SHR_LONG: /* 0xa4 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(164*4)
+
+/* ------------------------------ */
+.L_ALT_OP_USHR_LONG: /* 0xa5 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(165*4)
+
+/* ------------------------------ */
+.L_ALT_OP_ADD_FLOAT: /* 0xa6 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(166*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SUB_FLOAT: /* 0xa7 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(167*4)
+
+/* ------------------------------ */
+.L_ALT_OP_MUL_FLOAT: /* 0xa8 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(168*4)
+
+/* ------------------------------ */
+.L_ALT_OP_DIV_FLOAT: /* 0xa9 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(169*4)
+
+/* ------------------------------ */
+.L_ALT_OP_REM_FLOAT: /* 0xaa */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(170*4)
+
+/* ------------------------------ */
+.L_ALT_OP_ADD_DOUBLE: /* 0xab */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(171*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SUB_DOUBLE: /* 0xac */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(172*4)
+
+/* ------------------------------ */
+.L_ALT_OP_MUL_DOUBLE: /* 0xad */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(173*4)
+
+/* ------------------------------ */
+.L_ALT_OP_DIV_DOUBLE: /* 0xae */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(174*4)
+
+/* ------------------------------ */
+.L_ALT_OP_REM_DOUBLE: /* 0xaf */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(175*4)
+
+/* ------------------------------ */
+.L_ALT_OP_ADD_INT_2ADDR: /* 0xb0 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(176*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SUB_INT_2ADDR: /* 0xb1 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(177*4)
+
+/* ------------------------------ */
+.L_ALT_OP_MUL_INT_2ADDR: /* 0xb2 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(178*4)
+
+/* ------------------------------ */
+.L_ALT_OP_DIV_INT_2ADDR: /* 0xb3 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(179*4)
+
+/* ------------------------------ */
+.L_ALT_OP_REM_INT_2ADDR: /* 0xb4 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(180*4)
+
+/* ------------------------------ */
+.L_ALT_OP_AND_INT_2ADDR: /* 0xb5 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(181*4)
+
+/* ------------------------------ */
+.L_ALT_OP_OR_INT_2ADDR: /* 0xb6 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(182*4)
+
+/* ------------------------------ */
+.L_ALT_OP_XOR_INT_2ADDR: /* 0xb7 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(183*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SHL_INT_2ADDR: /* 0xb8 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(184*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SHR_INT_2ADDR: /* 0xb9 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(185*4)
+
+/* ------------------------------ */
+.L_ALT_OP_USHR_INT_2ADDR: /* 0xba */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(186*4)
+
+/* ------------------------------ */
+.L_ALT_OP_ADD_LONG_2ADDR: /* 0xbb */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(187*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SUB_LONG_2ADDR: /* 0xbc */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(188*4)
+
+/* ------------------------------ */
+.L_ALT_OP_MUL_LONG_2ADDR: /* 0xbd */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(189*4)
+
+/* ------------------------------ */
+.L_ALT_OP_DIV_LONG_2ADDR: /* 0xbe */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(190*4)
+
+/* ------------------------------ */
+.L_ALT_OP_REM_LONG_2ADDR: /* 0xbf */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(191*4)
+
+/* ------------------------------ */
+.L_ALT_OP_AND_LONG_2ADDR: /* 0xc0 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(192*4)
+
+/* ------------------------------ */
+.L_ALT_OP_OR_LONG_2ADDR: /* 0xc1 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(193*4)
+
+/* ------------------------------ */
+.L_ALT_OP_XOR_LONG_2ADDR: /* 0xc2 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(194*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SHL_LONG_2ADDR: /* 0xc3 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(195*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SHR_LONG_2ADDR: /* 0xc4 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(196*4)
+
+/* ------------------------------ */
+.L_ALT_OP_USHR_LONG_2ADDR: /* 0xc5 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(197*4)
+
+/* ------------------------------ */
+.L_ALT_OP_ADD_FLOAT_2ADDR: /* 0xc6 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(198*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SUB_FLOAT_2ADDR: /* 0xc7 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(199*4)
+
+/* ------------------------------ */
+.L_ALT_OP_MUL_FLOAT_2ADDR: /* 0xc8 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(200*4)
+
+/* ------------------------------ */
+.L_ALT_OP_DIV_FLOAT_2ADDR: /* 0xc9 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(201*4)
+
+/* ------------------------------ */
+.L_ALT_OP_REM_FLOAT_2ADDR: /* 0xca */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(202*4)
+
+/* ------------------------------ */
+.L_ALT_OP_ADD_DOUBLE_2ADDR: /* 0xcb */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(203*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SUB_DOUBLE_2ADDR: /* 0xcc */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(204*4)
+
+/* ------------------------------ */
+.L_ALT_OP_MUL_DOUBLE_2ADDR: /* 0xcd */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(205*4)
+
+/* ------------------------------ */
+.L_ALT_OP_DIV_DOUBLE_2ADDR: /* 0xce */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(206*4)
+
+/* ------------------------------ */
+.L_ALT_OP_REM_DOUBLE_2ADDR: /* 0xcf */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(207*4)
+
+/* ------------------------------ */
+.L_ALT_OP_ADD_INT_LIT16: /* 0xd0 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(208*4)
+
+/* ------------------------------ */
+.L_ALT_OP_RSUB_INT: /* 0xd1 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(209*4)
+
+/* ------------------------------ */
+.L_ALT_OP_MUL_INT_LIT16: /* 0xd2 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(210*4)
+
+/* ------------------------------ */
+.L_ALT_OP_DIV_INT_LIT16: /* 0xd3 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(211*4)
+
+/* ------------------------------ */
+.L_ALT_OP_REM_INT_LIT16: /* 0xd4 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(212*4)
+
+/* ------------------------------ */
+.L_ALT_OP_AND_INT_LIT16: /* 0xd5 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(213*4)
+
+/* ------------------------------ */
+.L_ALT_OP_OR_INT_LIT16: /* 0xd6 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(214*4)
+
+/* ------------------------------ */
+.L_ALT_OP_XOR_INT_LIT16: /* 0xd7 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(215*4)
+
+/* ------------------------------ */
+.L_ALT_OP_ADD_INT_LIT8: /* 0xd8 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(216*4)
+
+/* ------------------------------ */
+.L_ALT_OP_RSUB_INT_LIT8: /* 0xd9 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(217*4)
+
+/* ------------------------------ */
+.L_ALT_OP_MUL_INT_LIT8: /* 0xda */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(218*4)
+
+/* ------------------------------ */
+.L_ALT_OP_DIV_INT_LIT8: /* 0xdb */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(219*4)
+
+/* ------------------------------ */
+.L_ALT_OP_REM_INT_LIT8: /* 0xdc */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(220*4)
+
+/* ------------------------------ */
+.L_ALT_OP_AND_INT_LIT8: /* 0xdd */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(221*4)
+
+/* ------------------------------ */
+.L_ALT_OP_OR_INT_LIT8: /* 0xde */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(222*4)
+
+/* ------------------------------ */
+.L_ALT_OP_XOR_INT_LIT8: /* 0xdf */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(223*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SHL_INT_LIT8: /* 0xe0 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(224*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SHR_INT_LIT8: /* 0xe1 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(225*4)
+
+/* ------------------------------ */
+.L_ALT_OP_USHR_INT_LIT8: /* 0xe2 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(226*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IGET_VOLATILE: /* 0xe3 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(227*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IPUT_VOLATILE: /* 0xe4 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(228*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SGET_VOLATILE: /* 0xe5 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(229*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SPUT_VOLATILE: /* 0xe6 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(230*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IGET_OBJECT_VOLATILE: /* 0xe7 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(231*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IGET_WIDE_VOLATILE: /* 0xe8 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(232*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IPUT_WIDE_VOLATILE: /* 0xe9 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(233*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SGET_WIDE_VOLATILE: /* 0xea */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(234*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SPUT_WIDE_VOLATILE: /* 0xeb */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(235*4)
+
+/* ------------------------------ */
+.L_ALT_OP_BREAKPOINT: /* 0xec */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(236*4)
+
+/* ------------------------------ */
+.L_ALT_OP_THROW_VERIFICATION_ERROR: /* 0xed */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(237*4)
+
+/* ------------------------------ */
+.L_ALT_OP_EXECUTE_INLINE: /* 0xee */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(238*4)
+
+/* ------------------------------ */
+.L_ALT_OP_EXECUTE_INLINE_RANGE: /* 0xef */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(239*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INVOKE_OBJECT_INIT_RANGE: /* 0xf0 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(240*4)
+
+/* ------------------------------ */
+.L_ALT_OP_RETURN_VOID_BARRIER: /* 0xf1 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(241*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IGET_QUICK: /* 0xf2 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(242*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IGET_WIDE_QUICK: /* 0xf3 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(243*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IGET_OBJECT_QUICK: /* 0xf4 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(244*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IPUT_QUICK: /* 0xf5 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(245*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IPUT_WIDE_QUICK: /* 0xf6 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(246*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IPUT_OBJECT_QUICK: /* 0xf7 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(247*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(248*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(249*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INVOKE_SUPER_QUICK: /* 0xfa */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(250*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(251*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IPUT_OBJECT_VOLATILE: /* 0xfc */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(252*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SGET_OBJECT_VOLATILE: /* 0xfd */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(253*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SPUT_OBJECT_VOLATILE: /* 0xfe */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(254*4)
+
+/* ------------------------------ */
+.L_ALT_OP_DISPATCH_FF: /* 0xff */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(255*4)
+
+/* ------------------------------ */
+.L_ALT_OP_CONST_CLASS_JUMBO: /* 0x100 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(256*4)
+
+/* ------------------------------ */
+.L_ALT_OP_CHECK_CAST_JUMBO: /* 0x101 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(257*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INSTANCE_OF_JUMBO: /* 0x102 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(258*4)
+
+/* ------------------------------ */
+.L_ALT_OP_NEW_INSTANCE_JUMBO: /* 0x103 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(259*4)
+
+/* ------------------------------ */
+.L_ALT_OP_NEW_ARRAY_JUMBO: /* 0x104 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(260*4)
+
+/* ------------------------------ */
+.L_ALT_OP_FILLED_NEW_ARRAY_JUMBO: /* 0x105 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(261*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IGET_JUMBO: /* 0x106 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(262*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IGET_WIDE_JUMBO: /* 0x107 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(263*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IGET_OBJECT_JUMBO: /* 0x108 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(264*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IGET_BOOLEAN_JUMBO: /* 0x109 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(265*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IGET_BYTE_JUMBO: /* 0x10a */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(266*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IGET_CHAR_JUMBO: /* 0x10b */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(267*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IGET_SHORT_JUMBO: /* 0x10c */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(268*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IPUT_JUMBO: /* 0x10d */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(269*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IPUT_WIDE_JUMBO: /* 0x10e */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(270*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IPUT_OBJECT_JUMBO: /* 0x10f */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(271*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IPUT_BOOLEAN_JUMBO: /* 0x110 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(272*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IPUT_BYTE_JUMBO: /* 0x111 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(273*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IPUT_CHAR_JUMBO: /* 0x112 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(274*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IPUT_SHORT_JUMBO: /* 0x113 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(275*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SGET_JUMBO: /* 0x114 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(276*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SGET_WIDE_JUMBO: /* 0x115 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(277*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SGET_OBJECT_JUMBO: /* 0x116 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(278*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SGET_BOOLEAN_JUMBO: /* 0x117 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(279*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SGET_BYTE_JUMBO: /* 0x118 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(280*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SGET_CHAR_JUMBO: /* 0x119 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(281*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SGET_SHORT_JUMBO: /* 0x11a */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(282*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SPUT_JUMBO: /* 0x11b */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(283*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SPUT_WIDE_JUMBO: /* 0x11c */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(284*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SPUT_OBJECT_JUMBO: /* 0x11d */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(285*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SPUT_BOOLEAN_JUMBO: /* 0x11e */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(286*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SPUT_BYTE_JUMBO: /* 0x11f */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(287*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SPUT_CHAR_JUMBO: /* 0x120 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(288*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SPUT_SHORT_JUMBO: /* 0x121 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(289*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INVOKE_VIRTUAL_JUMBO: /* 0x122 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(290*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INVOKE_SUPER_JUMBO: /* 0x123 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(291*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INVOKE_DIRECT_JUMBO: /* 0x124 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(292*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INVOKE_STATIC_JUMBO: /* 0x125 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(293*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INVOKE_INTERFACE_JUMBO: /* 0x126 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(294*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_27FF: /* 0x127 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(295*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_28FF: /* 0x128 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(296*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_29FF: /* 0x129 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(297*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_2AFF: /* 0x12a */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(298*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_2BFF: /* 0x12b */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(299*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_2CFF: /* 0x12c */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(300*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_2DFF: /* 0x12d */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(301*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_2EFF: /* 0x12e */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(302*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_2FFF: /* 0x12f */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(303*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_30FF: /* 0x130 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(304*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_31FF: /* 0x131 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(305*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_32FF: /* 0x132 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(306*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_33FF: /* 0x133 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(307*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_34FF: /* 0x134 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(308*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_35FF: /* 0x135 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(309*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_36FF: /* 0x136 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(310*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_37FF: /* 0x137 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(311*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_38FF: /* 0x138 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(312*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_39FF: /* 0x139 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(313*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_3AFF: /* 0x13a */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(314*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_3BFF: /* 0x13b */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(315*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_3CFF: /* 0x13c */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(316*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_3DFF: /* 0x13d */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(317*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_3EFF: /* 0x13e */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(318*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_3FFF: /* 0x13f */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(319*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_40FF: /* 0x140 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(320*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_41FF: /* 0x141 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(321*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_42FF: /* 0x142 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(322*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_43FF: /* 0x143 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(323*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_44FF: /* 0x144 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(324*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_45FF: /* 0x145 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(325*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_46FF: /* 0x146 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(326*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_47FF: /* 0x147 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(327*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_48FF: /* 0x148 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(328*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_49FF: /* 0x149 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(329*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_4AFF: /* 0x14a */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(330*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_4BFF: /* 0x14b */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(331*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_4CFF: /* 0x14c */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(332*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_4DFF: /* 0x14d */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(333*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_4EFF: /* 0x14e */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(334*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_4FFF: /* 0x14f */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(335*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_50FF: /* 0x150 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(336*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_51FF: /* 0x151 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(337*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_52FF: /* 0x152 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(338*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_53FF: /* 0x153 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(339*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_54FF: /* 0x154 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(340*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_55FF: /* 0x155 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(341*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_56FF: /* 0x156 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(342*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_57FF: /* 0x157 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(343*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_58FF: /* 0x158 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(344*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_59FF: /* 0x159 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(345*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_5AFF: /* 0x15a */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(346*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_5BFF: /* 0x15b */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(347*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_5CFF: /* 0x15c */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(348*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_5DFF: /* 0x15d */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(349*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_5EFF: /* 0x15e */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(350*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_5FFF: /* 0x15f */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(351*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_60FF: /* 0x160 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(352*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_61FF: /* 0x161 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(353*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_62FF: /* 0x162 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(354*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_63FF: /* 0x163 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(355*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_64FF: /* 0x164 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(356*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_65FF: /* 0x165 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(357*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_66FF: /* 0x166 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(358*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_67FF: /* 0x167 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(359*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_68FF: /* 0x168 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(360*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_69FF: /* 0x169 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(361*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_6AFF: /* 0x16a */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(362*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_6BFF: /* 0x16b */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(363*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_6CFF: /* 0x16c */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(364*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_6DFF: /* 0x16d */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(365*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_6EFF: /* 0x16e */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(366*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_6FFF: /* 0x16f */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(367*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_70FF: /* 0x170 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(368*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_71FF: /* 0x171 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(369*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_72FF: /* 0x172 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(370*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_73FF: /* 0x173 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(371*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_74FF: /* 0x174 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(372*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_75FF: /* 0x175 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(373*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_76FF: /* 0x176 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(374*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_77FF: /* 0x177 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(375*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_78FF: /* 0x178 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(376*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_79FF: /* 0x179 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(377*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_7AFF: /* 0x17a */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(378*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_7BFF: /* 0x17b */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(379*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_7CFF: /* 0x17c */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(380*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_7DFF: /* 0x17d */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(381*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_7EFF: /* 0x17e */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(382*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_7FFF: /* 0x17f */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(383*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_80FF: /* 0x180 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(384*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_81FF: /* 0x181 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(385*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_82FF: /* 0x182 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(386*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_83FF: /* 0x183 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(387*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_84FF: /* 0x184 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(388*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_85FF: /* 0x185 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(389*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_86FF: /* 0x186 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(390*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_87FF: /* 0x187 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(391*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_88FF: /* 0x188 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(392*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_89FF: /* 0x189 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(393*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_8AFF: /* 0x18a */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(394*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_8BFF: /* 0x18b */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(395*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_8CFF: /* 0x18c */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(396*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_8DFF: /* 0x18d */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(397*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_8EFF: /* 0x18e */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(398*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_8FFF: /* 0x18f */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(399*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_90FF: /* 0x190 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(400*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_91FF: /* 0x191 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(401*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_92FF: /* 0x192 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(402*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_93FF: /* 0x193 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(403*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_94FF: /* 0x194 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(404*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_95FF: /* 0x195 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(405*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_96FF: /* 0x196 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(406*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_97FF: /* 0x197 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(407*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_98FF: /* 0x198 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(408*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_99FF: /* 0x199 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(409*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_9AFF: /* 0x19a */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(410*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_9BFF: /* 0x19b */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(411*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_9CFF: /* 0x19c */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(412*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_9DFF: /* 0x19d */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(413*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_9EFF: /* 0x19e */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(414*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_9FFF: /* 0x19f */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(415*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_A0FF: /* 0x1a0 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(416*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_A1FF: /* 0x1a1 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(417*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_A2FF: /* 0x1a2 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(418*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_A3FF: /* 0x1a3 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(419*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_A4FF: /* 0x1a4 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(420*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_A5FF: /* 0x1a5 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(421*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_A6FF: /* 0x1a6 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(422*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_A7FF: /* 0x1a7 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(423*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_A8FF: /* 0x1a8 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(424*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_A9FF: /* 0x1a9 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(425*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_AAFF: /* 0x1aa */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(426*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_ABFF: /* 0x1ab */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(427*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_ACFF: /* 0x1ac */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(428*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_ADFF: /* 0x1ad */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(429*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_AEFF: /* 0x1ae */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(430*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_AFFF: /* 0x1af */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(431*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_B0FF: /* 0x1b0 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(432*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_B1FF: /* 0x1b1 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(433*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_B2FF: /* 0x1b2 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(434*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_B3FF: /* 0x1b3 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(435*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_B4FF: /* 0x1b4 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(436*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_B5FF: /* 0x1b5 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(437*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_B6FF: /* 0x1b6 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(438*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_B7FF: /* 0x1b7 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(439*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_B8FF: /* 0x1b8 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(440*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_B9FF: /* 0x1b9 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(441*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_BAFF: /* 0x1ba */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(442*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_BBFF: /* 0x1bb */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(443*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_BCFF: /* 0x1bc */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(444*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_BDFF: /* 0x1bd */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(445*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_BEFF: /* 0x1be */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(446*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_BFFF: /* 0x1bf */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(447*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_C0FF: /* 0x1c0 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(448*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_C1FF: /* 0x1c1 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(449*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_C2FF: /* 0x1c2 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(450*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_C3FF: /* 0x1c3 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(451*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_C4FF: /* 0x1c4 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(452*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_C5FF: /* 0x1c5 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(453*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_C6FF: /* 0x1c6 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(454*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_C7FF: /* 0x1c7 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(455*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_C8FF: /* 0x1c8 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(456*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_C9FF: /* 0x1c9 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(457*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_CAFF: /* 0x1ca */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(458*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_CBFF: /* 0x1cb */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(459*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_CCFF: /* 0x1cc */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(460*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_CDFF: /* 0x1cd */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(461*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_CEFF: /* 0x1ce */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(462*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_CFFF: /* 0x1cf */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(463*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_D0FF: /* 0x1d0 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(464*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_D1FF: /* 0x1d1 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(465*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_D2FF: /* 0x1d2 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(466*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_D3FF: /* 0x1d3 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(467*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_D4FF: /* 0x1d4 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(468*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_D5FF: /* 0x1d5 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(469*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_D6FF: /* 0x1d6 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(470*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_D7FF: /* 0x1d7 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(471*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_D8FF: /* 0x1d8 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(472*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_D9FF: /* 0x1d9 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(473*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_DAFF: /* 0x1da */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(474*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_DBFF: /* 0x1db */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(475*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_DCFF: /* 0x1dc */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(476*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_DDFF: /* 0x1dd */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(477*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_DEFF: /* 0x1de */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(478*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_DFFF: /* 0x1df */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(479*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_E0FF: /* 0x1e0 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(480*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_E1FF: /* 0x1e1 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(481*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_E2FF: /* 0x1e2 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(482*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_E3FF: /* 0x1e3 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(483*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_E4FF: /* 0x1e4 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(484*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_E5FF: /* 0x1e5 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(485*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_E6FF: /* 0x1e6 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(486*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_E7FF: /* 0x1e7 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(487*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_E8FF: /* 0x1e8 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(488*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_E9FF: /* 0x1e9 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(489*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_EAFF: /* 0x1ea */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(490*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_EBFF: /* 0x1eb */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(491*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_ECFF: /* 0x1ec */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(492*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_EDFF: /* 0x1ed */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(493*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_EEFF: /* 0x1ee */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(494*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_EFFF: /* 0x1ef */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(495*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_F0FF: /* 0x1f0 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(496*4)
+
+/* ------------------------------ */
+.L_ALT_OP_UNUSED_F1FF: /* 0x1f1 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(497*4)
+
+/* ------------------------------ */
+.L_ALT_OP_INVOKE_OBJECT_INIT_JUMBO: /* 0x1f2 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(498*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IGET_VOLATILE_JUMBO: /* 0x1f3 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(499*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IGET_WIDE_VOLATILE_JUMBO: /* 0x1f4 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(500*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IGET_OBJECT_VOLATILE_JUMBO: /* 0x1f5 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(501*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IPUT_VOLATILE_JUMBO: /* 0x1f6 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(502*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IPUT_WIDE_VOLATILE_JUMBO: /* 0x1f7 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(503*4)
+
+/* ------------------------------ */
+.L_ALT_OP_IPUT_OBJECT_VOLATILE_JUMBO: /* 0x1f8 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(504*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SGET_VOLATILE_JUMBO: /* 0x1f9 */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(505*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SGET_WIDE_VOLATILE_JUMBO: /* 0x1fa */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(506*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SGET_OBJECT_VOLATILE_JUMBO: /* 0x1fb */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(507*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SPUT_VOLATILE_JUMBO: /* 0x1fc */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(508*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SPUT_WIDE_VOLATILE_JUMBO: /* 0x1fd */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(509*4)
+
+/* ------------------------------ */
+.L_ALT_OP_SPUT_OBJECT_VOLATILE_JUMBO: /* 0x1fe */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(510*4)
+
+/* ------------------------------ */
+.L_ALT_OP_THROW_VERIFICATION_ERROR_JUMBO: /* 0x1ff */
+/* File: x86/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(511*4)
+
+    .size   dvmAsmAltInstructionStartCode, .-dvmAsmAltInstructionStartCode
+    .global dvmAsmAltInstructionEndCode
+dvmAsmAltInstructionEndCode:
+
+    .global dvmAsmInstructionStart
+    .text
+dvmAsmInstructionStart:
+    .long .L_OP_NOP /* 0x00 */
+    .long .L_OP_MOVE /* 0x01 */
+    .long .L_OP_MOVE_FROM16 /* 0x02 */
+    .long .L_OP_MOVE_16 /* 0x03 */
+    .long .L_OP_MOVE_WIDE /* 0x04 */
+    .long .L_OP_MOVE_WIDE_FROM16 /* 0x05 */
+    .long .L_OP_MOVE_WIDE_16 /* 0x06 */
+    .long .L_OP_MOVE_OBJECT /* 0x07 */
+    .long .L_OP_MOVE_OBJECT_FROM16 /* 0x08 */
+    .long .L_OP_MOVE_OBJECT_16 /* 0x09 */
+    .long .L_OP_MOVE_RESULT /* 0x0a */
+    .long .L_OP_MOVE_RESULT_WIDE /* 0x0b */
+    .long .L_OP_MOVE_RESULT_OBJECT /* 0x0c */
+    .long .L_OP_MOVE_EXCEPTION /* 0x0d */
+    .long .L_OP_RETURN_VOID /* 0x0e */
+    .long .L_OP_RETURN /* 0x0f */
+    .long .L_OP_RETURN_WIDE /* 0x10 */
+    .long .L_OP_RETURN_OBJECT /* 0x11 */
+    .long .L_OP_CONST_4 /* 0x12 */
+    .long .L_OP_CONST_16 /* 0x13 */
+    .long .L_OP_CONST /* 0x14 */
+    .long .L_OP_CONST_HIGH16 /* 0x15 */
+    .long .L_OP_CONST_WIDE_16 /* 0x16 */
+    .long .L_OP_CONST_WIDE_32 /* 0x17 */
+    .long .L_OP_CONST_WIDE /* 0x18 */
+    .long .L_OP_CONST_WIDE_HIGH16 /* 0x19 */
+    .long .L_OP_CONST_STRING /* 0x1a */
+    .long .L_OP_CONST_STRING_JUMBO /* 0x1b */
+    .long .L_OP_CONST_CLASS /* 0x1c */
+    .long .L_OP_MONITOR_ENTER /* 0x1d */
+    .long .L_OP_MONITOR_EXIT /* 0x1e */
+    .long .L_OP_CHECK_CAST /* 0x1f */
+    .long .L_OP_INSTANCE_OF /* 0x20 */
+    .long .L_OP_ARRAY_LENGTH /* 0x21 */
+    .long .L_OP_NEW_INSTANCE /* 0x22 */
+    .long .L_OP_NEW_ARRAY /* 0x23 */
+    .long .L_OP_FILLED_NEW_ARRAY /* 0x24 */
+    .long .L_OP_FILLED_NEW_ARRAY_RANGE /* 0x25 */
+    .long .L_OP_FILL_ARRAY_DATA /* 0x26 */
+    .long .L_OP_THROW /* 0x27 */
+    .long .L_OP_GOTO /* 0x28 */
+    .long .L_OP_GOTO_16 /* 0x29 */
+    .long .L_OP_GOTO_32 /* 0x2a */
+    .long .L_OP_PACKED_SWITCH /* 0x2b */
+    .long .L_OP_SPARSE_SWITCH /* 0x2c */
+    .long .L_OP_CMPL_FLOAT /* 0x2d */
+    .long .L_OP_CMPG_FLOAT /* 0x2e */
+    .long .L_OP_CMPL_DOUBLE /* 0x2f */
+    .long .L_OP_CMPG_DOUBLE /* 0x30 */
+    .long .L_OP_CMP_LONG /* 0x31 */
+    .long .L_OP_IF_EQ /* 0x32 */
+    .long .L_OP_IF_NE /* 0x33 */
+    .long .L_OP_IF_LT /* 0x34 */
+    .long .L_OP_IF_GE /* 0x35 */
+    .long .L_OP_IF_GT /* 0x36 */
+    .long .L_OP_IF_LE /* 0x37 */
+    .long .L_OP_IF_EQZ /* 0x38 */
+    .long .L_OP_IF_NEZ /* 0x39 */
+    .long .L_OP_IF_LTZ /* 0x3a */
+    .long .L_OP_IF_GEZ /* 0x3b */
+    .long .L_OP_IF_GTZ /* 0x3c */
+    .long .L_OP_IF_LEZ /* 0x3d */
+    .long .L_OP_UNUSED_3E /* 0x3e */
+    .long .L_OP_UNUSED_3F /* 0x3f */
+    .long .L_OP_UNUSED_40 /* 0x40 */
+    .long .L_OP_UNUSED_41 /* 0x41 */
+    .long .L_OP_UNUSED_42 /* 0x42 */
+    .long .L_OP_UNUSED_43 /* 0x43 */
+    .long .L_OP_AGET /* 0x44 */
+    .long .L_OP_AGET_WIDE /* 0x45 */
+    .long .L_OP_AGET_OBJECT /* 0x46 */
+    .long .L_OP_AGET_BOOLEAN /* 0x47 */
+    .long .L_OP_AGET_BYTE /* 0x48 */
+    .long .L_OP_AGET_CHAR /* 0x49 */
+    .long .L_OP_AGET_SHORT /* 0x4a */
+    .long .L_OP_APUT /* 0x4b */
+    .long .L_OP_APUT_WIDE /* 0x4c */
+    .long .L_OP_APUT_OBJECT /* 0x4d */
+    .long .L_OP_APUT_BOOLEAN /* 0x4e */
+    .long .L_OP_APUT_BYTE /* 0x4f */
+    .long .L_OP_APUT_CHAR /* 0x50 */
+    .long .L_OP_APUT_SHORT /* 0x51 */
+    .long .L_OP_IGET /* 0x52 */
+    .long .L_OP_IGET_WIDE /* 0x53 */
+    .long .L_OP_IGET_OBJECT /* 0x54 */
+    .long .L_OP_IGET_BOOLEAN /* 0x55 */
+    .long .L_OP_IGET_BYTE /* 0x56 */
+    .long .L_OP_IGET_CHAR /* 0x57 */
+    .long .L_OP_IGET_SHORT /* 0x58 */
+    .long .L_OP_IPUT /* 0x59 */
+    .long .L_OP_IPUT_WIDE /* 0x5a */
+    .long .L_OP_IPUT_OBJECT /* 0x5b */
+    .long .L_OP_IPUT_BOOLEAN /* 0x5c */
+    .long .L_OP_IPUT_BYTE /* 0x5d */
+    .long .L_OP_IPUT_CHAR /* 0x5e */
+    .long .L_OP_IPUT_SHORT /* 0x5f */
+    .long .L_OP_SGET /* 0x60 */
+    .long .L_OP_SGET_WIDE /* 0x61 */
+    .long .L_OP_SGET_OBJECT /* 0x62 */
+    .long .L_OP_SGET_BOOLEAN /* 0x63 */
+    .long .L_OP_SGET_BYTE /* 0x64 */
+    .long .L_OP_SGET_CHAR /* 0x65 */
+    .long .L_OP_SGET_SHORT /* 0x66 */
+    .long .L_OP_SPUT /* 0x67 */
+    .long .L_OP_SPUT_WIDE /* 0x68 */
+    .long .L_OP_SPUT_OBJECT /* 0x69 */
+    .long .L_OP_SPUT_BOOLEAN /* 0x6a */
+    .long .L_OP_SPUT_BYTE /* 0x6b */
+    .long .L_OP_SPUT_CHAR /* 0x6c */
+    .long .L_OP_SPUT_SHORT /* 0x6d */
+    .long .L_OP_INVOKE_VIRTUAL /* 0x6e */
+    .long .L_OP_INVOKE_SUPER /* 0x6f */
+    .long .L_OP_INVOKE_DIRECT /* 0x70 */
+    .long .L_OP_INVOKE_STATIC /* 0x71 */
+    .long .L_OP_INVOKE_INTERFACE /* 0x72 */
+    .long .L_OP_UNUSED_73 /* 0x73 */
+    .long .L_OP_INVOKE_VIRTUAL_RANGE /* 0x74 */
+    .long .L_OP_INVOKE_SUPER_RANGE /* 0x75 */
+    .long .L_OP_INVOKE_DIRECT_RANGE /* 0x76 */
+    .long .L_OP_INVOKE_STATIC_RANGE /* 0x77 */
+    .long .L_OP_INVOKE_INTERFACE_RANGE /* 0x78 */
+    .long .L_OP_UNUSED_79 /* 0x79 */
+    .long .L_OP_UNUSED_7A /* 0x7a */
+    .long .L_OP_NEG_INT /* 0x7b */
+    .long .L_OP_NOT_INT /* 0x7c */
+    .long .L_OP_NEG_LONG /* 0x7d */
+    .long .L_OP_NOT_LONG /* 0x7e */
+    .long .L_OP_NEG_FLOAT /* 0x7f */
+    .long .L_OP_NEG_DOUBLE /* 0x80 */
+    .long .L_OP_INT_TO_LONG /* 0x81 */
+    .long .L_OP_INT_TO_FLOAT /* 0x82 */
+    .long .L_OP_INT_TO_DOUBLE /* 0x83 */
+    .long .L_OP_LONG_TO_INT /* 0x84 */
+    .long .L_OP_LONG_TO_FLOAT /* 0x85 */
+    .long .L_OP_LONG_TO_DOUBLE /* 0x86 */
+    .long .L_OP_FLOAT_TO_INT /* 0x87 */
+    .long .L_OP_FLOAT_TO_LONG /* 0x88 */
+    .long .L_OP_FLOAT_TO_DOUBLE /* 0x89 */
+    .long .L_OP_DOUBLE_TO_INT /* 0x8a */
+    .long .L_OP_DOUBLE_TO_LONG /* 0x8b */
+    .long .L_OP_DOUBLE_TO_FLOAT /* 0x8c */
+    .long .L_OP_INT_TO_BYTE /* 0x8d */
+    .long .L_OP_INT_TO_CHAR /* 0x8e */
+    .long .L_OP_INT_TO_SHORT /* 0x8f */
+    .long .L_OP_ADD_INT /* 0x90 */
+    .long .L_OP_SUB_INT /* 0x91 */
+    .long .L_OP_MUL_INT /* 0x92 */
+    .long .L_OP_DIV_INT /* 0x93 */
+    .long .L_OP_REM_INT /* 0x94 */
+    .long .L_OP_AND_INT /* 0x95 */
+    .long .L_OP_OR_INT /* 0x96 */
+    .long .L_OP_XOR_INT /* 0x97 */
+    .long .L_OP_SHL_INT /* 0x98 */
+    .long .L_OP_SHR_INT /* 0x99 */
+    .long .L_OP_USHR_INT /* 0x9a */
+    .long .L_OP_ADD_LONG /* 0x9b */
+    .long .L_OP_SUB_LONG /* 0x9c */
+    .long .L_OP_MUL_LONG /* 0x9d */
+    .long .L_OP_DIV_LONG /* 0x9e */
+    .long .L_OP_REM_LONG /* 0x9f */
+    .long .L_OP_AND_LONG /* 0xa0 */
+    .long .L_OP_OR_LONG /* 0xa1 */
+    .long .L_OP_XOR_LONG /* 0xa2 */
+    .long .L_OP_SHL_LONG /* 0xa3 */
+    .long .L_OP_SHR_LONG /* 0xa4 */
+    .long .L_OP_USHR_LONG /* 0xa5 */
+    .long .L_OP_ADD_FLOAT /* 0xa6 */
+    .long .L_OP_SUB_FLOAT /* 0xa7 */
+    .long .L_OP_MUL_FLOAT /* 0xa8 */
+    .long .L_OP_DIV_FLOAT /* 0xa9 */
+    .long .L_OP_REM_FLOAT /* 0xaa */
+    .long .L_OP_ADD_DOUBLE /* 0xab */
+    .long .L_OP_SUB_DOUBLE /* 0xac */
+    .long .L_OP_MUL_DOUBLE /* 0xad */
+    .long .L_OP_DIV_DOUBLE /* 0xae */
+    .long .L_OP_REM_DOUBLE /* 0xaf */
+    .long .L_OP_ADD_INT_2ADDR /* 0xb0 */
+    .long .L_OP_SUB_INT_2ADDR /* 0xb1 */
+    .long .L_OP_MUL_INT_2ADDR /* 0xb2 */
+    .long .L_OP_DIV_INT_2ADDR /* 0xb3 */
+    .long .L_OP_REM_INT_2ADDR /* 0xb4 */
+    .long .L_OP_AND_INT_2ADDR /* 0xb5 */
+    .long .L_OP_OR_INT_2ADDR /* 0xb6 */
+    .long .L_OP_XOR_INT_2ADDR /* 0xb7 */
+    .long .L_OP_SHL_INT_2ADDR /* 0xb8 */
+    .long .L_OP_SHR_INT_2ADDR /* 0xb9 */
+    .long .L_OP_USHR_INT_2ADDR /* 0xba */
+    .long .L_OP_ADD_LONG_2ADDR /* 0xbb */
+    .long .L_OP_SUB_LONG_2ADDR /* 0xbc */
+    .long .L_OP_MUL_LONG_2ADDR /* 0xbd */
+    .long .L_OP_DIV_LONG_2ADDR /* 0xbe */
+    .long .L_OP_REM_LONG_2ADDR /* 0xbf */
+    .long .L_OP_AND_LONG_2ADDR /* 0xc0 */
+    .long .L_OP_OR_LONG_2ADDR /* 0xc1 */
+    .long .L_OP_XOR_LONG_2ADDR /* 0xc2 */
+    .long .L_OP_SHL_LONG_2ADDR /* 0xc3 */
+    .long .L_OP_SHR_LONG_2ADDR /* 0xc4 */
+    .long .L_OP_USHR_LONG_2ADDR /* 0xc5 */
+    .long .L_OP_ADD_FLOAT_2ADDR /* 0xc6 */
+    .long .L_OP_SUB_FLOAT_2ADDR /* 0xc7 */
+    .long .L_OP_MUL_FLOAT_2ADDR /* 0xc8 */
+    .long .L_OP_DIV_FLOAT_2ADDR /* 0xc9 */
+    .long .L_OP_REM_FLOAT_2ADDR /* 0xca */
+    .long .L_OP_ADD_DOUBLE_2ADDR /* 0xcb */
+    .long .L_OP_SUB_DOUBLE_2ADDR /* 0xcc */
+    .long .L_OP_MUL_DOUBLE_2ADDR /* 0xcd */
+    .long .L_OP_DIV_DOUBLE_2ADDR /* 0xce */
+    .long .L_OP_REM_DOUBLE_2ADDR /* 0xcf */
+    .long .L_OP_ADD_INT_LIT16 /* 0xd0 */
+    .long .L_OP_RSUB_INT /* 0xd1 */
+    .long .L_OP_MUL_INT_LIT16 /* 0xd2 */
+    .long .L_OP_DIV_INT_LIT16 /* 0xd3 */
+    .long .L_OP_REM_INT_LIT16 /* 0xd4 */
+    .long .L_OP_AND_INT_LIT16 /* 0xd5 */
+    .long .L_OP_OR_INT_LIT16 /* 0xd6 */
+    .long .L_OP_XOR_INT_LIT16 /* 0xd7 */
+    .long .L_OP_ADD_INT_LIT8 /* 0xd8 */
+    .long .L_OP_RSUB_INT_LIT8 /* 0xd9 */
+    .long .L_OP_MUL_INT_LIT8 /* 0xda */
+    .long .L_OP_DIV_INT_LIT8 /* 0xdb */
+    .long .L_OP_REM_INT_LIT8 /* 0xdc */
+    .long .L_OP_AND_INT_LIT8 /* 0xdd */
+    .long .L_OP_OR_INT_LIT8 /* 0xde */
+    .long .L_OP_XOR_INT_LIT8 /* 0xdf */
+    .long .L_OP_SHL_INT_LIT8 /* 0xe0 */
+    .long .L_OP_SHR_INT_LIT8 /* 0xe1 */
+    .long .L_OP_USHR_INT_LIT8 /* 0xe2 */
+    .long .L_OP_IGET_VOLATILE /* 0xe3 */
+    .long .L_OP_IPUT_VOLATILE /* 0xe4 */
+    .long .L_OP_SGET_VOLATILE /* 0xe5 */
+    .long .L_OP_SPUT_VOLATILE /* 0xe6 */
+    .long .L_OP_IGET_OBJECT_VOLATILE /* 0xe7 */
+    .long .L_OP_IGET_WIDE_VOLATILE /* 0xe8 */
+    .long .L_OP_IPUT_WIDE_VOLATILE /* 0xe9 */
+    .long .L_OP_SGET_WIDE_VOLATILE /* 0xea */
+    .long .L_OP_SPUT_WIDE_VOLATILE /* 0xeb */
+    .long .L_OP_BREAKPOINT /* 0xec */
+    .long .L_OP_THROW_VERIFICATION_ERROR /* 0xed */
+    .long .L_OP_EXECUTE_INLINE /* 0xee */
+    .long .L_OP_EXECUTE_INLINE_RANGE /* 0xef */
+    .long .L_OP_INVOKE_OBJECT_INIT_RANGE /* 0xf0 */
+    .long .L_OP_RETURN_VOID_BARRIER /* 0xf1 */
+    .long .L_OP_IGET_QUICK /* 0xf2 */
+    .long .L_OP_IGET_WIDE_QUICK /* 0xf3 */
+    .long .L_OP_IGET_OBJECT_QUICK /* 0xf4 */
+    .long .L_OP_IPUT_QUICK /* 0xf5 */
+    .long .L_OP_IPUT_WIDE_QUICK /* 0xf6 */
+    .long .L_OP_IPUT_OBJECT_QUICK /* 0xf7 */
+    .long .L_OP_INVOKE_VIRTUAL_QUICK /* 0xf8 */
+    .long .L_OP_INVOKE_VIRTUAL_QUICK_RANGE /* 0xf9 */
+    .long .L_OP_INVOKE_SUPER_QUICK /* 0xfa */
+    .long .L_OP_INVOKE_SUPER_QUICK_RANGE /* 0xfb */
+    .long .L_OP_IPUT_OBJECT_VOLATILE /* 0xfc */
+    .long .L_OP_SGET_OBJECT_VOLATILE /* 0xfd */
+    .long .L_OP_SPUT_OBJECT_VOLATILE /* 0xfe */
+    .long .L_OP_DISPATCH_FF /* 0xff */
+    .long .L_OP_CONST_CLASS_JUMBO /* 0x100 */
+    .long .L_OP_CHECK_CAST_JUMBO /* 0x101 */
+    .long .L_OP_INSTANCE_OF_JUMBO /* 0x102 */
+    .long .L_OP_NEW_INSTANCE_JUMBO /* 0x103 */
+    .long .L_OP_NEW_ARRAY_JUMBO /* 0x104 */
+    .long .L_OP_FILLED_NEW_ARRAY_JUMBO /* 0x105 */
+    .long .L_OP_IGET_JUMBO /* 0x106 */
+    .long .L_OP_IGET_WIDE_JUMBO /* 0x107 */
+    .long .L_OP_IGET_OBJECT_JUMBO /* 0x108 */
+    .long .L_OP_IGET_BOOLEAN_JUMBO /* 0x109 */
+    .long .L_OP_IGET_BYTE_JUMBO /* 0x10a */
+    .long .L_OP_IGET_CHAR_JUMBO /* 0x10b */
+    .long .L_OP_IGET_SHORT_JUMBO /* 0x10c */
+    .long .L_OP_IPUT_JUMBO /* 0x10d */
+    .long .L_OP_IPUT_WIDE_JUMBO /* 0x10e */
+    .long .L_OP_IPUT_OBJECT_JUMBO /* 0x10f */
+    .long .L_OP_IPUT_BOOLEAN_JUMBO /* 0x110 */
+    .long .L_OP_IPUT_BYTE_JUMBO /* 0x111 */
+    .long .L_OP_IPUT_CHAR_JUMBO /* 0x112 */
+    .long .L_OP_IPUT_SHORT_JUMBO /* 0x113 */
+    .long .L_OP_SGET_JUMBO /* 0x114 */
+    .long .L_OP_SGET_WIDE_JUMBO /* 0x115 */
+    .long .L_OP_SGET_OBJECT_JUMBO /* 0x116 */
+    .long .L_OP_SGET_BOOLEAN_JUMBO /* 0x117 */
+    .long .L_OP_SGET_BYTE_JUMBO /* 0x118 */
+    .long .L_OP_SGET_CHAR_JUMBO /* 0x119 */
+    .long .L_OP_SGET_SHORT_JUMBO /* 0x11a */
+    .long .L_OP_SPUT_JUMBO /* 0x11b */
+    .long .L_OP_SPUT_WIDE_JUMBO /* 0x11c */
+    .long .L_OP_SPUT_OBJECT_JUMBO /* 0x11d */
+    .long .L_OP_SPUT_BOOLEAN_JUMBO /* 0x11e */
+    .long .L_OP_SPUT_BYTE_JUMBO /* 0x11f */
+    .long .L_OP_SPUT_CHAR_JUMBO /* 0x120 */
+    .long .L_OP_SPUT_SHORT_JUMBO /* 0x121 */
+    .long .L_OP_INVOKE_VIRTUAL_JUMBO /* 0x122 */
+    .long .L_OP_INVOKE_SUPER_JUMBO /* 0x123 */
+    .long .L_OP_INVOKE_DIRECT_JUMBO /* 0x124 */
+    .long .L_OP_INVOKE_STATIC_JUMBO /* 0x125 */
+    .long .L_OP_INVOKE_INTERFACE_JUMBO /* 0x126 */
+    .long .L_OP_UNUSED_27FF /* 0x127 */
+    .long .L_OP_UNUSED_28FF /* 0x128 */
+    .long .L_OP_UNUSED_29FF /* 0x129 */
+    .long .L_OP_UNUSED_2AFF /* 0x12a */
+    .long .L_OP_UNUSED_2BFF /* 0x12b */
+    .long .L_OP_UNUSED_2CFF /* 0x12c */
+    .long .L_OP_UNUSED_2DFF /* 0x12d */
+    .long .L_OP_UNUSED_2EFF /* 0x12e */
+    .long .L_OP_UNUSED_2FFF /* 0x12f */
+    .long .L_OP_UNUSED_30FF /* 0x130 */
+    .long .L_OP_UNUSED_31FF /* 0x131 */
+    .long .L_OP_UNUSED_32FF /* 0x132 */
+    .long .L_OP_UNUSED_33FF /* 0x133 */
+    .long .L_OP_UNUSED_34FF /* 0x134 */
+    .long .L_OP_UNUSED_35FF /* 0x135 */
+    .long .L_OP_UNUSED_36FF /* 0x136 */
+    .long .L_OP_UNUSED_37FF /* 0x137 */
+    .long .L_OP_UNUSED_38FF /* 0x138 */
+    .long .L_OP_UNUSED_39FF /* 0x139 */
+    .long .L_OP_UNUSED_3AFF /* 0x13a */
+    .long .L_OP_UNUSED_3BFF /* 0x13b */
+    .long .L_OP_UNUSED_3CFF /* 0x13c */
+    .long .L_OP_UNUSED_3DFF /* 0x13d */
+    .long .L_OP_UNUSED_3EFF /* 0x13e */
+    .long .L_OP_UNUSED_3FFF /* 0x13f */
+    .long .L_OP_UNUSED_40FF /* 0x140 */
+    .long .L_OP_UNUSED_41FF /* 0x141 */
+    .long .L_OP_UNUSED_42FF /* 0x142 */
+    .long .L_OP_UNUSED_43FF /* 0x143 */
+    .long .L_OP_UNUSED_44FF /* 0x144 */
+    .long .L_OP_UNUSED_45FF /* 0x145 */
+    .long .L_OP_UNUSED_46FF /* 0x146 */
+    .long .L_OP_UNUSED_47FF /* 0x147 */
+    .long .L_OP_UNUSED_48FF /* 0x148 */
+    .long .L_OP_UNUSED_49FF /* 0x149 */
+    .long .L_OP_UNUSED_4AFF /* 0x14a */
+    .long .L_OP_UNUSED_4BFF /* 0x14b */
+    .long .L_OP_UNUSED_4CFF /* 0x14c */
+    .long .L_OP_UNUSED_4DFF /* 0x14d */
+    .long .L_OP_UNUSED_4EFF /* 0x14e */
+    .long .L_OP_UNUSED_4FFF /* 0x14f */
+    .long .L_OP_UNUSED_50FF /* 0x150 */
+    .long .L_OP_UNUSED_51FF /* 0x151 */
+    .long .L_OP_UNUSED_52FF /* 0x152 */
+    .long .L_OP_UNUSED_53FF /* 0x153 */
+    .long .L_OP_UNUSED_54FF /* 0x154 */
+    .long .L_OP_UNUSED_55FF /* 0x155 */
+    .long .L_OP_UNUSED_56FF /* 0x156 */
+    .long .L_OP_UNUSED_57FF /* 0x157 */
+    .long .L_OP_UNUSED_58FF /* 0x158 */
+    .long .L_OP_UNUSED_59FF /* 0x159 */
+    .long .L_OP_UNUSED_5AFF /* 0x15a */
+    .long .L_OP_UNUSED_5BFF /* 0x15b */
+    .long .L_OP_UNUSED_5CFF /* 0x15c */
+    .long .L_OP_UNUSED_5DFF /* 0x15d */
+    .long .L_OP_UNUSED_5EFF /* 0x15e */
+    .long .L_OP_UNUSED_5FFF /* 0x15f */
+    .long .L_OP_UNUSED_60FF /* 0x160 */
+    .long .L_OP_UNUSED_61FF /* 0x161 */
+    .long .L_OP_UNUSED_62FF /* 0x162 */
+    .long .L_OP_UNUSED_63FF /* 0x163 */
+    .long .L_OP_UNUSED_64FF /* 0x164 */
+    .long .L_OP_UNUSED_65FF /* 0x165 */
+    .long .L_OP_UNUSED_66FF /* 0x166 */
+    .long .L_OP_UNUSED_67FF /* 0x167 */
+    .long .L_OP_UNUSED_68FF /* 0x168 */
+    .long .L_OP_UNUSED_69FF /* 0x169 */
+    .long .L_OP_UNUSED_6AFF /* 0x16a */
+    .long .L_OP_UNUSED_6BFF /* 0x16b */
+    .long .L_OP_UNUSED_6CFF /* 0x16c */
+    .long .L_OP_UNUSED_6DFF /* 0x16d */
+    .long .L_OP_UNUSED_6EFF /* 0x16e */
+    .long .L_OP_UNUSED_6FFF /* 0x16f */
+    .long .L_OP_UNUSED_70FF /* 0x170 */
+    .long .L_OP_UNUSED_71FF /* 0x171 */
+    .long .L_OP_UNUSED_72FF /* 0x172 */
+    .long .L_OP_UNUSED_73FF /* 0x173 */
+    .long .L_OP_UNUSED_74FF /* 0x174 */
+    .long .L_OP_UNUSED_75FF /* 0x175 */
+    .long .L_OP_UNUSED_76FF /* 0x176 */
+    .long .L_OP_UNUSED_77FF /* 0x177 */
+    .long .L_OP_UNUSED_78FF /* 0x178 */
+    .long .L_OP_UNUSED_79FF /* 0x179 */
+    .long .L_OP_UNUSED_7AFF /* 0x17a */
+    .long .L_OP_UNUSED_7BFF /* 0x17b */
+    .long .L_OP_UNUSED_7CFF /* 0x17c */
+    .long .L_OP_UNUSED_7DFF /* 0x17d */
+    .long .L_OP_UNUSED_7EFF /* 0x17e */
+    .long .L_OP_UNUSED_7FFF /* 0x17f */
+    .long .L_OP_UNUSED_80FF /* 0x180 */
+    .long .L_OP_UNUSED_81FF /* 0x181 */
+    .long .L_OP_UNUSED_82FF /* 0x182 */
+    .long .L_OP_UNUSED_83FF /* 0x183 */
+    .long .L_OP_UNUSED_84FF /* 0x184 */
+    .long .L_OP_UNUSED_85FF /* 0x185 */
+    .long .L_OP_UNUSED_86FF /* 0x186 */
+    .long .L_OP_UNUSED_87FF /* 0x187 */
+    .long .L_OP_UNUSED_88FF /* 0x188 */
+    .long .L_OP_UNUSED_89FF /* 0x189 */
+    .long .L_OP_UNUSED_8AFF /* 0x18a */
+    .long .L_OP_UNUSED_8BFF /* 0x18b */
+    .long .L_OP_UNUSED_8CFF /* 0x18c */
+    .long .L_OP_UNUSED_8DFF /* 0x18d */
+    .long .L_OP_UNUSED_8EFF /* 0x18e */
+    .long .L_OP_UNUSED_8FFF /* 0x18f */
+    .long .L_OP_UNUSED_90FF /* 0x190 */
+    .long .L_OP_UNUSED_91FF /* 0x191 */
+    .long .L_OP_UNUSED_92FF /* 0x192 */
+    .long .L_OP_UNUSED_93FF /* 0x193 */
+    .long .L_OP_UNUSED_94FF /* 0x194 */
+    .long .L_OP_UNUSED_95FF /* 0x195 */
+    .long .L_OP_UNUSED_96FF /* 0x196 */
+    .long .L_OP_UNUSED_97FF /* 0x197 */
+    .long .L_OP_UNUSED_98FF /* 0x198 */
+    .long .L_OP_UNUSED_99FF /* 0x199 */
+    .long .L_OP_UNUSED_9AFF /* 0x19a */
+    .long .L_OP_UNUSED_9BFF /* 0x19b */
+    .long .L_OP_UNUSED_9CFF /* 0x19c */
+    .long .L_OP_UNUSED_9DFF /* 0x19d */
+    .long .L_OP_UNUSED_9EFF /* 0x19e */
+    .long .L_OP_UNUSED_9FFF /* 0x19f */
+    .long .L_OP_UNUSED_A0FF /* 0x1a0 */
+    .long .L_OP_UNUSED_A1FF /* 0x1a1 */
+    .long .L_OP_UNUSED_A2FF /* 0x1a2 */
+    .long .L_OP_UNUSED_A3FF /* 0x1a3 */
+    .long .L_OP_UNUSED_A4FF /* 0x1a4 */
+    .long .L_OP_UNUSED_A5FF /* 0x1a5 */
+    .long .L_OP_UNUSED_A6FF /* 0x1a6 */
+    .long .L_OP_UNUSED_A7FF /* 0x1a7 */
+    .long .L_OP_UNUSED_A8FF /* 0x1a8 */
+    .long .L_OP_UNUSED_A9FF /* 0x1a9 */
+    .long .L_OP_UNUSED_AAFF /* 0x1aa */
+    .long .L_OP_UNUSED_ABFF /* 0x1ab */
+    .long .L_OP_UNUSED_ACFF /* 0x1ac */
+    .long .L_OP_UNUSED_ADFF /* 0x1ad */
+    .long .L_OP_UNUSED_AEFF /* 0x1ae */
+    .long .L_OP_UNUSED_AFFF /* 0x1af */
+    .long .L_OP_UNUSED_B0FF /* 0x1b0 */
+    .long .L_OP_UNUSED_B1FF /* 0x1b1 */
+    .long .L_OP_UNUSED_B2FF /* 0x1b2 */
+    .long .L_OP_UNUSED_B3FF /* 0x1b3 */
+    .long .L_OP_UNUSED_B4FF /* 0x1b4 */
+    .long .L_OP_UNUSED_B5FF /* 0x1b5 */
+    .long .L_OP_UNUSED_B6FF /* 0x1b6 */
+    .long .L_OP_UNUSED_B7FF /* 0x1b7 */
+    .long .L_OP_UNUSED_B8FF /* 0x1b8 */
+    .long .L_OP_UNUSED_B9FF /* 0x1b9 */
+    .long .L_OP_UNUSED_BAFF /* 0x1ba */
+    .long .L_OP_UNUSED_BBFF /* 0x1bb */
+    .long .L_OP_UNUSED_BCFF /* 0x1bc */
+    .long .L_OP_UNUSED_BDFF /* 0x1bd */
+    .long .L_OP_UNUSED_BEFF /* 0x1be */
+    .long .L_OP_UNUSED_BFFF /* 0x1bf */
+    .long .L_OP_UNUSED_C0FF /* 0x1c0 */
+    .long .L_OP_UNUSED_C1FF /* 0x1c1 */
+    .long .L_OP_UNUSED_C2FF /* 0x1c2 */
+    .long .L_OP_UNUSED_C3FF /* 0x1c3 */
+    .long .L_OP_UNUSED_C4FF /* 0x1c4 */
+    .long .L_OP_UNUSED_C5FF /* 0x1c5 */
+    .long .L_OP_UNUSED_C6FF /* 0x1c6 */
+    .long .L_OP_UNUSED_C7FF /* 0x1c7 */
+    .long .L_OP_UNUSED_C8FF /* 0x1c8 */
+    .long .L_OP_UNUSED_C9FF /* 0x1c9 */
+    .long .L_OP_UNUSED_CAFF /* 0x1ca */
+    .long .L_OP_UNUSED_CBFF /* 0x1cb */
+    .long .L_OP_UNUSED_CCFF /* 0x1cc */
+    .long .L_OP_UNUSED_CDFF /* 0x1cd */
+    .long .L_OP_UNUSED_CEFF /* 0x1ce */
+    .long .L_OP_UNUSED_CFFF /* 0x1cf */
+    .long .L_OP_UNUSED_D0FF /* 0x1d0 */
+    .long .L_OP_UNUSED_D1FF /* 0x1d1 */
+    .long .L_OP_UNUSED_D2FF /* 0x1d2 */
+    .long .L_OP_UNUSED_D3FF /* 0x1d3 */
+    .long .L_OP_UNUSED_D4FF /* 0x1d4 */
+    .long .L_OP_UNUSED_D5FF /* 0x1d5 */
+    .long .L_OP_UNUSED_D6FF /* 0x1d6 */
+    .long .L_OP_UNUSED_D7FF /* 0x1d7 */
+    .long .L_OP_UNUSED_D8FF /* 0x1d8 */
+    .long .L_OP_UNUSED_D9FF /* 0x1d9 */
+    .long .L_OP_UNUSED_DAFF /* 0x1da */
+    .long .L_OP_UNUSED_DBFF /* 0x1db */
+    .long .L_OP_UNUSED_DCFF /* 0x1dc */
+    .long .L_OP_UNUSED_DDFF /* 0x1dd */
+    .long .L_OP_UNUSED_DEFF /* 0x1de */
+    .long .L_OP_UNUSED_DFFF /* 0x1df */
+    .long .L_OP_UNUSED_E0FF /* 0x1e0 */
+    .long .L_OP_UNUSED_E1FF /* 0x1e1 */
+    .long .L_OP_UNUSED_E2FF /* 0x1e2 */
+    .long .L_OP_UNUSED_E3FF /* 0x1e3 */
+    .long .L_OP_UNUSED_E4FF /* 0x1e4 */
+    .long .L_OP_UNUSED_E5FF /* 0x1e5 */
+    .long .L_OP_UNUSED_E6FF /* 0x1e6 */
+    .long .L_OP_UNUSED_E7FF /* 0x1e7 */
+    .long .L_OP_UNUSED_E8FF /* 0x1e8 */
+    .long .L_OP_UNUSED_E9FF /* 0x1e9 */
+    .long .L_OP_UNUSED_EAFF /* 0x1ea */
+    .long .L_OP_UNUSED_EBFF /* 0x1eb */
+    .long .L_OP_UNUSED_ECFF /* 0x1ec */
+    .long .L_OP_UNUSED_EDFF /* 0x1ed */
+    .long .L_OP_UNUSED_EEFF /* 0x1ee */
+    .long .L_OP_UNUSED_EFFF /* 0x1ef */
+    .long .L_OP_UNUSED_F0FF /* 0x1f0 */
+    .long .L_OP_UNUSED_F1FF /* 0x1f1 */
+    .long .L_OP_INVOKE_OBJECT_INIT_JUMBO /* 0x1f2 */
+    .long .L_OP_IGET_VOLATILE_JUMBO /* 0x1f3 */
+    .long .L_OP_IGET_WIDE_VOLATILE_JUMBO /* 0x1f4 */
+    .long .L_OP_IGET_OBJECT_VOLATILE_JUMBO /* 0x1f5 */
+    .long .L_OP_IPUT_VOLATILE_JUMBO /* 0x1f6 */
+    .long .L_OP_IPUT_WIDE_VOLATILE_JUMBO /* 0x1f7 */
+    .long .L_OP_IPUT_OBJECT_VOLATILE_JUMBO /* 0x1f8 */
+    .long .L_OP_SGET_VOLATILE_JUMBO /* 0x1f9 */
+    .long .L_OP_SGET_WIDE_VOLATILE_JUMBO /* 0x1fa */
+    .long .L_OP_SGET_OBJECT_VOLATILE_JUMBO /* 0x1fb */
+    .long .L_OP_SPUT_VOLATILE_JUMBO /* 0x1fc */
+    .long .L_OP_SPUT_WIDE_VOLATILE_JUMBO /* 0x1fd */
+    .long .L_OP_SPUT_OBJECT_VOLATILE_JUMBO /* 0x1fe */
+    .long .L_OP_THROW_VERIFICATION_ERROR_JUMBO /* 0x1ff */
+
+    .global dvmAsmAltInstructionStart
+    .text
+dvmAsmAltInstructionStart:
+    .long .L_ALT_OP_NOP /* 0x00 */
+    .long .L_ALT_OP_MOVE /* 0x01 */
+    .long .L_ALT_OP_MOVE_FROM16 /* 0x02 */
+    .long .L_ALT_OP_MOVE_16 /* 0x03 */
+    .long .L_ALT_OP_MOVE_WIDE /* 0x04 */
+    .long .L_ALT_OP_MOVE_WIDE_FROM16 /* 0x05 */
+    .long .L_ALT_OP_MOVE_WIDE_16 /* 0x06 */
+    .long .L_ALT_OP_MOVE_OBJECT /* 0x07 */
+    .long .L_ALT_OP_MOVE_OBJECT_FROM16 /* 0x08 */
+    .long .L_ALT_OP_MOVE_OBJECT_16 /* 0x09 */
+    .long .L_ALT_OP_MOVE_RESULT /* 0x0a */
+    .long .L_ALT_OP_MOVE_RESULT_WIDE /* 0x0b */
+    .long .L_ALT_OP_MOVE_RESULT_OBJECT /* 0x0c */
+    .long .L_ALT_OP_MOVE_EXCEPTION /* 0x0d */
+    .long .L_ALT_OP_RETURN_VOID /* 0x0e */
+    .long .L_ALT_OP_RETURN /* 0x0f */
+    .long .L_ALT_OP_RETURN_WIDE /* 0x10 */
+    .long .L_ALT_OP_RETURN_OBJECT /* 0x11 */
+    .long .L_ALT_OP_CONST_4 /* 0x12 */
+    .long .L_ALT_OP_CONST_16 /* 0x13 */
+    .long .L_ALT_OP_CONST /* 0x14 */
+    .long .L_ALT_OP_CONST_HIGH16 /* 0x15 */
+    .long .L_ALT_OP_CONST_WIDE_16 /* 0x16 */
+    .long .L_ALT_OP_CONST_WIDE_32 /* 0x17 */
+    .long .L_ALT_OP_CONST_WIDE /* 0x18 */
+    .long .L_ALT_OP_CONST_WIDE_HIGH16 /* 0x19 */
+    .long .L_ALT_OP_CONST_STRING /* 0x1a */
+    .long .L_ALT_OP_CONST_STRING_JUMBO /* 0x1b */
+    .long .L_ALT_OP_CONST_CLASS /* 0x1c */
+    .long .L_ALT_OP_MONITOR_ENTER /* 0x1d */
+    .long .L_ALT_OP_MONITOR_EXIT /* 0x1e */
+    .long .L_ALT_OP_CHECK_CAST /* 0x1f */
+    .long .L_ALT_OP_INSTANCE_OF /* 0x20 */
+    .long .L_ALT_OP_ARRAY_LENGTH /* 0x21 */
+    .long .L_ALT_OP_NEW_INSTANCE /* 0x22 */
+    .long .L_ALT_OP_NEW_ARRAY /* 0x23 */
+    .long .L_ALT_OP_FILLED_NEW_ARRAY /* 0x24 */
+    .long .L_ALT_OP_FILLED_NEW_ARRAY_RANGE /* 0x25 */
+    .long .L_ALT_OP_FILL_ARRAY_DATA /* 0x26 */
+    .long .L_ALT_OP_THROW /* 0x27 */
+    .long .L_ALT_OP_GOTO /* 0x28 */
+    .long .L_ALT_OP_GOTO_16 /* 0x29 */
+    .long .L_ALT_OP_GOTO_32 /* 0x2a */
+    .long .L_ALT_OP_PACKED_SWITCH /* 0x2b */
+    .long .L_ALT_OP_SPARSE_SWITCH /* 0x2c */
+    .long .L_ALT_OP_CMPL_FLOAT /* 0x2d */
+    .long .L_ALT_OP_CMPG_FLOAT /* 0x2e */
+    .long .L_ALT_OP_CMPL_DOUBLE /* 0x2f */
+    .long .L_ALT_OP_CMPG_DOUBLE /* 0x30 */
+    .long .L_ALT_OP_CMP_LONG /* 0x31 */
+    .long .L_ALT_OP_IF_EQ /* 0x32 */
+    .long .L_ALT_OP_IF_NE /* 0x33 */
+    .long .L_ALT_OP_IF_LT /* 0x34 */
+    .long .L_ALT_OP_IF_GE /* 0x35 */
+    .long .L_ALT_OP_IF_GT /* 0x36 */
+    .long .L_ALT_OP_IF_LE /* 0x37 */
+    .long .L_ALT_OP_IF_EQZ /* 0x38 */
+    .long .L_ALT_OP_IF_NEZ /* 0x39 */
+    .long .L_ALT_OP_IF_LTZ /* 0x3a */
+    .long .L_ALT_OP_IF_GEZ /* 0x3b */
+    .long .L_ALT_OP_IF_GTZ /* 0x3c */
+    .long .L_ALT_OP_IF_LEZ /* 0x3d */
+    .long .L_ALT_OP_UNUSED_3E /* 0x3e */
+    .long .L_ALT_OP_UNUSED_3F /* 0x3f */
+    .long .L_ALT_OP_UNUSED_40 /* 0x40 */
+    .long .L_ALT_OP_UNUSED_41 /* 0x41 */
+    .long .L_ALT_OP_UNUSED_42 /* 0x42 */
+    .long .L_ALT_OP_UNUSED_43 /* 0x43 */
+    .long .L_ALT_OP_AGET /* 0x44 */
+    .long .L_ALT_OP_AGET_WIDE /* 0x45 */
+    .long .L_ALT_OP_AGET_OBJECT /* 0x46 */
+    .long .L_ALT_OP_AGET_BOOLEAN /* 0x47 */
+    .long .L_ALT_OP_AGET_BYTE /* 0x48 */
+    .long .L_ALT_OP_AGET_CHAR /* 0x49 */
+    .long .L_ALT_OP_AGET_SHORT /* 0x4a */
+    .long .L_ALT_OP_APUT /* 0x4b */
+    .long .L_ALT_OP_APUT_WIDE /* 0x4c */
+    .long .L_ALT_OP_APUT_OBJECT /* 0x4d */
+    .long .L_ALT_OP_APUT_BOOLEAN /* 0x4e */
+    .long .L_ALT_OP_APUT_BYTE /* 0x4f */
+    .long .L_ALT_OP_APUT_CHAR /* 0x50 */
+    .long .L_ALT_OP_APUT_SHORT /* 0x51 */
+    .long .L_ALT_OP_IGET /* 0x52 */
+    .long .L_ALT_OP_IGET_WIDE /* 0x53 */
+    .long .L_ALT_OP_IGET_OBJECT /* 0x54 */
+    .long .L_ALT_OP_IGET_BOOLEAN /* 0x55 */
+    .long .L_ALT_OP_IGET_BYTE /* 0x56 */
+    .long .L_ALT_OP_IGET_CHAR /* 0x57 */
+    .long .L_ALT_OP_IGET_SHORT /* 0x58 */
+    .long .L_ALT_OP_IPUT /* 0x59 */
+    .long .L_ALT_OP_IPUT_WIDE /* 0x5a */
+    .long .L_ALT_OP_IPUT_OBJECT /* 0x5b */
+    .long .L_ALT_OP_IPUT_BOOLEAN /* 0x5c */
+    .long .L_ALT_OP_IPUT_BYTE /* 0x5d */
+    .long .L_ALT_OP_IPUT_CHAR /* 0x5e */
+    .long .L_ALT_OP_IPUT_SHORT /* 0x5f */
+    .long .L_ALT_OP_SGET /* 0x60 */
+    .long .L_ALT_OP_SGET_WIDE /* 0x61 */
+    .long .L_ALT_OP_SGET_OBJECT /* 0x62 */
+    .long .L_ALT_OP_SGET_BOOLEAN /* 0x63 */
+    .long .L_ALT_OP_SGET_BYTE /* 0x64 */
+    .long .L_ALT_OP_SGET_CHAR /* 0x65 */
+    .long .L_ALT_OP_SGET_SHORT /* 0x66 */
+    .long .L_ALT_OP_SPUT /* 0x67 */
+    .long .L_ALT_OP_SPUT_WIDE /* 0x68 */
+    .long .L_ALT_OP_SPUT_OBJECT /* 0x69 */
+    .long .L_ALT_OP_SPUT_BOOLEAN /* 0x6a */
+    .long .L_ALT_OP_SPUT_BYTE /* 0x6b */
+    .long .L_ALT_OP_SPUT_CHAR /* 0x6c */
+    .long .L_ALT_OP_SPUT_SHORT /* 0x6d */
+    .long .L_ALT_OP_INVOKE_VIRTUAL /* 0x6e */
+    .long .L_ALT_OP_INVOKE_SUPER /* 0x6f */
+    .long .L_ALT_OP_INVOKE_DIRECT /* 0x70 */
+    .long .L_ALT_OP_INVOKE_STATIC /* 0x71 */
+    .long .L_ALT_OP_INVOKE_INTERFACE /* 0x72 */
+    .long .L_ALT_OP_UNUSED_73 /* 0x73 */
+    .long .L_ALT_OP_INVOKE_VIRTUAL_RANGE /* 0x74 */
+    .long .L_ALT_OP_INVOKE_SUPER_RANGE /* 0x75 */
+    .long .L_ALT_OP_INVOKE_DIRECT_RANGE /* 0x76 */
+    .long .L_ALT_OP_INVOKE_STATIC_RANGE /* 0x77 */
+    .long .L_ALT_OP_INVOKE_INTERFACE_RANGE /* 0x78 */
+    .long .L_ALT_OP_UNUSED_79 /* 0x79 */
+    .long .L_ALT_OP_UNUSED_7A /* 0x7a */
+    .long .L_ALT_OP_NEG_INT /* 0x7b */
+    .long .L_ALT_OP_NOT_INT /* 0x7c */
+    .long .L_ALT_OP_NEG_LONG /* 0x7d */
+    .long .L_ALT_OP_NOT_LONG /* 0x7e */
+    .long .L_ALT_OP_NEG_FLOAT /* 0x7f */
+    .long .L_ALT_OP_NEG_DOUBLE /* 0x80 */
+    .long .L_ALT_OP_INT_TO_LONG /* 0x81 */
+    .long .L_ALT_OP_INT_TO_FLOAT /* 0x82 */
+    .long .L_ALT_OP_INT_TO_DOUBLE /* 0x83 */
+    .long .L_ALT_OP_LONG_TO_INT /* 0x84 */
+    .long .L_ALT_OP_LONG_TO_FLOAT /* 0x85 */
+    .long .L_ALT_OP_LONG_TO_DOUBLE /* 0x86 */
+    .long .L_ALT_OP_FLOAT_TO_INT /* 0x87 */
+    .long .L_ALT_OP_FLOAT_TO_LONG /* 0x88 */
+    .long .L_ALT_OP_FLOAT_TO_DOUBLE /* 0x89 */
+    .long .L_ALT_OP_DOUBLE_TO_INT /* 0x8a */
+    .long .L_ALT_OP_DOUBLE_TO_LONG /* 0x8b */
+    .long .L_ALT_OP_DOUBLE_TO_FLOAT /* 0x8c */
+    .long .L_ALT_OP_INT_TO_BYTE /* 0x8d */
+    .long .L_ALT_OP_INT_TO_CHAR /* 0x8e */
+    .long .L_ALT_OP_INT_TO_SHORT /* 0x8f */
+    .long .L_ALT_OP_ADD_INT /* 0x90 */
+    .long .L_ALT_OP_SUB_INT /* 0x91 */
+    .long .L_ALT_OP_MUL_INT /* 0x92 */
+    .long .L_ALT_OP_DIV_INT /* 0x93 */
+    .long .L_ALT_OP_REM_INT /* 0x94 */
+    .long .L_ALT_OP_AND_INT /* 0x95 */
+    .long .L_ALT_OP_OR_INT /* 0x96 */
+    .long .L_ALT_OP_XOR_INT /* 0x97 */
+    .long .L_ALT_OP_SHL_INT /* 0x98 */
+    .long .L_ALT_OP_SHR_INT /* 0x99 */
+    .long .L_ALT_OP_USHR_INT /* 0x9a */
+    .long .L_ALT_OP_ADD_LONG /* 0x9b */
+    .long .L_ALT_OP_SUB_LONG /* 0x9c */
+    .long .L_ALT_OP_MUL_LONG /* 0x9d */
+    .long .L_ALT_OP_DIV_LONG /* 0x9e */
+    .long .L_ALT_OP_REM_LONG /* 0x9f */
+    .long .L_ALT_OP_AND_LONG /* 0xa0 */
+    .long .L_ALT_OP_OR_LONG /* 0xa1 */
+    .long .L_ALT_OP_XOR_LONG /* 0xa2 */
+    .long .L_ALT_OP_SHL_LONG /* 0xa3 */
+    .long .L_ALT_OP_SHR_LONG /* 0xa4 */
+    .long .L_ALT_OP_USHR_LONG /* 0xa5 */
+    .long .L_ALT_OP_ADD_FLOAT /* 0xa6 */
+    .long .L_ALT_OP_SUB_FLOAT /* 0xa7 */
+    .long .L_ALT_OP_MUL_FLOAT /* 0xa8 */
+    .long .L_ALT_OP_DIV_FLOAT /* 0xa9 */
+    .long .L_ALT_OP_REM_FLOAT /* 0xaa */
+    .long .L_ALT_OP_ADD_DOUBLE /* 0xab */
+    .long .L_ALT_OP_SUB_DOUBLE /* 0xac */
+    .long .L_ALT_OP_MUL_DOUBLE /* 0xad */
+    .long .L_ALT_OP_DIV_DOUBLE /* 0xae */
+    .long .L_ALT_OP_REM_DOUBLE /* 0xaf */
+    .long .L_ALT_OP_ADD_INT_2ADDR /* 0xb0 */
+    .long .L_ALT_OP_SUB_INT_2ADDR /* 0xb1 */
+    .long .L_ALT_OP_MUL_INT_2ADDR /* 0xb2 */
+    .long .L_ALT_OP_DIV_INT_2ADDR /* 0xb3 */
+    .long .L_ALT_OP_REM_INT_2ADDR /* 0xb4 */
+    .long .L_ALT_OP_AND_INT_2ADDR /* 0xb5 */
+    .long .L_ALT_OP_OR_INT_2ADDR /* 0xb6 */
+    .long .L_ALT_OP_XOR_INT_2ADDR /* 0xb7 */
+    .long .L_ALT_OP_SHL_INT_2ADDR /* 0xb8 */
+    .long .L_ALT_OP_SHR_INT_2ADDR /* 0xb9 */
+    .long .L_ALT_OP_USHR_INT_2ADDR /* 0xba */
+    .long .L_ALT_OP_ADD_LONG_2ADDR /* 0xbb */
+    .long .L_ALT_OP_SUB_LONG_2ADDR /* 0xbc */
+    .long .L_ALT_OP_MUL_LONG_2ADDR /* 0xbd */
+    .long .L_ALT_OP_DIV_LONG_2ADDR /* 0xbe */
+    .long .L_ALT_OP_REM_LONG_2ADDR /* 0xbf */
+    .long .L_ALT_OP_AND_LONG_2ADDR /* 0xc0 */
+    .long .L_ALT_OP_OR_LONG_2ADDR /* 0xc1 */
+    .long .L_ALT_OP_XOR_LONG_2ADDR /* 0xc2 */
+    .long .L_ALT_OP_SHL_LONG_2ADDR /* 0xc3 */
+    .long .L_ALT_OP_SHR_LONG_2ADDR /* 0xc4 */
+    .long .L_ALT_OP_USHR_LONG_2ADDR /* 0xc5 */
+    .long .L_ALT_OP_ADD_FLOAT_2ADDR /* 0xc6 */
+    .long .L_ALT_OP_SUB_FLOAT_2ADDR /* 0xc7 */
+    .long .L_ALT_OP_MUL_FLOAT_2ADDR /* 0xc8 */
+    .long .L_ALT_OP_DIV_FLOAT_2ADDR /* 0xc9 */
+    .long .L_ALT_OP_REM_FLOAT_2ADDR /* 0xca */
+    .long .L_ALT_OP_ADD_DOUBLE_2ADDR /* 0xcb */
+    .long .L_ALT_OP_SUB_DOUBLE_2ADDR /* 0xcc */
+    .long .L_ALT_OP_MUL_DOUBLE_2ADDR /* 0xcd */
+    .long .L_ALT_OP_DIV_DOUBLE_2ADDR /* 0xce */
+    .long .L_ALT_OP_REM_DOUBLE_2ADDR /* 0xcf */
+    .long .L_ALT_OP_ADD_INT_LIT16 /* 0xd0 */
+    .long .L_ALT_OP_RSUB_INT /* 0xd1 */
+    .long .L_ALT_OP_MUL_INT_LIT16 /* 0xd2 */
+    .long .L_ALT_OP_DIV_INT_LIT16 /* 0xd3 */
+    .long .L_ALT_OP_REM_INT_LIT16 /* 0xd4 */
+    .long .L_ALT_OP_AND_INT_LIT16 /* 0xd5 */
+    .long .L_ALT_OP_OR_INT_LIT16 /* 0xd6 */
+    .long .L_ALT_OP_XOR_INT_LIT16 /* 0xd7 */
+    .long .L_ALT_OP_ADD_INT_LIT8 /* 0xd8 */
+    .long .L_ALT_OP_RSUB_INT_LIT8 /* 0xd9 */
+    .long .L_ALT_OP_MUL_INT_LIT8 /* 0xda */
+    .long .L_ALT_OP_DIV_INT_LIT8 /* 0xdb */
+    .long .L_ALT_OP_REM_INT_LIT8 /* 0xdc */
+    .long .L_ALT_OP_AND_INT_LIT8 /* 0xdd */
+    .long .L_ALT_OP_OR_INT_LIT8 /* 0xde */
+    .long .L_ALT_OP_XOR_INT_LIT8 /* 0xdf */
+    .long .L_ALT_OP_SHL_INT_LIT8 /* 0xe0 */
+    .long .L_ALT_OP_SHR_INT_LIT8 /* 0xe1 */
+    .long .L_ALT_OP_USHR_INT_LIT8 /* 0xe2 */
+    .long .L_ALT_OP_IGET_VOLATILE /* 0xe3 */
+    .long .L_ALT_OP_IPUT_VOLATILE /* 0xe4 */
+    .long .L_ALT_OP_SGET_VOLATILE /* 0xe5 */
+    .long .L_ALT_OP_SPUT_VOLATILE /* 0xe6 */
+    .long .L_ALT_OP_IGET_OBJECT_VOLATILE /* 0xe7 */
+    .long .L_ALT_OP_IGET_WIDE_VOLATILE /* 0xe8 */
+    .long .L_ALT_OP_IPUT_WIDE_VOLATILE /* 0xe9 */
+    .long .L_ALT_OP_SGET_WIDE_VOLATILE /* 0xea */
+    .long .L_ALT_OP_SPUT_WIDE_VOLATILE /* 0xeb */
+    .long .L_ALT_OP_BREAKPOINT /* 0xec */
+    .long .L_ALT_OP_THROW_VERIFICATION_ERROR /* 0xed */
+    .long .L_ALT_OP_EXECUTE_INLINE /* 0xee */
+    .long .L_ALT_OP_EXECUTE_INLINE_RANGE /* 0xef */
+    .long .L_ALT_OP_INVOKE_OBJECT_INIT_RANGE /* 0xf0 */
+    .long .L_ALT_OP_RETURN_VOID_BARRIER /* 0xf1 */
+    .long .L_ALT_OP_IGET_QUICK /* 0xf2 */
+    .long .L_ALT_OP_IGET_WIDE_QUICK /* 0xf3 */
+    .long .L_ALT_OP_IGET_OBJECT_QUICK /* 0xf4 */
+    .long .L_ALT_OP_IPUT_QUICK /* 0xf5 */
+    .long .L_ALT_OP_IPUT_WIDE_QUICK /* 0xf6 */
+    .long .L_ALT_OP_IPUT_OBJECT_QUICK /* 0xf7 */
+    .long .L_ALT_OP_INVOKE_VIRTUAL_QUICK /* 0xf8 */
+    .long .L_ALT_OP_INVOKE_VIRTUAL_QUICK_RANGE /* 0xf9 */
+    .long .L_ALT_OP_INVOKE_SUPER_QUICK /* 0xfa */
+    .long .L_ALT_OP_INVOKE_SUPER_QUICK_RANGE /* 0xfb */
+    .long .L_ALT_OP_IPUT_OBJECT_VOLATILE /* 0xfc */
+    .long .L_ALT_OP_SGET_OBJECT_VOLATILE /* 0xfd */
+    .long .L_ALT_OP_SPUT_OBJECT_VOLATILE /* 0xfe */
+    .long .L_ALT_OP_DISPATCH_FF /* 0xff */
+    .long .L_ALT_OP_CONST_CLASS_JUMBO /* 0x100 */
+    .long .L_ALT_OP_CHECK_CAST_JUMBO /* 0x101 */
+    .long .L_ALT_OP_INSTANCE_OF_JUMBO /* 0x102 */
+    .long .L_ALT_OP_NEW_INSTANCE_JUMBO /* 0x103 */
+    .long .L_ALT_OP_NEW_ARRAY_JUMBO /* 0x104 */
+    .long .L_ALT_OP_FILLED_NEW_ARRAY_JUMBO /* 0x105 */
+    .long .L_ALT_OP_IGET_JUMBO /* 0x106 */
+    .long .L_ALT_OP_IGET_WIDE_JUMBO /* 0x107 */
+    .long .L_ALT_OP_IGET_OBJECT_JUMBO /* 0x108 */
+    .long .L_ALT_OP_IGET_BOOLEAN_JUMBO /* 0x109 */
+    .long .L_ALT_OP_IGET_BYTE_JUMBO /* 0x10a */
+    .long .L_ALT_OP_IGET_CHAR_JUMBO /* 0x10b */
+    .long .L_ALT_OP_IGET_SHORT_JUMBO /* 0x10c */
+    .long .L_ALT_OP_IPUT_JUMBO /* 0x10d */
+    .long .L_ALT_OP_IPUT_WIDE_JUMBO /* 0x10e */
+    .long .L_ALT_OP_IPUT_OBJECT_JUMBO /* 0x10f */
+    .long .L_ALT_OP_IPUT_BOOLEAN_JUMBO /* 0x110 */
+    .long .L_ALT_OP_IPUT_BYTE_JUMBO /* 0x111 */
+    .long .L_ALT_OP_IPUT_CHAR_JUMBO /* 0x112 */
+    .long .L_ALT_OP_IPUT_SHORT_JUMBO /* 0x113 */
+    .long .L_ALT_OP_SGET_JUMBO /* 0x114 */
+    .long .L_ALT_OP_SGET_WIDE_JUMBO /* 0x115 */
+    .long .L_ALT_OP_SGET_OBJECT_JUMBO /* 0x116 */
+    .long .L_ALT_OP_SGET_BOOLEAN_JUMBO /* 0x117 */
+    .long .L_ALT_OP_SGET_BYTE_JUMBO /* 0x118 */
+    .long .L_ALT_OP_SGET_CHAR_JUMBO /* 0x119 */
+    .long .L_ALT_OP_SGET_SHORT_JUMBO /* 0x11a */
+    .long .L_ALT_OP_SPUT_JUMBO /* 0x11b */
+    .long .L_ALT_OP_SPUT_WIDE_JUMBO /* 0x11c */
+    .long .L_ALT_OP_SPUT_OBJECT_JUMBO /* 0x11d */
+    .long .L_ALT_OP_SPUT_BOOLEAN_JUMBO /* 0x11e */
+    .long .L_ALT_OP_SPUT_BYTE_JUMBO /* 0x11f */
+    .long .L_ALT_OP_SPUT_CHAR_JUMBO /* 0x120 */
+    .long .L_ALT_OP_SPUT_SHORT_JUMBO /* 0x121 */
+    .long .L_ALT_OP_INVOKE_VIRTUAL_JUMBO /* 0x122 */
+    .long .L_ALT_OP_INVOKE_SUPER_JUMBO /* 0x123 */
+    .long .L_ALT_OP_INVOKE_DIRECT_JUMBO /* 0x124 */
+    .long .L_ALT_OP_INVOKE_STATIC_JUMBO /* 0x125 */
+    .long .L_ALT_OP_INVOKE_INTERFACE_JUMBO /* 0x126 */
+    .long .L_ALT_OP_UNUSED_27FF /* 0x127 */
+    .long .L_ALT_OP_UNUSED_28FF /* 0x128 */
+    .long .L_ALT_OP_UNUSED_29FF /* 0x129 */
+    .long .L_ALT_OP_UNUSED_2AFF /* 0x12a */
+    .long .L_ALT_OP_UNUSED_2BFF /* 0x12b */
+    .long .L_ALT_OP_UNUSED_2CFF /* 0x12c */
+    .long .L_ALT_OP_UNUSED_2DFF /* 0x12d */
+    .long .L_ALT_OP_UNUSED_2EFF /* 0x12e */
+    .long .L_ALT_OP_UNUSED_2FFF /* 0x12f */
+    .long .L_ALT_OP_UNUSED_30FF /* 0x130 */
+    .long .L_ALT_OP_UNUSED_31FF /* 0x131 */
+    .long .L_ALT_OP_UNUSED_32FF /* 0x132 */
+    .long .L_ALT_OP_UNUSED_33FF /* 0x133 */
+    .long .L_ALT_OP_UNUSED_34FF /* 0x134 */
+    .long .L_ALT_OP_UNUSED_35FF /* 0x135 */
+    .long .L_ALT_OP_UNUSED_36FF /* 0x136 */
+    .long .L_ALT_OP_UNUSED_37FF /* 0x137 */
+    .long .L_ALT_OP_UNUSED_38FF /* 0x138 */
+    .long .L_ALT_OP_UNUSED_39FF /* 0x139 */
+    .long .L_ALT_OP_UNUSED_3AFF /* 0x13a */
+    .long .L_ALT_OP_UNUSED_3BFF /* 0x13b */
+    .long .L_ALT_OP_UNUSED_3CFF /* 0x13c */
+    .long .L_ALT_OP_UNUSED_3DFF /* 0x13d */
+    .long .L_ALT_OP_UNUSED_3EFF /* 0x13e */
+    .long .L_ALT_OP_UNUSED_3FFF /* 0x13f */
+    .long .L_ALT_OP_UNUSED_40FF /* 0x140 */
+    .long .L_ALT_OP_UNUSED_41FF /* 0x141 */
+    .long .L_ALT_OP_UNUSED_42FF /* 0x142 */
+    .long .L_ALT_OP_UNUSED_43FF /* 0x143 */
+    .long .L_ALT_OP_UNUSED_44FF /* 0x144 */
+    .long .L_ALT_OP_UNUSED_45FF /* 0x145 */
+    .long .L_ALT_OP_UNUSED_46FF /* 0x146 */
+    .long .L_ALT_OP_UNUSED_47FF /* 0x147 */
+    .long .L_ALT_OP_UNUSED_48FF /* 0x148 */
+    .long .L_ALT_OP_UNUSED_49FF /* 0x149 */
+    .long .L_ALT_OP_UNUSED_4AFF /* 0x14a */
+    .long .L_ALT_OP_UNUSED_4BFF /* 0x14b */
+    .long .L_ALT_OP_UNUSED_4CFF /* 0x14c */
+    .long .L_ALT_OP_UNUSED_4DFF /* 0x14d */
+    .long .L_ALT_OP_UNUSED_4EFF /* 0x14e */
+    .long .L_ALT_OP_UNUSED_4FFF /* 0x14f */
+    .long .L_ALT_OP_UNUSED_50FF /* 0x150 */
+    .long .L_ALT_OP_UNUSED_51FF /* 0x151 */
+    .long .L_ALT_OP_UNUSED_52FF /* 0x152 */
+    .long .L_ALT_OP_UNUSED_53FF /* 0x153 */
+    .long .L_ALT_OP_UNUSED_54FF /* 0x154 */
+    .long .L_ALT_OP_UNUSED_55FF /* 0x155 */
+    .long .L_ALT_OP_UNUSED_56FF /* 0x156 */
+    .long .L_ALT_OP_UNUSED_57FF /* 0x157 */
+    .long .L_ALT_OP_UNUSED_58FF /* 0x158 */
+    .long .L_ALT_OP_UNUSED_59FF /* 0x159 */
+    .long .L_ALT_OP_UNUSED_5AFF /* 0x15a */
+    .long .L_ALT_OP_UNUSED_5BFF /* 0x15b */
+    .long .L_ALT_OP_UNUSED_5CFF /* 0x15c */
+    .long .L_ALT_OP_UNUSED_5DFF /* 0x15d */
+    .long .L_ALT_OP_UNUSED_5EFF /* 0x15e */
+    .long .L_ALT_OP_UNUSED_5FFF /* 0x15f */
+    .long .L_ALT_OP_UNUSED_60FF /* 0x160 */
+    .long .L_ALT_OP_UNUSED_61FF /* 0x161 */
+    .long .L_ALT_OP_UNUSED_62FF /* 0x162 */
+    .long .L_ALT_OP_UNUSED_63FF /* 0x163 */
+    .long .L_ALT_OP_UNUSED_64FF /* 0x164 */
+    .long .L_ALT_OP_UNUSED_65FF /* 0x165 */
+    .long .L_ALT_OP_UNUSED_66FF /* 0x166 */
+    .long .L_ALT_OP_UNUSED_67FF /* 0x167 */
+    .long .L_ALT_OP_UNUSED_68FF /* 0x168 */
+    .long .L_ALT_OP_UNUSED_69FF /* 0x169 */
+    .long .L_ALT_OP_UNUSED_6AFF /* 0x16a */
+    .long .L_ALT_OP_UNUSED_6BFF /* 0x16b */
+    .long .L_ALT_OP_UNUSED_6CFF /* 0x16c */
+    .long .L_ALT_OP_UNUSED_6DFF /* 0x16d */
+    .long .L_ALT_OP_UNUSED_6EFF /* 0x16e */
+    .long .L_ALT_OP_UNUSED_6FFF /* 0x16f */
+    .long .L_ALT_OP_UNUSED_70FF /* 0x170 */
+    .long .L_ALT_OP_UNUSED_71FF /* 0x171 */
+    .long .L_ALT_OP_UNUSED_72FF /* 0x172 */
+    .long .L_ALT_OP_UNUSED_73FF /* 0x173 */
+    .long .L_ALT_OP_UNUSED_74FF /* 0x174 */
+    .long .L_ALT_OP_UNUSED_75FF /* 0x175 */
+    .long .L_ALT_OP_UNUSED_76FF /* 0x176 */
+    .long .L_ALT_OP_UNUSED_77FF /* 0x177 */
+    .long .L_ALT_OP_UNUSED_78FF /* 0x178 */
+    .long .L_ALT_OP_UNUSED_79FF /* 0x179 */
+    .long .L_ALT_OP_UNUSED_7AFF /* 0x17a */
+    .long .L_ALT_OP_UNUSED_7BFF /* 0x17b */
+    .long .L_ALT_OP_UNUSED_7CFF /* 0x17c */
+    .long .L_ALT_OP_UNUSED_7DFF /* 0x17d */
+    .long .L_ALT_OP_UNUSED_7EFF /* 0x17e */
+    .long .L_ALT_OP_UNUSED_7FFF /* 0x17f */
+    .long .L_ALT_OP_UNUSED_80FF /* 0x180 */
+    .long .L_ALT_OP_UNUSED_81FF /* 0x181 */
+    .long .L_ALT_OP_UNUSED_82FF /* 0x182 */
+    .long .L_ALT_OP_UNUSED_83FF /* 0x183 */
+    .long .L_ALT_OP_UNUSED_84FF /* 0x184 */
+    .long .L_ALT_OP_UNUSED_85FF /* 0x185 */
+    .long .L_ALT_OP_UNUSED_86FF /* 0x186 */
+    .long .L_ALT_OP_UNUSED_87FF /* 0x187 */
+    .long .L_ALT_OP_UNUSED_88FF /* 0x188 */
+    .long .L_ALT_OP_UNUSED_89FF /* 0x189 */
+    .long .L_ALT_OP_UNUSED_8AFF /* 0x18a */
+    .long .L_ALT_OP_UNUSED_8BFF /* 0x18b */
+    .long .L_ALT_OP_UNUSED_8CFF /* 0x18c */
+    .long .L_ALT_OP_UNUSED_8DFF /* 0x18d */
+    .long .L_ALT_OP_UNUSED_8EFF /* 0x18e */
+    .long .L_ALT_OP_UNUSED_8FFF /* 0x18f */
+    .long .L_ALT_OP_UNUSED_90FF /* 0x190 */
+    .long .L_ALT_OP_UNUSED_91FF /* 0x191 */
+    .long .L_ALT_OP_UNUSED_92FF /* 0x192 */
+    .long .L_ALT_OP_UNUSED_93FF /* 0x193 */
+    .long .L_ALT_OP_UNUSED_94FF /* 0x194 */
+    .long .L_ALT_OP_UNUSED_95FF /* 0x195 */
+    .long .L_ALT_OP_UNUSED_96FF /* 0x196 */
+    .long .L_ALT_OP_UNUSED_97FF /* 0x197 */
+    .long .L_ALT_OP_UNUSED_98FF /* 0x198 */
+    .long .L_ALT_OP_UNUSED_99FF /* 0x199 */
+    .long .L_ALT_OP_UNUSED_9AFF /* 0x19a */
+    .long .L_ALT_OP_UNUSED_9BFF /* 0x19b */
+    .long .L_ALT_OP_UNUSED_9CFF /* 0x19c */
+    .long .L_ALT_OP_UNUSED_9DFF /* 0x19d */
+    .long .L_ALT_OP_UNUSED_9EFF /* 0x19e */
+    .long .L_ALT_OP_UNUSED_9FFF /* 0x19f */
+    .long .L_ALT_OP_UNUSED_A0FF /* 0x1a0 */
+    .long .L_ALT_OP_UNUSED_A1FF /* 0x1a1 */
+    .long .L_ALT_OP_UNUSED_A2FF /* 0x1a2 */
+    .long .L_ALT_OP_UNUSED_A3FF /* 0x1a3 */
+    .long .L_ALT_OP_UNUSED_A4FF /* 0x1a4 */
+    .long .L_ALT_OP_UNUSED_A5FF /* 0x1a5 */
+    .long .L_ALT_OP_UNUSED_A6FF /* 0x1a6 */
+    .long .L_ALT_OP_UNUSED_A7FF /* 0x1a7 */
+    .long .L_ALT_OP_UNUSED_A8FF /* 0x1a8 */
+    .long .L_ALT_OP_UNUSED_A9FF /* 0x1a9 */
+    .long .L_ALT_OP_UNUSED_AAFF /* 0x1aa */
+    .long .L_ALT_OP_UNUSED_ABFF /* 0x1ab */
+    .long .L_ALT_OP_UNUSED_ACFF /* 0x1ac */
+    .long .L_ALT_OP_UNUSED_ADFF /* 0x1ad */
+    .long .L_ALT_OP_UNUSED_AEFF /* 0x1ae */
+    .long .L_ALT_OP_UNUSED_AFFF /* 0x1af */
+    .long .L_ALT_OP_UNUSED_B0FF /* 0x1b0 */
+    .long .L_ALT_OP_UNUSED_B1FF /* 0x1b1 */
+    .long .L_ALT_OP_UNUSED_B2FF /* 0x1b2 */
+    .long .L_ALT_OP_UNUSED_B3FF /* 0x1b3 */
+    .long .L_ALT_OP_UNUSED_B4FF /* 0x1b4 */
+    .long .L_ALT_OP_UNUSED_B5FF /* 0x1b5 */
+    .long .L_ALT_OP_UNUSED_B6FF /* 0x1b6 */
+    .long .L_ALT_OP_UNUSED_B7FF /* 0x1b7 */
+    .long .L_ALT_OP_UNUSED_B8FF /* 0x1b8 */
+    .long .L_ALT_OP_UNUSED_B9FF /* 0x1b9 */
+    .long .L_ALT_OP_UNUSED_BAFF /* 0x1ba */
+    .long .L_ALT_OP_UNUSED_BBFF /* 0x1bb */
+    .long .L_ALT_OP_UNUSED_BCFF /* 0x1bc */
+    .long .L_ALT_OP_UNUSED_BDFF /* 0x1bd */
+    .long .L_ALT_OP_UNUSED_BEFF /* 0x1be */
+    .long .L_ALT_OP_UNUSED_BFFF /* 0x1bf */
+    .long .L_ALT_OP_UNUSED_C0FF /* 0x1c0 */
+    .long .L_ALT_OP_UNUSED_C1FF /* 0x1c1 */
+    .long .L_ALT_OP_UNUSED_C2FF /* 0x1c2 */
+    .long .L_ALT_OP_UNUSED_C3FF /* 0x1c3 */
+    .long .L_ALT_OP_UNUSED_C4FF /* 0x1c4 */
+    .long .L_ALT_OP_UNUSED_C5FF /* 0x1c5 */
+    .long .L_ALT_OP_UNUSED_C6FF /* 0x1c6 */
+    .long .L_ALT_OP_UNUSED_C7FF /* 0x1c7 */
+    .long .L_ALT_OP_UNUSED_C8FF /* 0x1c8 */
+    .long .L_ALT_OP_UNUSED_C9FF /* 0x1c9 */
+    .long .L_ALT_OP_UNUSED_CAFF /* 0x1ca */
+    .long .L_ALT_OP_UNUSED_CBFF /* 0x1cb */
+    .long .L_ALT_OP_UNUSED_CCFF /* 0x1cc */
+    .long .L_ALT_OP_UNUSED_CDFF /* 0x1cd */
+    .long .L_ALT_OP_UNUSED_CEFF /* 0x1ce */
+    .long .L_ALT_OP_UNUSED_CFFF /* 0x1cf */
+    .long .L_ALT_OP_UNUSED_D0FF /* 0x1d0 */
+    .long .L_ALT_OP_UNUSED_D1FF /* 0x1d1 */
+    .long .L_ALT_OP_UNUSED_D2FF /* 0x1d2 */
+    .long .L_ALT_OP_UNUSED_D3FF /* 0x1d3 */
+    .long .L_ALT_OP_UNUSED_D4FF /* 0x1d4 */
+    .long .L_ALT_OP_UNUSED_D5FF /* 0x1d5 */
+    .long .L_ALT_OP_UNUSED_D6FF /* 0x1d6 */
+    .long .L_ALT_OP_UNUSED_D7FF /* 0x1d7 */
+    .long .L_ALT_OP_UNUSED_D8FF /* 0x1d8 */
+    .long .L_ALT_OP_UNUSED_D9FF /* 0x1d9 */
+    .long .L_ALT_OP_UNUSED_DAFF /* 0x1da */
+    .long .L_ALT_OP_UNUSED_DBFF /* 0x1db */
+    .long .L_ALT_OP_UNUSED_DCFF /* 0x1dc */
+    .long .L_ALT_OP_UNUSED_DDFF /* 0x1dd */
+    .long .L_ALT_OP_UNUSED_DEFF /* 0x1de */
+    .long .L_ALT_OP_UNUSED_DFFF /* 0x1df */
+    .long .L_ALT_OP_UNUSED_E0FF /* 0x1e0 */
+    .long .L_ALT_OP_UNUSED_E1FF /* 0x1e1 */
+    .long .L_ALT_OP_UNUSED_E2FF /* 0x1e2 */
+    .long .L_ALT_OP_UNUSED_E3FF /* 0x1e3 */
+    .long .L_ALT_OP_UNUSED_E4FF /* 0x1e4 */
+    .long .L_ALT_OP_UNUSED_E5FF /* 0x1e5 */
+    .long .L_ALT_OP_UNUSED_E6FF /* 0x1e6 */
+    .long .L_ALT_OP_UNUSED_E7FF /* 0x1e7 */
+    .long .L_ALT_OP_UNUSED_E8FF /* 0x1e8 */
+    .long .L_ALT_OP_UNUSED_E9FF /* 0x1e9 */
+    .long .L_ALT_OP_UNUSED_EAFF /* 0x1ea */
+    .long .L_ALT_OP_UNUSED_EBFF /* 0x1eb */
+    .long .L_ALT_OP_UNUSED_ECFF /* 0x1ec */
+    .long .L_ALT_OP_UNUSED_EDFF /* 0x1ed */
+    .long .L_ALT_OP_UNUSED_EEFF /* 0x1ee */
+    .long .L_ALT_OP_UNUSED_EFFF /* 0x1ef */
+    .long .L_ALT_OP_UNUSED_F0FF /* 0x1f0 */
+    .long .L_ALT_OP_UNUSED_F1FF /* 0x1f1 */
+    .long .L_ALT_OP_INVOKE_OBJECT_INIT_JUMBO /* 0x1f2 */
+    .long .L_ALT_OP_IGET_VOLATILE_JUMBO /* 0x1f3 */
+    .long .L_ALT_OP_IGET_WIDE_VOLATILE_JUMBO /* 0x1f4 */
+    .long .L_ALT_OP_IGET_OBJECT_VOLATILE_JUMBO /* 0x1f5 */
+    .long .L_ALT_OP_IPUT_VOLATILE_JUMBO /* 0x1f6 */
+    .long .L_ALT_OP_IPUT_WIDE_VOLATILE_JUMBO /* 0x1f7 */
+    .long .L_ALT_OP_IPUT_OBJECT_VOLATILE_JUMBO /* 0x1f8 */
+    .long .L_ALT_OP_SGET_VOLATILE_JUMBO /* 0x1f9 */
+    .long .L_ALT_OP_SGET_WIDE_VOLATILE_JUMBO /* 0x1fa */
+    .long .L_ALT_OP_SGET_OBJECT_VOLATILE_JUMBO /* 0x1fb */
+    .long .L_ALT_OP_SPUT_VOLATILE_JUMBO /* 0x1fc */
+    .long .L_ALT_OP_SPUT_WIDE_VOLATILE_JUMBO /* 0x1fd */
+    .long .L_ALT_OP_SPUT_OBJECT_VOLATILE_JUMBO /* 0x1fe */
+    .long .L_ALT_OP_THROW_VERIFICATION_ERROR_JUMBO /* 0x1ff */
 /* File: x86/entry.S */
 /*
  * Copyright (C) 2008 The Android Open Source Project
@@ -8747,15 +21259,15 @@
     .global dvmMterpStdRun
     .type   dvmMterpStdRun, %function
 /*
- * bool dvmMterpStdRun(MterpGlue* glue)
+ * bool dvmMterpStdRun(Thread* self)
  *
  * Interpreter entry point.  Returns changeInterp.
  *
  */
 dvmMterpStdRun:
-    movl    4(%esp), %ecx        # get incoming rGLUE
+    movl    4(%esp), %ecx        # get incoming rSELF
     push    %ebp                 # save caller base pointer
-    push    %ecx                 # save rGLUE at (%ebp)
+    push    %ecx                 # save rSELF at (%ebp)
     movl    %esp, %ebp           # set our %ebp
 /*
  * At this point we've allocated two slots on the stack
@@ -8771,14 +21283,15 @@
     movl    %ebx,EBX_SPILL(%ebp)
 
 /* Set up "named" registers */
-    movl    offGlue_pc(%ecx),rPC
-    movl    offGlue_fp(%ecx),rFP
+    movl    offThread_pc(%ecx),rPC
+    movl    offThread_fp(%ecx),rFP
+    movl    offThread_curHandlerTable(%ecx),rIBASE
 
 /* Remember %esp for future "longjmp" */
-    movl    %esp,offGlue_bailPtr(%ecx)
+    movl    %esp,offThread_bailPtr(%ecx)
 
 /* How to start? */
-    movb    offGlue_entryPoint(%ecx),%al
+    movb    offThread_entryPoint(%ecx),%al
 
 /* Normal start? */
     cmpb    $kInterpEntryInstr,%al
@@ -8790,7 +21303,7 @@
 
 .Lnot_instr:
     /* Reset to normal case */
-    movb   $kInterpEntryInstr,offGlue_entryPoint(%ecx)
+    movb   $kInterpEntryInstr,offThread_entryPoint(%ecx)
     cmpb   $kInterpEntryReturn,%al
     je     common_returnFromMethod
     cmpb   $kInterpEntryThrow,%al
@@ -8806,7 +21319,7 @@
     .global dvmMterpStdBail
     .type   dvmMterpStdBail, %function
 /*
- * void dvmMterpStdBail(MterpGlue* glue, bool changeInterp)
+ * void dvmMterpStdBail(Thread* self, bool changeInterp)
  *
  * Restore the stack pointer and PC from the save point established on entry.
  * This is essentially the same as a longjmp, but should be cheaper.  The
@@ -8816,13 +21329,13 @@
  * look a little strange.
  *
  * On entry:
- *  esp+4 (arg0)  MterpGlue* glue
+ *  esp+4 (arg0)  Thread* self
  *  esp+8 (arg1)  bool changeInterp
  */
 dvmMterpStdBail:
-    movl    4(%esp),%ecx                 # grab glue
+    movl    4(%esp),%ecx                 # grab self
     movl    8(%esp),%eax                 # changeInterp to return reg
-    movl    offGlue_bailPtr(%ecx),%esp   # Restore "setjmp" esp
+    movl    offThread_bailPtr(%ecx),%esp   # Restore "setjmp" esp
     movl    %esp,%ebp
     addl    $(FRAME_SIZE-8), %ebp       # Restore %ebp at point of setjmp
     movl    EDI_SPILL(%ebp),%edi
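
The entry/bail pair above behaves like setjmp/longjmp: dvmMterpStdRun records the stack pointer in the thread's bailPtr slot, and dvmMterpStdBail later restores it so control unwinds straight back to the entry point with the changeInterp result. A minimal C sketch of that control flow, using a real jmp_buf in place of the saved %esp and stand-in interpret/bail routines (assumptions, not the VM's code):

    #include <setjmp.h>
    #include <stdbool.h>

    typedef struct ThreadSketch {
        jmp_buf bailPoint;      /* plays the role of offThread_bailPtr */
        bool    changeInterp;   /* value handed to the bail routine */
    } ThreadSketch;

    static void bailSketch(ThreadSketch* self, bool changeInterp)
    {
        self->changeInterp = changeInterp;
        longjmp(self->bailPoint, 1);       /* unwind to the run entry point */
    }

    static void interpretSketch(ThreadSketch* self)
    {
        /* fetch/dispatch loop; on a bail condition: */
        bailSketch(self, true);
    }

    static bool runSketch(ThreadSketch* self)
    {
        if (setjmp(self->bailPoint) == 0)
            interpretSketch(self);         /* does not return normally */
        return self->changeInterp;         /* reached only via bailSketch() */
    }
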
@@ -8841,272 +21354,6 @@
     .asciz  "Bad entry point %d\n"
 
 
-/*
- * FIXME: Should have the config/rebuild mechanism generate this
- * for targets that need it.
- */
-
-/* Jump table */
-dvmAsmInstructionJmpTable = .LdvmAsmInstructionJmpTable
-.LdvmAsmInstructionJmpTable:
-.long .L_OP_NOP
-.long .L_OP_MOVE
-.long .L_OP_MOVE_FROM16
-.long .L_OP_MOVE_16
-.long .L_OP_MOVE_WIDE
-.long .L_OP_MOVE_WIDE_FROM16
-.long .L_OP_MOVE_WIDE_16
-.long .L_OP_MOVE_OBJECT
-.long .L_OP_MOVE_OBJECT_FROM16
-.long .L_OP_MOVE_OBJECT_16
-.long .L_OP_MOVE_RESULT
-.long .L_OP_MOVE_RESULT_WIDE
-.long .L_OP_MOVE_RESULT_OBJECT
-.long .L_OP_MOVE_EXCEPTION
-.long .L_OP_RETURN_VOID
-.long .L_OP_RETURN
-.long .L_OP_RETURN_WIDE
-.long .L_OP_RETURN_OBJECT
-.long .L_OP_CONST_4
-.long .L_OP_CONST_16
-.long .L_OP_CONST
-.long .L_OP_CONST_HIGH16
-.long .L_OP_CONST_WIDE_16
-.long .L_OP_CONST_WIDE_32
-.long .L_OP_CONST_WIDE
-.long .L_OP_CONST_WIDE_HIGH16
-.long .L_OP_CONST_STRING
-.long .L_OP_CONST_STRING_JUMBO
-.long .L_OP_CONST_CLASS
-.long .L_OP_MONITOR_ENTER
-.long .L_OP_MONITOR_EXIT
-.long .L_OP_CHECK_CAST
-.long .L_OP_INSTANCE_OF
-.long .L_OP_ARRAY_LENGTH
-.long .L_OP_NEW_INSTANCE
-.long .L_OP_NEW_ARRAY
-.long .L_OP_FILLED_NEW_ARRAY
-.long .L_OP_FILLED_NEW_ARRAY_RANGE
-.long .L_OP_FILL_ARRAY_DATA
-.long .L_OP_THROW
-.long .L_OP_GOTO
-.long .L_OP_GOTO_16
-.long .L_OP_GOTO_32
-.long .L_OP_PACKED_SWITCH
-.long .L_OP_SPARSE_SWITCH
-.long .L_OP_CMPL_FLOAT
-.long .L_OP_CMPG_FLOAT
-.long .L_OP_CMPL_DOUBLE
-.long .L_OP_CMPG_DOUBLE
-.long .L_OP_CMP_LONG
-.long .L_OP_IF_EQ
-.long .L_OP_IF_NE
-.long .L_OP_IF_LT
-.long .L_OP_IF_GE
-.long .L_OP_IF_GT
-.long .L_OP_IF_LE
-.long .L_OP_IF_EQZ
-.long .L_OP_IF_NEZ
-.long .L_OP_IF_LTZ
-.long .L_OP_IF_GEZ
-.long .L_OP_IF_GTZ
-.long .L_OP_IF_LEZ
-.long .L_OP_UNUSED_3E
-.long .L_OP_UNUSED_3F
-.long .L_OP_UNUSED_40
-.long .L_OP_UNUSED_41
-.long .L_OP_UNUSED_42
-.long .L_OP_UNUSED_43
-.long .L_OP_AGET
-.long .L_OP_AGET_WIDE
-.long .L_OP_AGET_OBJECT
-.long .L_OP_AGET_BOOLEAN
-.long .L_OP_AGET_BYTE
-.long .L_OP_AGET_CHAR
-.long .L_OP_AGET_SHORT
-.long .L_OP_APUT
-.long .L_OP_APUT_WIDE
-.long .L_OP_APUT_OBJECT
-.long .L_OP_APUT_BOOLEAN
-.long .L_OP_APUT_BYTE
-.long .L_OP_APUT_CHAR
-.long .L_OP_APUT_SHORT
-.long .L_OP_IGET
-.long .L_OP_IGET_WIDE
-.long .L_OP_IGET_OBJECT
-.long .L_OP_IGET_BOOLEAN
-.long .L_OP_IGET_BYTE
-.long .L_OP_IGET_CHAR
-.long .L_OP_IGET_SHORT
-.long .L_OP_IPUT
-.long .L_OP_IPUT_WIDE
-.long .L_OP_IPUT_OBJECT
-.long .L_OP_IPUT_BOOLEAN
-.long .L_OP_IPUT_BYTE
-.long .L_OP_IPUT_CHAR
-.long .L_OP_IPUT_SHORT
-.long .L_OP_SGET
-.long .L_OP_SGET_WIDE
-.long .L_OP_SGET_OBJECT
-.long .L_OP_SGET_BOOLEAN
-.long .L_OP_SGET_BYTE
-.long .L_OP_SGET_CHAR
-.long .L_OP_SGET_SHORT
-.long .L_OP_SPUT
-.long .L_OP_SPUT_WIDE
-.long .L_OP_SPUT_OBJECT
-.long .L_OP_SPUT_BOOLEAN
-.long .L_OP_SPUT_BYTE
-.long .L_OP_SPUT_CHAR
-.long .L_OP_SPUT_SHORT
-.long .L_OP_INVOKE_VIRTUAL
-.long .L_OP_INVOKE_SUPER
-.long .L_OP_INVOKE_DIRECT
-.long .L_OP_INVOKE_STATIC
-.long .L_OP_INVOKE_INTERFACE
-.long .L_OP_UNUSED_73
-.long .L_OP_INVOKE_VIRTUAL_RANGE
-.long .L_OP_INVOKE_SUPER_RANGE
-.long .L_OP_INVOKE_DIRECT_RANGE
-.long .L_OP_INVOKE_STATIC_RANGE
-.long .L_OP_INVOKE_INTERFACE_RANGE
-.long .L_OP_UNUSED_79
-.long .L_OP_UNUSED_7A
-.long .L_OP_NEG_INT
-.long .L_OP_NOT_INT
-.long .L_OP_NEG_LONG
-.long .L_OP_NOT_LONG
-.long .L_OP_NEG_FLOAT
-.long .L_OP_NEG_DOUBLE
-.long .L_OP_INT_TO_LONG
-.long .L_OP_INT_TO_FLOAT
-.long .L_OP_INT_TO_DOUBLE
-.long .L_OP_LONG_TO_INT
-.long .L_OP_LONG_TO_FLOAT
-.long .L_OP_LONG_TO_DOUBLE
-.long .L_OP_FLOAT_TO_INT
-.long .L_OP_FLOAT_TO_LONG
-.long .L_OP_FLOAT_TO_DOUBLE
-.long .L_OP_DOUBLE_TO_INT
-.long .L_OP_DOUBLE_TO_LONG
-.long .L_OP_DOUBLE_TO_FLOAT
-.long .L_OP_INT_TO_BYTE
-.long .L_OP_INT_TO_CHAR
-.long .L_OP_INT_TO_SHORT
-.long .L_OP_ADD_INT
-.long .L_OP_SUB_INT
-.long .L_OP_MUL_INT
-.long .L_OP_DIV_INT
-.long .L_OP_REM_INT
-.long .L_OP_AND_INT
-.long .L_OP_OR_INT
-.long .L_OP_XOR_INT
-.long .L_OP_SHL_INT
-.long .L_OP_SHR_INT
-.long .L_OP_USHR_INT
-.long .L_OP_ADD_LONG
-.long .L_OP_SUB_LONG
-.long .L_OP_MUL_LONG
-.long .L_OP_DIV_LONG
-.long .L_OP_REM_LONG
-.long .L_OP_AND_LONG
-.long .L_OP_OR_LONG
-.long .L_OP_XOR_LONG
-.long .L_OP_SHL_LONG
-.long .L_OP_SHR_LONG
-.long .L_OP_USHR_LONG
-.long .L_OP_ADD_FLOAT
-.long .L_OP_SUB_FLOAT
-.long .L_OP_MUL_FLOAT
-.long .L_OP_DIV_FLOAT
-.long .L_OP_REM_FLOAT
-.long .L_OP_ADD_DOUBLE
-.long .L_OP_SUB_DOUBLE
-.long .L_OP_MUL_DOUBLE
-.long .L_OP_DIV_DOUBLE
-.long .L_OP_REM_DOUBLE
-.long .L_OP_ADD_INT_2ADDR
-.long .L_OP_SUB_INT_2ADDR
-.long .L_OP_MUL_INT_2ADDR
-.long .L_OP_DIV_INT_2ADDR
-.long .L_OP_REM_INT_2ADDR
-.long .L_OP_AND_INT_2ADDR
-.long .L_OP_OR_INT_2ADDR
-.long .L_OP_XOR_INT_2ADDR
-.long .L_OP_SHL_INT_2ADDR
-.long .L_OP_SHR_INT_2ADDR
-.long .L_OP_USHR_INT_2ADDR
-.long .L_OP_ADD_LONG_2ADDR
-.long .L_OP_SUB_LONG_2ADDR
-.long .L_OP_MUL_LONG_2ADDR
-.long .L_OP_DIV_LONG_2ADDR
-.long .L_OP_REM_LONG_2ADDR
-.long .L_OP_AND_LONG_2ADDR
-.long .L_OP_OR_LONG_2ADDR
-.long .L_OP_XOR_LONG_2ADDR
-.long .L_OP_SHL_LONG_2ADDR
-.long .L_OP_SHR_LONG_2ADDR
-.long .L_OP_USHR_LONG_2ADDR
-.long .L_OP_ADD_FLOAT_2ADDR
-.long .L_OP_SUB_FLOAT_2ADDR
-.long .L_OP_MUL_FLOAT_2ADDR
-.long .L_OP_DIV_FLOAT_2ADDR
-.long .L_OP_REM_FLOAT_2ADDR
-.long .L_OP_ADD_DOUBLE_2ADDR
-.long .L_OP_SUB_DOUBLE_2ADDR
-.long .L_OP_MUL_DOUBLE_2ADDR
-.long .L_OP_DIV_DOUBLE_2ADDR
-.long .L_OP_REM_DOUBLE_2ADDR
-.long .L_OP_ADD_INT_LIT16
-.long .L_OP_RSUB_INT
-.long .L_OP_MUL_INT_LIT16
-.long .L_OP_DIV_INT_LIT16
-.long .L_OP_REM_INT_LIT16
-.long .L_OP_AND_INT_LIT16
-.long .L_OP_OR_INT_LIT16
-.long .L_OP_XOR_INT_LIT16
-.long .L_OP_ADD_INT_LIT8
-.long .L_OP_RSUB_INT_LIT8
-.long .L_OP_MUL_INT_LIT8
-.long .L_OP_DIV_INT_LIT8
-.long .L_OP_REM_INT_LIT8
-.long .L_OP_AND_INT_LIT8
-.long .L_OP_OR_INT_LIT8
-.long .L_OP_XOR_INT_LIT8
-.long .L_OP_SHL_INT_LIT8
-.long .L_OP_SHR_INT_LIT8
-.long .L_OP_USHR_INT_LIT8
-.long .L_OP_IGET_VOLATILE
-.long .L_OP_IPUT_VOLATILE
-.long .L_OP_SGET_VOLATILE
-.long .L_OP_SPUT_VOLATILE
-.long .L_OP_IGET_OBJECT_VOLATILE
-.long .L_OP_IGET_WIDE_VOLATILE
-.long .L_OP_IPUT_WIDE_VOLATILE
-.long .L_OP_SGET_WIDE_VOLATILE
-.long .L_OP_SPUT_WIDE_VOLATILE
-.long .L_OP_BREAKPOINT
-.long .L_OP_THROW_VERIFICATION_ERROR
-.long .L_OP_EXECUTE_INLINE
-.long .L_OP_EXECUTE_INLINE_RANGE
-.long .L_OP_INVOKE_DIRECT_EMPTY
-.long .L_OP_RETURN_VOID_BARRIER
-.long .L_OP_IGET_QUICK
-.long .L_OP_IGET_WIDE_QUICK
-.long .L_OP_IGET_OBJECT_QUICK
-.long .L_OP_IPUT_QUICK
-.long .L_OP_IPUT_WIDE_QUICK
-.long .L_OP_IPUT_OBJECT_QUICK
-.long .L_OP_INVOKE_VIRTUAL_QUICK
-.long .L_OP_INVOKE_VIRTUAL_QUICK_RANGE
-.long .L_OP_INVOKE_SUPER_QUICK
-.long .L_OP_INVOKE_SUPER_QUICK_RANGE
-.long .L_OP_IPUT_OBJECT_VOLATILE
-.long .L_OP_SGET_OBJECT_VOLATILE
-.long .L_OP_SPUT_OBJECT_VOLATILE
-.long .L_OP_DISPATCH_FF
-
-
 /* File: x86/footer.S */
 /*
  * Copyright (C) 2008 The Android Open Source Project
@@ -9129,24 +21376,138 @@
 
 #if defined(WITH_JIT)
 /*
- * Placeholder entries for x86 JIT
+ * JIT-related re-entries into the interpreter.  In general, if the
+ * exit from a translation can at some point be chained, the entry
+ * here requires that control arrived via a call, and that the "rp"
+ * on TOS is actually a pointer to a 32-bit cell containing the Dalvik PC
+ * of the next insn to handle.  If no chaining will happen, the entry
+ * should be reached via a direct jump and rPC set beforehand.
  */
+
     .global dvmJitToInterpPunt
+/*
+ * The compiler will generate a jump to this entry point when it is
+ * having difficulty translating a Dalvik instruction.  We must skip
+ * the code cache lookup & prevent chaining to avoid bouncing between
+ * the interpreter and code cache. rPC must be set on entry.
+ */
 dvmJitToInterpPunt:
+#if defined(WITH_JIT_TUNING)
+    movl   rPC, OUT_ARG0(%esp)
+    call   dvmBumpPunt
+#endif
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx),rIBASE
+    FETCH_INST_R %ecx
+    GOTO_NEXT_R %ecx
+
     .global dvmJitToInterpSingleStep
+/*
+ * Return to the interpreter to handle a single instruction.
+ * Should be reached via a call.
+ * On entry:
+ *   0(%esp)          <= native return address within trace
+ *   rPC              <= Dalvik PC of this instruction
+ *   OUT_ARG0+4(%esp) <= Dalvik PC of next instruction
+ */
 dvmJitToInterpSingleStep:
+    pop    %eax
+    movl   rSELF, %ecx
+    movl   OUT_ARG0(%esp), %edx
+    movl   %eax,offThread_jitResumeNPC(%ecx)
+    movl   %edx,offThread_jitResumeDPC(%ecx)
+    movl   $kInterpEntryInstr,offThread_entryPoint(%ecx)
+    movl   $1,rINST     # changeInterp <= true
+    jmp    common_gotoBail
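
The single-step hand-off above simply parks two resume values on the thread before bailing: the native address inside the trace (the popped return address) and the Dalvik PC the trace expects next. A hedged C rendering; the struct and field names are modeled on the offThread_* offsets used here and are not the real Thread layout:

    #include <stdint.h>

    struct SingleStepSelf {
        void*           jitResumeNPC;   /* native point to re-enter the trace */
        const uint16_t* jitResumeDPC;   /* Dalvik PC of the next instruction  */
        int             entryPoint;     /* set to kInterpEntryInstr           */
    };

    static void singleStepHandoff(struct SingleStepSelf* self,
                                  void* nativeReturnAddr,
                                  const uint16_t* nextDalvikPC)
    {
        self->jitResumeNPC = nativeReturnAddr;
        self->jitResumeDPC = nextDalvikPC;
        self->entryPoint   = 0;     /* kInterpEntryInstr in the real code */
        /* caller then bails to the other interpreter with changeInterp = 1 */
    }
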
+
     .global dvmJitToInterpNoChainNoProfile
+/*
+ * Return from the translation cache to the interpreter to do method
+ * invocation.  Check if the translation exists for the callee, but don't
+ * chain to it. rPC must be set on entry.
+ */
 dvmJitToInterpNoChainNoProfile:
+#if defined(WITH_JIT_TUNING)
+    call   dvmBumpNoChain
+#endif
+    movl   rPC,OUT_ARG0(%esp)
+    call   dvmJitGetTraceAddr        # is there a translation?
+    movl   rSELF,%ecx                # ecx <- self
+    movl   %eax,offThread_inJitCodeCache(%ecx)  # set inJitCodeCache flag
+    cmpl   $0, %eax
+    jz     1f
+    call   *%eax                     # exec translation if we've got one
+    # won't return
+1:
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx),rIBASE
+    FETCH_INST_R %ecx
+    GOTO_NEXT_R %ecx
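
In C terms, the no-chain re-entry above is a cache lookup followed by either a jump into the translation or a plain return to interpreting. A sketch under stated assumptions: lookupTrace() and resumeInterpreter() are stand-ins for dvmJitGetTraceAddr and FETCH_INST/GOTO_NEXT, and inJitCodeCache mirrors offThread_inJitCodeCache:

    #include <stddef.h>
    #include <stdint.h>

    struct NoChainSelf { void* inJitCodeCache; };

    static void* lookupTrace(const uint16_t* dPC) { (void)dPC; return NULL; }
    static void  resumeInterpreter(const uint16_t* dPC) { (void)dPC; }

    static void noChainReentry(struct NoChainSelf* self, const uint16_t* dPC)
    {
        void* trace = lookupTrace(dPC);   /* dvmJitGetTraceAddr in the asm */
        self->inJitCodeCache = trace;     /* record whether we enter the cache */
        if (trace != NULL)
            ((void (*)(void))trace)();    /* run the translation; no return */
        else
            resumeInterpreter(dPC);       /* no translation: keep interpreting */
    }
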
+
+/*
+ * Return from the translation cache and immediately request a
+ * translation for the exit target, but don't attempt to chain.
+ * rPC set on entry.
+ */
     .global dvmJitToInterpTraceSelectNoChain
 dvmJitToInterpTraceSelectNoChain:
+#if defined(WITH_JIT_TUNING)
+    call   dvmBumpNoChain
+#endif
+    movl   rPC,OUT_ARG0(%esp)
+    call   dvmJitGetTraceAddr # is there a translation?
+    movl   rSELF,%ecx
+    cmpl   $0,%eax
+    movl   %eax,offThread_inJitCodeCache(%ecx)  # set inJitCodeCache flag
+    jz     1f
+    call   *%eax              # jump to translation
+    # won't return
+
+/* No Translation - request one */
+1:
+    GET_JIT_PROF_TABLE %ecx %eax
+    cmpl   $0, %eax          # JIT enabled?
+    jnz    2f                 # Request one if so
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx),rIBASE
+    FETCH_INST_R %ecx         # Continue interpreting if not
+    GOTO_NEXT_R %ecx
+2:
+    movl   $kJitTSelectRequestHot,rINST  # ask for trace select
+    jmp    common_selectTrace
+
+/*
+ * Return from the translation cache and immediately request a
+ * translation for the exit target.  Reached via a call, and
+ * (TOS)->rPC.
+ */
     .global dvmJitToInterpTraceSelect
 dvmJitToInterpTraceSelect:
+    pop    rINST           # save chain cell address in callee save reg
+    movl   (rINST),rPC
+    movl   rPC,OUT_ARG0(%esp)
+    call   dvmJitGetTraceAddr # is there a translation?
+    cmpl   $0,%eax
+    jz     1b                 # no - ask for one
+    movl   %eax,OUT_ARG0(%esp)
+# FIXME - need to adjust rINST to beginning of sequence
+    movl   rINST,OUT_ARG1(%esp)
+    call   dvmJitChain        # Attempt dvmJitChain(codeAddr,chainAddr)
+    cmpl   $0,%eax           # Success?
+    jz     toInterpreter      # didn't chain - interpret
+    call   *%eax
+    # won't return
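
The trace-select path above differs from the no-chain path only in that, once a translation is found, it first asks dvmJitChain to patch the exiting chain cell so future exits jump straight into the code cache. A hedged sketch; the ChainCell layout and the stub helpers are illustrative assumptions:

    #include <stddef.h>
    #include <stdint.h>

    struct ChainCellSketch { const uint16_t* targetDPC; };

    static void* lookupTraceAt(const uint16_t* dPC) { (void)dPC; return NULL; }
    static void* tryChain(void* codeAddr, struct ChainCellSketch* cell)
    { (void)cell; return codeAddr; }       /* dvmJitChain(codeAddr, chainAddr) */
    static void  requestTraceSelection(const uint16_t* dPC) { (void)dPC; }
    static void  interpretFrom(const uint16_t* dPC) { (void)dPC; }

    static void traceSelectReentry(struct ChainCellSketch* cell)
    {
        const uint16_t* dPC = cell->targetDPC;    /* (TOS) -> Dalvik PC */
        void* trace = lookupTraceAt(dPC);
        if (trace == NULL) {
            requestTraceSelection(dPC);           /* no translation: ask for one */
        } else {
            void* chained = tryChain(trace, cell);
            if (chained != NULL)
                ((void (*)(void))chained)();      /* chained: execute, no return */
            else
                interpretFrom(dPC);               /* chaining failed: interpret */
        }
    }
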
+
+/*
+ * Placeholder entries for x86 JIT
+ */
     .global dvmJitToInterpBackwardBranch
 dvmJitToInterpBackwardBranch:
     .global dvmJitToInterpNormal
 dvmJitToInterpNormal:
     .global dvmJitToInterpNoChain
 dvmJitToInterpNoChain:
+toInterpreter:
     jmp  common_abort
 #endif
 
@@ -9157,20 +21518,94 @@
  *   ebx (a.k.a. rINST) -> PC adjustment in 16-bit words
  */
 common_backwardBranch:
-    movl    rGLUE,%ecx
-    call   common_periodicChecks  # Note: expects rPC to be preserved
+    movl    rSELF,%ecx
+    call   common_periodicChecks  # rPC and ecx/rSELF preserved
+#if defined(WITH_JIT)
+    GET_JIT_PROF_TABLE %ecx rIBASE
+    ADVANCE_PC_INDEXED rINST
+    cmpl   $0,rIBASE
+    movl   offThread_curHandlerTable(%ecx),rIBASE
+    FETCH_INST
+    jz    1f                    # Profiling off - continue
+    .global updateProfile
+updateProfile:
+common_updateProfile:
+    # quick & dirty hash
+    movl   rPC, %eax
+    shrl   $12, %eax
+    xorl   rPC, %eax
+    andl   $((1<<JIT_PROF_SIZE_LOG_2)-1),%eax
+    decb   (%edx,%eax)
+    jz     2f
+1:
+    GOTO_NEXT
+2:
+/*
+ * Here, we switch to the debug interpreter to request
+ * trace selection.  First, though, check to see if there
+ * is already a native translation in place (and, if so,
+ * jump to it now).
+ */
+    GET_JIT_THRESHOLD %ecx rINST  # leaves rSELF in %ecx
+    EXPORT_PC
+    movb   rINSTbl,(%edx,%eax)   # reset counter
+    movl   %ecx,rINST            # preserve rSELF
+    movl   rPC,OUT_ARG0(%esp)
+    call   dvmJitGetTraceAddr  # already have one?
+    movl   %eax,offThread_inJitCodeCache(rINST)   # set the inJitCodeCache flag
+    cmpl   $0,%eax
+    jz     1f
+    call   *%eax        # FIXME: decide call vs. jmp.  No return either way
+1:
+    movl   $kJitTSelectRequest,%eax
+    # On entry, eax<- jitState, rPC valid
+common_selectTrace:
+
+    movl   rSELF,%ecx
+    movl   %eax,offThread_jitState(%ecx)
+    movl   $kInterpEntryInstr,offThread_entryPoint(%ecx)
+    movl   $1,rINST
+    jmp    common_gotoBail
+#else
+    movl   offThread_curHandlerTable(%ecx),rIBASE
     ADVANCE_PC_INDEXED rINST
     FETCH_INST
     GOTO_NEXT
+#endif
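
The profiling path in common_updateProfile above is a byte counter per hashed branch target: hash the Dalvik PC, decrement the counter, and request trace selection when it hits zero (at which point the counter is reset to the threshold). A small C sketch; the table size and threshold are illustrative, and in the VM the table lives in per-thread JIT state:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define PROF_SIZE_LOG_2 11                       /* JIT_PROF_SIZE_LOG_2 */
    #define PROF_SIZE       (1u << PROF_SIZE_LOG_2)

    static uint8_t profTable[PROF_SIZE];

    /* Returns true when this branch target has become hot enough to trace. */
    static bool countBackwardBranch(uintptr_t dalvikPC, uint8_t threshold)
    {
        /* quick & dirty hash, as in the assembly: (pc >> 12) ^ pc, masked */
        size_t idx = ((dalvikPC >> 12) ^ dalvikPC) & (PROF_SIZE - 1);
        if (--profTable[idx] == 0) {
            profTable[idx] = threshold;              /* reset the counter */
            return true;                             /* request a trace */
        }
        return false;
    }
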
 
 
 
 /*
+ * Common code for jumbo method invocation.
+ *
+ * On entry:
+ *   eax = Method* methodToCall
+ *   rINSTw trashed, must reload
+ *   rIBASE trashed, must reload before resuming interpreter
+ */
+
+common_invokeMethodJumbo:
+.LinvokeNewJumbo:
+
+   /*
+    * prepare to copy args to "outs" area of current frame
+    */
+    movzwl      6(rPC),rINST            # rINST<- BBBB
+    movzwl      8(rPC), %ecx            # %ecx<- CCCC
+    ADVANCE_PC 2                        # adjust pc to make return similar
+    SAVEAREA_FROM_FP %edx               # %edx<- &StackSaveArea
+    test        rINST, rINST
+    movl        rINST, LOCAL0_OFFSET(%ebp) # LOCAL0_OFFSET(%ebp)<- BBBB
+    jz          .LinvokeArgsDone        # no args; jump to args done
+    jmp         .LinvokeRangeArgs       # handle args like invoke range
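
The jumbo invoke prologue above reads its operands straight out of the 16-bit code units: units 1-2 hold the 32-bit method reference, unit 3 (byte offset 6) the argument count BBBB, and unit 4 (byte offset 8) the first argument register CCCC. A hedged decode sketch; the struct and field names are illustrative:

    #include <stdint.h>

    struct JumboInvokeOperands {
        uint32_t methodRef;   /* AAAAAAAA: 32-bit method index */
        uint16_t argCount;    /* BBBB: number of argument words */
        uint16_t firstVreg;   /* CCCC: first argument register */
    };

    static struct JumboInvokeOperands decodeJumboInvoke(const uint16_t* pc)
    {
        struct JumboInvokeOperands ops;
        ops.methodRef = (uint32_t)pc[1] | ((uint32_t)pc[2] << 16);
        ops.argCount  = pc[3];   /* movzwl 6(rPC),rINST in the assembly */
        ops.firstVreg = pc[4];   /* movzwl 8(rPC),%ecx in the assembly */
        return ops;
    }
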
+
+/*
  * Common code for method invocation with range.
  *
  * On entry:
  *   eax = Method* methodToCall
  *   rINSTw trashed, must reload
+ *   rIBASE trashed, must reload before resuming interpreter
  */
 
 common_invokeMethodRange:
@@ -9193,6 +21628,7 @@
     * (very few methods have > 10 args; could unroll for common cases)
     */
 
+.LinvokeRangeArgs:
     movl        %ebx, LOCAL1_OFFSET(%ebp)       # LOCAL1_OFFSET(%ebp)<- save %ebx
     lea         (rFP, %ecx, 4), %ecx    # %ecx<- &vCCCC
     shll        $2, LOCAL0_OFFSET(%ebp)        # LOCAL0_OFFSET(%ebp)<- offset
@@ -9211,6 +21647,7 @@
    /*
     * %eax is "Method* methodToCall", the method we're trying to call
     * prepare to copy args to "outs" area of current frame
+    * rIBASE trashed, must reload before resuming interpreter
     */
 
 common_invokeMethodNoRange:
@@ -9278,11 +21715,11 @@
     shl         $2, %edx               # %edx<- update offset
     SAVEAREA_FROM_FP %eax               # %eax<- &StackSaveArea
     subl        %edx, %eax              # %eax<- newFP; (old savearea - regsSize)
-    movl        rGLUE,%edx              # %edx<- pMterpGlue
+    movl        rSELF,%edx              # %edx<- pthread
     movl        %eax, LOCAL1_OFFSET(%ebp)       # LOCAL1_OFFSET(%ebp)<- &outs
     subl        $sizeofStackSaveArea, %eax # %eax<- newSaveArea (stack save area using newFP)
-    movl        offGlue_interpStackEnd(%edx), %edx # %edx<- glue->interpStackEnd
-    movl        %edx, LOCAL2_OFFSET(%ebp)       # LOCAL2_OFFSET<- glue->interpStackEnd
+    movl        offThread_interpStackEnd(%edx), %edx # %edx<- self->interpStackEnd
+    movl        %edx, LOCAL2_OFFSET(%ebp)       # LOCAL2_OFFSET<- self->interpStackEnd
     shl         $2, %ecx               # %ecx<- update offset for outsSize
     movl        %eax, %edx              # %edx<- newSaveArea
     sub         %ecx, %eax              # %eax<- bottom; (newSaveArea - outsSize)
@@ -9305,19 +21742,19 @@
     jne         .LinvokeNative          # handle native call
 
    /*
-    * Update "glue" values for the new method
+    * Update "self" values for the new method
     * %eax=methodToCall, LOCAL1_OFFSET(%ebp)=newFp
     */
 
     movl        offMethod_clazz(%eax), %edx # %edx<- method->clazz
-    movl        rGLUE,%ecx                  # %ecx<- pMterpGlue
+    movl        rSELF,%ecx                  # %ecx<- pthread
     movl        offClassObject_pDvmDex(%edx), %edx # %edx<- method->clazz->pDvmDex
-    movl        %eax, offGlue_method(%ecx) # glue->method<- methodToCall
-    movl        %edx, offGlue_methodClassDex(%ecx) # glue->methodClassDex<- method->clazz->pDvmDex
+    movl        %eax, offThread_method(%ecx) # self->method<- methodToCall
+    movl        %edx, offThread_methodClassDex(%ecx) # self->methodClassDex<- method->clazz->pDvmDex
     movl        offMethod_insns(%eax), rPC # rPC<- methodToCall->insns
-    movl        offGlue_self(%ecx), %eax # %eax<- glue->self
     movl        LOCAL1_OFFSET(%ebp), rFP # rFP<- newFP
-    movl        rFP, offThread_curFrame(%eax) # glue->self->curFrame<- newFP
+    movl        rFP, offThread_curFrame(%ecx) # self->curFrame<- newFP
+    movl        offThread_curHandlerTable(%ecx),rIBASE
     FETCH_INST
     GOTO_NEXT                           # jump to methodToCall->insns
 
@@ -9327,39 +21764,38 @@
     */
 
 .LinvokeNative:
-    movl        rGLUE,%ecx              # %ecx<- pMterpGlue
+    movl        rSELF,%ecx              # %ecx<- pthread
     movl        %eax, OUT_ARG1(%esp)    # push parameter methodToCall
-    movl        offGlue_self(%ecx), %ecx        # %ecx<- glue->self
     movl        offThread_jniLocal_topCookie(%ecx), %eax # %eax<- self->localRef->...
     movl        %eax, offStackSaveArea_localRefCookie(%edx) # newSaveArea->localRefCookie<- top
     movl        %edx, OUT_ARG4(%esp)    # save newSaveArea
     movl        LOCAL1_OFFSET(%ebp), %edx # %edx<- newFP
-    movl        %edx, offThread_curFrame(%ecx)  # glue->self->curFrame<- newFP
-    movl        %ecx, OUT_ARG3(%esp)    # save glue->self
-    movl        %ecx, OUT_ARG2(%esp)    # push parameter glue->self
-    movl        rGLUE,%ecx              # %ecx<- pMterpGlue
+    movl        %edx, offThread_curFrame(%ecx)  # self->curFrame<- newFP
+    movl        %ecx, OUT_ARG3(%esp)    # save self
+    movl        %ecx, OUT_ARG2(%esp)    # push parameter self
+    movl        rSELF,%ecx              # %ecx<- pthread
     movl        OUT_ARG1(%esp), %eax    # %eax<- methodToCall
-    lea         offGlue_retval(%ecx), %ecx # %ecx<- &retval
-    movl        %ecx, OUT_ARG0(%esp)    # push parameter pMterpGlue
+    lea         offThread_retval(%ecx), %ecx # %ecx<- &retval
+    movl        %ecx, OUT_ARG0(%esp)    # push parameter pthread
     push        %edx                    # push parameter newFP
 
     call        *offMethod_nativeFunc(%eax) # call methodToCall->nativeFunc
     lea         4(%esp), %esp
     movl        OUT_ARG4(%esp), %ecx    # %ecx<- newSaveArea
-    movl        OUT_ARG3(%esp), %eax    # %eax<- glue->self
+    movl        OUT_ARG3(%esp), %eax    # %eax<- self
     movl        offStackSaveArea_localRefCookie(%ecx), %edx # %edx<- old top
     cmp         $0, offThread_exception(%eax) # check for exception
-    movl        rFP, offThread_curFrame(%eax) # glue->self->curFrame<- rFP
+    movl        rFP, offThread_curFrame(%eax) # self->curFrame<- rFP
     movl        %edx, offThread_jniLocal_topCookie(%eax) # new top <- old top
     jne         common_exceptionThrown  # handle exception
-    FETCH_INST_OPCODE 3 %edx
+    movl        offThread_curHandlerTable(%eax),rIBASE
+    FETCH_INST_OPCODE 3 %ecx
     ADVANCE_PC 3
-    GOTO_NEXT_R %edx                    # jump to next instruction
+    GOTO_NEXT_R %ecx                    # jump to next instruction
 
 .LstackOverflow:    # eax=methodToCall
     movl        %eax, OUT_ARG1(%esp)    # push parameter methodToCall
-    movl        rGLUE,%eax              # %eax<- pMterpGlue
-    movl        offGlue_self(%eax), %eax # %eax<- glue->self
+    movl        rSELF,%eax              # %eax<- self
     movl        %eax, OUT_ARG0(%esp)    # push parameter self
     call        dvmHandleStackOverflow  # call: (Thread* self, Method* meth)
     jmp         common_exceptionThrown  # handle exception
@@ -9370,8 +21806,8 @@
  *
  * On entry:
  *   ebx  -> PC adjustment in 16-bit words (must be preserved)
- *   ecx  -> GLUE pointer
- *   reentry type, e.g. kInterpEntryInstr stored in rGLUE->entryPoint
+ *   ecx  -> SELF pointer
+ *   reentry type, e.g. kInterpEntryInstr stored in rSELF->entryPoint
  *
  * Note: A call will normally kill %eax and %ecx.  To
  *       streamline the normal case, this routine will preserve
@@ -9379,37 +21815,29 @@
  *       is a bit ugly, but will happen in the relatively uncommon path.
  * TODO: Basic-block style Jit will need a hook here as well.  Fold it into
  *       the suspendCount check so we can get both in 1 shot.
+ * TUNING: Improve scheduling here & do initial single test for all.
  */
 common_periodicChecks:
-    movl    offGlue_pSelfSuspendCount(%ecx),%eax    # eax <- &suspendCount
-    cmpl    $0,(%eax)
+    cmpl    $0,offThread_suspendCount(%ecx)     # non-zero suspendCount?
     jne     1f
 
 6:
-    movl   offGlue_pDebuggerActive(%ecx),%eax      # eax <- &DebuggerActive
-    movl   offGlue_pActiveProfilers(%ecx),%ecx     # ecx <- &ActiveProfilers
-    testl  %eax,%eax               # debugger enabled?
-    je     2f
-    movzbl (%eax),%eax             # get active count
-2:
-    orl    (%ecx),%eax             # eax <- debuggerActive | activeProfilers
-    movl   rGLUE,%ecx              # restore rGLUE
-    jne    3f                      # one or both active - switch interp
-
-5:
+    movl   offThread_pInterpBreak(%ecx),%eax    # eax <- &interpBreak
+    cmpl   $0,(%eax)              # something interesting happening?
+    jne    3f                      # yes - switch interpreters
     ret
 
     /* Check for suspend */
 1:
     /*  At this point, the return pointer to the caller of
      *  common_periodicChecks is on the top of stack.  We need to preserve
-     *  GLUE(ecx).
+     *  SELF(ecx).
      *  The outgoing profile is:
      *      bool dvmCheckSuspendPending(Thread* self)
      *  Because we reached here via a call, go ahead and build a new frame.
      */
     EXPORT_PC                         # need for precise GC
-    movl    offGlue_self(%ecx),%eax      # eax<- glue->self
+    movl    %ecx,%eax                 # eax<- self
     push    %ebp
     movl    %esp,%ebp
     subl    $24,%esp
@@ -9417,7 +21845,7 @@
     call    dvmCheckSuspendPending
     addl    $24,%esp
     pop     %ebp
-    movl    rGLUE,%ecx
+    movl    rSELF,%ecx
 
     /*
      * Need to check to see if debugger or profiler flags got set
@@ -9437,7 +21865,7 @@
      */
 3:
     leal    (rPC,%ebx,2),rPC       # adjust pc to show target
-    movl    rGLUE,%ecx             # bail expect GLUE already loaded
+    movl    rSELF,%ecx             # bail expect SELF already loaded
     movl    $1,rINST              # set changeInterp to true
     jmp     common_gotoBail
 
@@ -9446,9 +21874,9 @@
  * Common code for handling a return instruction
  */
 common_returnFromMethod:
-    movl    rGLUE,%ecx
+    movl    rSELF,%ecx
     /* Set entry mode in case we bail */
-    movb    $kInterpEntryReturn,offGlue_entryPoint(%ecx)
+    movb    $kInterpEntryReturn,offThread_entryPoint(%ecx)
     xorl    rINST,rINST   # zero offset in case we switch interps
     call    common_periodicChecks   # Note: expects %ecx to be preserved
 
@@ -9459,17 +21887,17 @@
     je      common_gotoBail    # break frame, bail out completely
 
     movl    offStackSaveArea_savedPc(%eax),rPC    # pc<- saveArea->savedPC
-    movl    offGlue_self(%ecx),%eax               # eax<- self
-    movl    rINST,offGlue_method(%ecx)  # glue->method = newSave->meethod
-    movl    rFP,offThread_curFrame(%eax)     # self->curFrame = fp
-    movl    offMethod_clazz(rINST),%eax      # eax<- method->clazz
-    FETCH_INST_OPCODE 3 %edx
-    movl    offClassObject_pDvmDex(%eax),%eax # eax<- method->clazz->pDvmDex
+    movl    rINST,offThread_method(%ecx)          # self->method = newSave->method
+    movl    rFP,offThread_curFrame(%ecx)          # self->curFrame = fp
+    movl    offMethod_clazz(rINST),%eax           # eax<- method->clazz
+    movl    offThread_curHandlerTable(%ecx),rIBASE
+    movl    offClassObject_pDvmDex(%eax),rINST    # rINST<- method->clazz->pDvmDex
+    FETCH_INST_OPCODE 3 %eax
+    movl    rINST,offThread_methodClassDex(%ecx)
     ADVANCE_PC 3
-    movl    %eax,offGlue_methodClassDex(%ecx)
     /* not bailing - restore entry mode to default */
-    movb    $kInterpEntryInstr,offGlue_entryPoint(%ecx)
-    GOTO_NEXT_R %edx
+    movb    $kInterpEntryInstr,offThread_entryPoint(%ecx)
+    GOTO_NEXT_R %eax
 
 /*
  * Prepare to strip the current frame and "longjump" back to caller of
@@ -9477,24 +21905,27 @@
  *
  * on entry:
  *    rINST holds changeInterp
- *    ecx holds glue pointer
+ *    ecx holds self pointer
  *
- * expected profile: dvmMterpStdBail(MterpGlue *glue, bool changeInterp)
+ * expected profile: dvmMterpStdBail(Thread *self, bool changeInterp)
  */
 common_gotoBail:
-    movl   rPC,offGlue_pc(%ecx)     # export state to glue
-    movl   rFP,offGlue_fp(%ecx)
-    movl   %ecx,OUT_ARG0(%esp)      # glue in arg0
+    movl   rPC,offThread_pc(%ecx)     # export state to self
+    movl   rFP,offThread_fp(%ecx)
+    movl   %ecx,OUT_ARG0(%esp)      # self in arg0
     movl   rINST,OUT_ARG1(%esp)     # changeInterp in arg1
     call   dvmMterpStdBail          # bail out....
 
 
 /*
- * After returning from a "glued" function, pull out the updated values
+ * After returning from a "glue" helper call, pull out the updated values
  * and start executing at the next instruction.
  */
  common_resumeAfterGlueCall:
-     LOAD_PC_FP_FROM_GLUE
+     movl  rSELF, %eax
+     movl  offThread_pc(%eax),rPC
+     movl  offThread_fp(%eax),rFP
+     movl  offThread_curHandlerTable(%eax),rIBASE
      FETCH_INST
      GOTO_NEXT
 
@@ -9503,36 +21934,30 @@
  */
 common_errDivideByZero:
     EXPORT_PC
-    movl    $.LstrArithmeticException,%eax
-    movl    %eax,OUT_ARG0(%esp)
     movl    $.LstrDivideByZero,%eax
-    movl    %eax,OUT_ARG1(%esp)
-    call    dvmThrowException
+    movl    %eax,OUT_ARG0(%esp)
+    call    dvmThrowArithmeticException
     jmp     common_exceptionThrown
 
 /*
  * Attempt to allocate an array with a negative size.
+ * On entry, len in eax
  */
 common_errNegativeArraySize:
     EXPORT_PC
-    movl    $.LstrNegativeArraySizeException,%eax
-    movl    %eax,OUT_ARG0(%esp)
-    xorl    %eax,%eax
-    movl    %eax,OUT_ARG1(%esp)
-    call    dvmThrowException
+    movl    %eax,OUT_ARG0(%esp)                  # arg0<- len
+    call    dvmThrowNegativeArraySizeException   # (len)
     jmp     common_exceptionThrown
 
 /*
  * Attempt to allocate an array with a negative size.
+ * On entry, method name in eax
  */
 common_errNoSuchMethod:
 
     EXPORT_PC
-    movl    $.LstrNoSuchMethodError,%eax
     movl    %eax,OUT_ARG0(%esp)
-    xorl    %eax,%eax
-    movl    %eax,OUT_ARG1(%esp)
-    call    dvmThrowException
+    call    dvmThrowNoSuchMethodError
     jmp     common_exceptionThrown
 
 /*
@@ -9541,11 +21966,9 @@
  */
 common_errNullObject:
     EXPORT_PC
-    movl    $.LstrNullPointerException,%eax
-    movl    %eax,OUT_ARG0(%esp)
     xorl    %eax,%eax
-    movl    %eax,OUT_ARG1(%esp)
-    call    dvmThrowException
+    movl    %eax,OUT_ARG0(%esp)
+    call    dvmThrowNullPointerException
     jmp     common_exceptionThrown
 
 /*
@@ -9557,9 +21980,9 @@
 common_errArrayIndex:
     EXPORT_PC
     movl    offArrayObject_length(%eax), %eax
-    movl    %ecx,OUT_ARG0(%esp)
-    movl    %eax,OUT_ARG1(%esp)
-    call    dvmThrowAIOOBE        # dvmThrowAIOO(index, length)
+    movl    %eax,OUT_ARG0(%esp)
+    movl    %ecx,OUT_ARG1(%esp)
+    call    dvmThrowArrayIndexOutOfBoundsException   # args (length, index)
     jmp     common_exceptionThrown
 
 /*
@@ -9572,9 +21995,9 @@
  * This does not return.
  */
 common_exceptionThrown:
-    movl    rGLUE,%ecx
-    movl    rPC,offGlue_pc(%ecx)
-    movl    rFP,offGlue_fp(%ecx)
+    movl    rSELF,%ecx
+    movl    rPC,offThread_pc(%ecx)
+    movl    rFP,offThread_fp(%ecx)
     movl    %ecx,OUT_ARG0(%esp)
     call    dvmMterp_exceptionThrown
     jmp     common_resumeAfterGlueCall
@@ -9589,20 +22012,8 @@
  */
 
     .section     .rodata
-.LstrNullPointerException:
-    .asciz    "Ljava/lang/NullPointerException;"
-.LstrArithmeticException:
-    .asciz  "Ljava/lang/ArithmeticException;"
 .LstrDivideByZero:
     .asciz  "divide by zero"
-.LstrNegativeArraySizeException:
-    .asciz  "Ljava/lang/NegativeArraySizeException;"
-.LstrInstantiationError:
-    .asciz  "Ljava/lang/InstantiationError;"
-.LstrNoSuchMethodError:
-    .asciz  "Ljava/lang/NoSuchMethodError;"
-.LstrInternalErrorA:
-    .asciz  "Ljava/lang/InternalError;"
 .LstrFilledNewArrayNotImplA:
     .asciz  "filled-new-array only implemented for 'int'"
 
diff --git a/vm/mterp/out/InterpC-allstubs.c b/vm/mterp/out/InterpC-allstubs.c
index 01754cd..30c83c8 100644
--- a/vm/mterp/out/InterpC-allstubs.c
+++ b/vm/mterp/out/InterpC-allstubs.c
@@ -58,24 +58,31 @@
 #endif
 
 /*
- * ARM EABI requires 64-bit alignment for access to 64-bit data types.  We
- * can't just use pointers to copy 64-bit values out of our interpreted
- * register set, because gcc will generate ldrd/strd.
+ * Some architectures require 64-bit alignment for access to 64-bit data
+ * types.  We can't just use pointers to copy 64-bit values out of our
+ * interpreted register set, because gcc may assume the pointer target is
+ * aligned and generate invalid code.
  *
- * The __UNION version copies data in and out of a union.  The __MEMCPY
- * version uses a memcpy() call to do the transfer; gcc is smart enough to
- * not actually call memcpy().  The __UNION version is very bad on ARM;
- * it only uses one more instruction than __MEMCPY, but for some reason
- * gcc thinks it needs separate storage for every instance of the union.
- * On top of that, it feels the need to zero them out at the start of the
- * method.  Net result is we zero out ~700 bytes of stack space at the top
- * of the interpreter using ARM STM instructions.
+ * There are two common approaches:
+ *  (1) Use a union that defines a 32-bit pair and a 64-bit value.
+ *  (2) Call memcpy().
+ *
+ * Depending upon what compiler you're using and what options are specified,
+ * one may be faster than the other.  For example, the compiler might
+ * convert a memcpy() of 8 bytes into a series of instructions and omit
+ * the call.  The union version could cause some strange side-effects,
+ * e.g. for a while ARM gcc thought it needed separate storage for each
+ * inlined instance, and generated instructions to zero out ~700 bytes of
+ * stack space at the top of the interpreter.
+ *
+ * The default is to use memcpy().  The current gcc for ARM seems to do
+ * better with the union.
  */
 #if defined(__ARM_EABI__)
-//# define NO_UNALIGN_64__UNION
-# define NO_UNALIGN_64__MEMCPY
+# define NO_UNALIGN_64__UNION
 #endif
 
+
 //#define LOG_INSTR                   /* verbose debugging */
 /* set and adjust ANDROID_LOG_TAGS='*:i jdwp:i dalvikvm:i dalvikvmi:i' */
 
@@ -171,12 +178,10 @@
     conv.parts[0] = ptr[0];
     conv.parts[1] = ptr[1];
     return conv.ll;
-#elif defined(NO_UNALIGN_64__MEMCPY)
+#else
     s8 val;
     memcpy(&val, &ptr[idx], 8);
     return val;
-#else
-    return *((s8*) &ptr[idx]);
 #endif
 }
 
@@ -190,10 +195,8 @@
     conv.ll = val;
     ptr[0] = conv.parts[0];
     ptr[1] = conv.parts[1];
-#elif defined(NO_UNALIGN_64__MEMCPY)
-    memcpy(&ptr[idx], &val, 8);
 #else
-    *((s8*) &ptr[idx]) = val;
+    memcpy(&ptr[idx], &val, 8);
 #endif
 }
 
@@ -207,12 +210,10 @@
     conv.parts[0] = ptr[0];
     conv.parts[1] = ptr[1];
     return conv.d;
-#elif defined(NO_UNALIGN_64__MEMCPY)
+#else
     double dval;
     memcpy(&dval, &ptr[idx], 8);
     return dval;
-#else
-    return *((double*) &ptr[idx]);
 #endif
 }
 
@@ -226,10 +227,8 @@
     conv.d = dval;
     ptr[0] = conv.parts[0];
     ptr[1] = conv.parts[1];
-#elif defined(NO_UNALIGN_64__MEMCPY)
-    memcpy(&ptr[idx], &dval, 8);
 #else
-    *((double*) &ptr[idx]) = dval;
+    memcpy(&ptr[idx], &dval, 8);
 #endif
 }
 
@@ -318,10 +317,10 @@
 
 /*
  * The current PC must be available to Throwable constructors, e.g.
- * those created by dvmThrowException(), so that the exception stack
- * trace can be generated correctly.  If we don't do this, the offset
- * within the current method won't be shown correctly.  See the notes
- * in Exception.c.
+ * those created by the various exception throw routines, so that the
+ * exception stack trace can be generated correctly.  If we don't do this,
+ * the offset within the current method won't be shown correctly.  See the
+ * notes in Exception.c.
  *
  * This is also used to determine the address for precise GC.
  *
@@ -360,7 +359,7 @@
 static inline bool checkForNull(Object* obj)
 {
     if (obj == NULL) {
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         return false;
     }
 #ifdef WITH_EXTRA_OBJECT_VALIDATION
@@ -392,7 +391,7 @@
 {
     if (obj == NULL) {
         EXPORT_PC();
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         return false;
     }
 #ifdef WITH_EXTRA_OBJECT_VALIDATION
@@ -418,7 +417,7 @@
 # define CHECK_TRACKED_REFS() ((void)0)
 #define CHECK_JIT_BOOL() (false)
 #define CHECK_JIT_VOID()
-#define ABORT_JIT_TSELECT() ((void)0)
+#define END_JIT_TSELECT() ((void)0)
 
 /*
  * In the C mterp stubs, "goto" is a function call followed immediately
@@ -426,11 +425,11 @@
  */
 
 #define GOTO_TARGET_DECL(_target, ...)                                      \
-    void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__);
+    void dvmMterp_##_target(Thread* self, ## __VA_ARGS__);
 
 /* (void)xxx to quiet unused variable compiler warnings. */
 #define GOTO_TARGET(_target, ...)                                           \
-    void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__) {              \
+    void dvmMterp_##_target(Thread* self, ## __VA_ARGS__) {                 \
         u2 ref, vsrc1, vsrc2, vdst;                                         \
         u2 inst = FETCH(0);                                                 \
         const Method* methodToCall;                                         \
@@ -441,16 +440,15 @@
 #define GOTO_TARGET_END }
 
 /*
- * Redefine what used to be local variable accesses into MterpGlue struct
+ * Redefine what used to be local variable accesses into Thread struct
  * references.  (These are undefined down in "footer.c".)
  */
-#define retval                  glue->retval
-#define pc                      glue->pc
-#define fp                      glue->fp
-#define curMethod               glue->method
-#define methodClassDex          glue->methodClassDex
-#define self                    glue->self
-#define debugTrackedRefStart    glue->debugTrackedRefStart
+#define retval                  self->retval
+#define pc                      self->interpSave.pc
+#define fp                      self->interpSave.fp
+#define curMethod               self->interpSave.method
+#define methodClassDex          self->interpSave.methodClassDex
+#define debugTrackedRefStart    self->interpSave.debugTrackedRefStart
 
 /* ugh */
 #define STUB_HACK(x) x
@@ -458,12 +456,12 @@
 
 /*
  * Opcode handler framing macros.  Here, each opcode is a separate function
- * that takes a "glue" argument and returns void.  We can't declare
+ * that takes a "self" argument and returns void.  We can't declare
  * these "static" because they may be called from an assembly stub.
  * (void)xxx to quiet unused variable compiler warnings.
  */
 #define HANDLE_OPCODE(_op)                                                  \
-    void dvmMterp_##_op(MterpGlue* glue) {                                  \
+    void dvmMterp_##_op(Thread* self) {                                     \
         u2 ref, vsrc1, vsrc2, vdst;                                         \
         u2 inst = FETCH(0);                                                 \
         (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst;
@@ -490,25 +488,25 @@
 
 #define GOTO_exceptionThrown()                                              \
     do {                                                                    \
-        dvmMterp_exceptionThrown(glue);                                     \
+        dvmMterp_exceptionThrown(self);                                     \
         return;                                                             \
     } while(false)
 
 #define GOTO_returnFromMethod()                                             \
     do {                                                                    \
-        dvmMterp_returnFromMethod(glue);                                    \
+        dvmMterp_returnFromMethod(self);                                    \
         return;                                                             \
     } while(false)
 
-#define GOTO_invoke(_target, _methodCallRange)                              \
+#define GOTO_invoke(_target, _methodCallRange, _jumboFormat)                \
     do {                                                                    \
-        dvmMterp_##_target(glue, _methodCallRange);                         \
+        dvmMterp_##_target(self, _methodCallRange, _jumboFormat);           \
         return;                                                             \
     } while(false)
 
 #define GOTO_invokeMethod(_methodCallRange, _methodToCall, _vsrc1, _vdst)   \
     do {                                                                    \
-        dvmMterp_invokeMethod(glue, _methodCallRange, _methodToCall,        \
+        dvmMterp_invokeMethod(self, _methodCallRange, _methodToCall,        \
             _vsrc1, _vdst);                                                 \
         return;                                                             \
     } while(false)
@@ -518,9 +516,9 @@
  * if we need to switch to the other interpreter upon our return.
  */
 #define GOTO_bail()                                                         \
-    dvmMterpStdBail(glue, false);
+    dvmMterpStdBail(self, false);
 #define GOTO_bail_switch()                                                  \
-    dvmMterpStdBail(glue, true);
+    dvmMterpStdBail(self, true);
 
 /*
  * Periodically check for thread suspension.
@@ -535,7 +533,7 @@
         }                                                                   \
         if (NEED_INTERP_SWITCH(INTERP_TYPE)) {                              \
             ADJUST_PC(_pcadj);                                              \
-            glue->entryPoint = _entryPoint;                                 \
+            self->entryPoint = _entryPoint;                                 \
             LOGVV("threadid=%d: switch to STD ep=%d adj=%d\n",              \
                 self->threadId, (_entryPoint), (_pcadj));                   \
             GOTO_bail_switch();                                             \
@@ -544,14 +542,14 @@
 
 /* File: c/opcommon.c */
 /* forward declarations of goto targets */
-GOTO_TARGET_DECL(filledNewArray, bool methodCallRange);
-GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange);
-GOTO_TARGET_DECL(invokeSuper, bool methodCallRange);
-GOTO_TARGET_DECL(invokeInterface, bool methodCallRange);
-GOTO_TARGET_DECL(invokeDirect, bool methodCallRange);
-GOTO_TARGET_DECL(invokeStatic, bool methodCallRange);
-GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange);
-GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange);
+GOTO_TARGET_DECL(filledNewArray, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeSuper, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeInterface, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeDirect, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeStatic, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange, bool jumboFormat);
 GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall,
     u2 count, u2 regs);
 GOTO_TARGET_DECL(returnFromMethod);
@@ -694,8 +692,7 @@
             secondVal = GET_REGISTER(vsrc2);                                \
             if (secondVal == 0) {                                           \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && secondVal == -1) {            \
@@ -741,9 +738,8 @@
             firstVal = GET_REGISTER(vsrc1);                                 \
             if ((s2) vsrc2 == 0) {                                          \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
-                GOTO_exceptionThrown();                                      \
+                dvmThrowArithmeticException("divide by zero");              \
+                GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && ((s2) vsrc2) == -1) {         \
                 /* won't generate /lit16 instr for this; check anyway */    \
@@ -776,8 +772,7 @@
             firstVal = GET_REGISTER(vsrc1);                                 \
             if ((s1) vsrc2 == 0) {                                          \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && ((s1) vsrc2) == -1) {         \
@@ -822,8 +817,7 @@
             secondVal = GET_REGISTER(vsrc1);                                \
             if (secondVal == 0) {                                           \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && secondVal == -1) {            \
@@ -865,8 +859,7 @@
             secondVal = GET_REGISTER_WIDE(vsrc2);                           \
             if (secondVal == 0LL) {                                         \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u8)firstVal == 0x8000000000000000ULL &&                    \
@@ -912,8 +905,7 @@
             secondVal = GET_REGISTER_WIDE(vsrc1);                           \
             if (secondVal == 0LL) {                                         \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u8)firstVal == 0x8000000000000000ULL &&                    \
@@ -1003,7 +995,8 @@
         if (!checkForNull((Object*) arrayObj))                              \
             GOTO_exceptionThrown();                                         \
         if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
-            dvmThrowAIOOBE(GET_REGISTER(vsrc2), arrayObj->length);          \
+            dvmThrowArrayIndexOutOfBoundsException(                         \
+                arrayObj->length, GET_REGISTER(vsrc2));                     \
             GOTO_exceptionThrown();                                         \
         }                                                                   \
         SET_REGISTER##_regsize(vdst,                                        \
@@ -1027,7 +1020,8 @@
         if (!checkForNull((Object*) arrayObj))                              \
             GOTO_exceptionThrown();                                         \
         if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
-            dvmThrowAIOOBE(GET_REGISTER(vsrc2), arrayObj->length);          \
+            dvmThrowArrayIndexOutOfBoundsException(                         \
+                arrayObj->length, GET_REGISTER(vsrc2));                     \
             GOTO_exceptionThrown();                                         \
         }                                                                   \
         ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\
@@ -1080,6 +1074,34 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_IGET_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, vCCCC, class@AAAAAAAA*/)                 \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        vsrc1 = FETCH(4);                      /* object ptr */             \
+        ILOGV("|iget%s/jumbo v%d,v%d,field@0x%08x",                         \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO_exceptionThrown();                                         \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(curMethod->clazz, ref);            \
+            if (ifield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst,                                        \
+            dvmGetField##_ftype(obj, ifield->byteOffset));                  \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+        UPDATE_FIELD_GET(&ifield->field);                                   \
+    }                                                                       \
+    FINISH(5);
+
 #define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
     HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
     {                                                                       \
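Aside: the jumbo handlers above rebuild their 32-bit field/class index from
two 16-bit code units, low half first (ref = FETCH(1) | (u4)FETCH(2) << 16).
A tiny standalone illustration of that arithmetic, using a hypothetical
code-unit array that is not taken from the patch:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* hypothetical instruction stream: unit 0 is the opcode word,
         * units 1 and 2 hold the 32-bit index, low half first */
        const uint16_t insns[] = { 0x00ff, 0x5678, 0x1234, 0x0007, 0x0003 };
        uint32_t ref = insns[1] | (uint32_t)insns[2] << 16;
        assert(ref == 0x12345678);
        return 0;
    }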
@@ -1125,6 +1147,34 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_IPUT_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, vCCCC, class@AAAAAAAA*/)                 \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        vsrc1 = FETCH(4);                      /* object ptr */             \
+        ILOGV("|iput%s/jumbo v%d,v%d,field@0x%08x",                         \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO_exceptionThrown();                                         \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(curMethod->clazz, ref);            \
+            if (ifield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+        }                                                                   \
+        dvmSetField##_ftype(obj, ifield->byteOffset,                        \
+            GET_REGISTER##_regsize(vdst));                                  \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+        UPDATE_FIELD_PUT(&ifield->field);                                   \
+    }                                                                       \
+    FINISH(5);
+
 #define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
     HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
     {                                                                       \
@@ -1162,7 +1212,7 @@
             if (sfield == NULL)                                             \
                 GOTO_exceptionThrown();                                     \
             if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
-                ABORT_JIT_TSELECT();                                        \
+                END_JIT_TSELECT();                                          \
             }                                                               \
         }                                                                   \
         SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
@@ -1172,6 +1222,30 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_SGET_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, class@AAAAAAAA*/)                        \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        ILOGV("|sget%s/jumbo v%d,sfield@0x%08x", (_opname), vdst, ref);     \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(curMethod->clazz, ref);          \
+            if (sfield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+            if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
+                END_JIT_TSELECT();                                          \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
+        ILOGV("+ SGET '%s'=0x%08llx",                                       \
+            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+        UPDATE_FIELD_GET(&sfield->field);                                   \
+    }                                                                       \
+    FINISH(4);
+
 #define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize)                   \
     HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/)                              \
     {                                                                       \
@@ -1186,7 +1260,7 @@
             if (sfield == NULL)                                             \
                 GOTO_exceptionThrown();                                     \
             if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
-                ABORT_JIT_TSELECT();                                        \
+                END_JIT_TSELECT();                                          \
             }                                                               \
         }                                                                   \
         dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
@@ -1196,6 +1270,30 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_SPUT_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, class@AAAAAAAA*/)                        \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        ILOGV("|sput%s/jumbo v%d,sfield@0x%08x", (_opname), vdst, ref);     \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(curMethod->clazz, ref);          \
+            if (sfield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+            if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
+                END_JIT_TSELECT();                                          \
+            }                                                               \
+        }                                                                   \
+        dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
+        ILOGV("+ SPUT '%s'=0x%08llx",                                       \
+            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+        UPDATE_FIELD_PUT(&sfield->field);                                   \
+    }                                                                       \
+    FINISH(4);
+
 /* File: c/OP_NOP.c */
 HANDLE_OPCODE(OP_NOP)
     FINISH(1);
@@ -1549,12 +1647,8 @@
         if (!checkForNullExportPC(obj, fp, pc))
             GOTO_exceptionThrown();
         ILOGV("+ locking %p %s\n", obj, obj->clazz->descriptor);
-        EXPORT_PC();    /* need for precise GC, also WITH_MONITOR_TRACKING */
+        EXPORT_PC();    /* need for precise GC */
         dvmLockObject(self, obj);
-#ifdef WITH_DEADLOCK_PREDICTION
-        if (dvmCheckException(self))
-            GOTO_exceptionThrown();
-#endif
     }
     FINISH(1);
 OP_END
@@ -1700,15 +1794,15 @@
          * check is not needed for mterp.
          */
         if (!dvmDexGetResolvedClass(methodClassDex, ref)) {
-            /* Class initialization is still ongoing - abandon the trace */
-            ABORT_JIT_TSELECT();
+            /* Class initialization is still ongoing - end the trace */
+            END_JIT_TSELECT();
         }
 
         /*
          * Verifier now tests for interface/abstract class.
          */
         //if (dvmIsInterfaceClass(clazz) || dvmIsAbstractClass(clazz)) {
-        //    dvmThrowExceptionWithClassMessage("Ljava/lang/InstantiationError;",
+        //    dvmThrowExceptionWithClassMessage(gDvm.exInstantiationError,
         //        clazz->descriptor);
         //    GOTO_exceptionThrown();
         //}
@@ -1736,7 +1830,7 @@
             vdst, vsrc1, ref, (s4) GET_REGISTER(vsrc1));
         length = (s4) GET_REGISTER(vsrc1);
         if (length < 0) {
-            dvmThrowException("Ljava/lang/NegativeArraySizeException;", NULL);
+            dvmThrowNegativeArraySizeException(length);
             GOTO_exceptionThrown();
         }
         arrayClass = dvmDexGetResolvedClass(methodClassDex, ref);
@@ -1759,12 +1853,12 @@
 
 /* File: c/OP_FILLED_NEW_ARRAY.c */
 HANDLE_OPCODE(OP_FILLED_NEW_ARRAY /*vB, {vD, vE, vF, vG, vA}, class@CCCC*/)
-    GOTO_invoke(filledNewArray, false);
+    GOTO_invoke(filledNewArray, false, false);
 OP_END
 
 /* File: c/OP_FILLED_NEW_ARRAY_RANGE.c */
 HANDLE_OPCODE(OP_FILLED_NEW_ARRAY_RANGE /*{vCCCC..v(CCCC+AA-1)}, class@BBBB*/)
-    GOTO_invoke(filledNewArray, true);
+    GOTO_invoke(filledNewArray, true, false);
 OP_END
 
 /* File: c/OP_FILL_ARRAY_DATA.c */
@@ -1784,8 +1878,7 @@
             arrayData >= curMethod->insns + dvmGetMethodInsnsSize(curMethod))
         {
             /* should have been caught in verifier */
-            dvmThrowException("Ljava/lang/InternalError;",
-                              "bad fill array data");
+            dvmThrowInternalError("bad fill array data");
             GOTO_exceptionThrown();
         }
 #endif
@@ -1886,7 +1979,7 @@
         {
             /* should have been caught in verifier */
             EXPORT_PC();
-            dvmThrowException("Ljava/lang/InternalError;", "bad packed switch");
+            dvmThrowInternalError("bad packed switch");
             GOTO_exceptionThrown();
         }
 #endif
@@ -1917,7 +2010,7 @@
         {
             /* should have been caught in verifier */
             EXPORT_PC();
-            dvmThrowException("Ljava/lang/InternalError;", "bad sparse switch");
+            dvmThrowInternalError("bad sparse switch");
             GOTO_exceptionThrown();
         }
 #endif
@@ -2075,7 +2168,8 @@
         if (!checkForNull((Object*) arrayObj))
             GOTO_exceptionThrown();
         if (GET_REGISTER(vsrc2) >= arrayObj->length) {
-            dvmThrowAIOOBE(GET_REGISTER(vsrc2), arrayObj->length);
+            dvmThrowArrayIndexOutOfBoundsException(
+                arrayObj->length, GET_REGISTER(vsrc2));
             GOTO_exceptionThrown();
         }
         obj = (Object*) GET_REGISTER(vdst);
@@ -2239,27 +2333,27 @@
 
 /* File: c/OP_INVOKE_VIRTUAL.c */
 HANDLE_OPCODE(OP_INVOKE_VIRTUAL /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-    GOTO_invoke(invokeVirtual, false);
+    GOTO_invoke(invokeVirtual, false, false);
 OP_END
 
 /* File: c/OP_INVOKE_SUPER.c */
 HANDLE_OPCODE(OP_INVOKE_SUPER /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-    GOTO_invoke(invokeSuper, false);
+    GOTO_invoke(invokeSuper, false, false);
 OP_END
 
 /* File: c/OP_INVOKE_DIRECT.c */
 HANDLE_OPCODE(OP_INVOKE_DIRECT /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-    GOTO_invoke(invokeDirect, false);
+    GOTO_invoke(invokeDirect, false, false);
 OP_END
 
 /* File: c/OP_INVOKE_STATIC.c */
 HANDLE_OPCODE(OP_INVOKE_STATIC /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-    GOTO_invoke(invokeStatic, false);
+    GOTO_invoke(invokeStatic, false, false);
 OP_END
 
 /* File: c/OP_INVOKE_INTERFACE.c */
 HANDLE_OPCODE(OP_INVOKE_INTERFACE /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-    GOTO_invoke(invokeInterface, false);
+    GOTO_invoke(invokeInterface, false, false);
 OP_END
 
 /* File: c/OP_UNUSED_73.c */
@@ -2268,27 +2362,27 @@
 
 /* File: c/OP_INVOKE_VIRTUAL_RANGE.c */
 HANDLE_OPCODE(OP_INVOKE_VIRTUAL_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
-    GOTO_invoke(invokeVirtual, true);
+    GOTO_invoke(invokeVirtual, true, false);
 OP_END
 
 /* File: c/OP_INVOKE_SUPER_RANGE.c */
 HANDLE_OPCODE(OP_INVOKE_SUPER_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
-    GOTO_invoke(invokeSuper, true);
+    GOTO_invoke(invokeSuper, true, false);
 OP_END
 
 /* File: c/OP_INVOKE_DIRECT_RANGE.c */
 HANDLE_OPCODE(OP_INVOKE_DIRECT_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
-    GOTO_invoke(invokeDirect, true);
+    GOTO_invoke(invokeDirect, true, false);
 OP_END
 
 /* File: c/OP_INVOKE_STATIC_RANGE.c */
 HANDLE_OPCODE(OP_INVOKE_STATIC_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
-    GOTO_invoke(invokeStatic, true);
+    GOTO_invoke(invokeStatic, true, false);
 OP_END
 
 /* File: c/OP_INVOKE_INTERFACE_RANGE.c */
 HANDLE_OPCODE(OP_INVOKE_INTERFACE_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
-    GOTO_invoke(invokeInterface, true);
+    GOTO_invoke(invokeInterface, true, false);
 OP_END
 
 /* File: c/OP_UNUSED_79.c */
@@ -2953,21 +3047,34 @@
     FINISH(3);
 OP_END
 
-/* File: c/OP_INVOKE_DIRECT_EMPTY.c */
-HANDLE_OPCODE(OP_INVOKE_DIRECT_EMPTY /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-#if INTERP_TYPE != INTERP_DBG
-    //LOGI("Ignoring empty\n");
-    FINISH(3);
-#else
-    if (!gDvm.debuggerActive) {
-        //LOGI("Skipping empty\n");
-        FINISH(3);      // don't want it to show up in profiler output
-    } else {
-        //LOGI("Running empty\n");
-        /* fall through to OP_INVOKE_DIRECT */
-        GOTO_invoke(invokeDirect, false);
-    }
+/* File: c/OP_INVOKE_OBJECT_INIT_RANGE.c */
+HANDLE_OPCODE(OP_INVOKE_OBJECT_INIT_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+    {
+        Object* obj;
+
+        vsrc1 = FETCH(2);               /* reg number of "this" pointer */
+        obj = GET_REGISTER_AS_OBJECT(vsrc1);
+
+        if (!checkForNullExportPC(obj, fp, pc))
+            GOTO_exceptionThrown();
+
+        /*
+         * The object should be marked "finalizable" when Object.<init>
+         * completes normally.  We're going to assume it does complete
+         * (by virtue of being nothing but a return-void) and set it now.
+         */
+        if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE)) {
+            dvmSetFinalizable(obj);
+        }
+
+#if INTERP_TYPE == INTERP_DBG
+        if (DEBUGGER_ACTIVE) {
+            /* behave like OP_INVOKE_DIRECT_RANGE */
+            GOTO_invoke(invokeDirect, true, false);
+        }
 #endif
+        FINISH(3);
+    }
 OP_END
 
 /* File: c/OP_RETURN_VOID_BARRIER.c */
@@ -3006,22 +3113,22 @@
 
 /* File: c/OP_INVOKE_VIRTUAL_QUICK.c */
 HANDLE_OPCODE(OP_INVOKE_VIRTUAL_QUICK /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-    GOTO_invoke(invokeVirtualQuick, false);
+    GOTO_invoke(invokeVirtualQuick, false, false);
 OP_END
 
 /* File: c/OP_INVOKE_VIRTUAL_QUICK_RANGE.c */
 HANDLE_OPCODE(OP_INVOKE_VIRTUAL_QUICK_RANGE/*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
-    GOTO_invoke(invokeVirtualQuick, true);
+    GOTO_invoke(invokeVirtualQuick, true, false);
 OP_END
 
 /* File: c/OP_INVOKE_SUPER_QUICK.c */
 HANDLE_OPCODE(OP_INVOKE_SUPER_QUICK /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-    GOTO_invoke(invokeSuperQuick, false);
+    GOTO_invoke(invokeSuperQuick, false, false);
 OP_END
 
 /* File: c/OP_INVOKE_SUPER_QUICK_RANGE.c */
 HANDLE_OPCODE(OP_INVOKE_SUPER_QUICK_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
-    GOTO_invoke(invokeSuperQuick, true);
+    GOTO_invoke(invokeSuperQuick, true, false);
 OP_END
 
 /* File: c/OP_IPUT_OBJECT_VOLATILE.c */
@@ -3039,13 +3146,1238 @@
 /* File: c/OP_DISPATCH_FF.c */
 HANDLE_OPCODE(OP_DISPATCH_FF)
     /*
+     * Indicates extended opcode.  Use next 8 bits to choose where to branch.
+     */
+    DISPATCH_EXTENDED(INST_AA(inst));
+OP_END
+
+/* File: c/OP_CONST_CLASS_JUMBO.c */
+HANDLE_OPCODE(OP_CONST_CLASS_JUMBO /*vBBBB, class@AAAAAAAA*/)
+    {
+        ClassObject* clazz;
+
+        ref = FETCH(1) | (u4)FETCH(2) << 16;
+        vdst = FETCH(3);
+        ILOGV("|const-class/jumbo v%d class@0x%08x", vdst, ref);
+        clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+        if (clazz == NULL) {
+            EXPORT_PC();
+            clazz = dvmResolveClass(curMethod->clazz, ref, true);
+            if (clazz == NULL)
+                GOTO_exceptionThrown();
+        }
+        SET_REGISTER(vdst, (u4) clazz);
+    }
+    FINISH(4);
+OP_END
+
+/* File: c/OP_CHECK_CAST_JUMBO.c */
+HANDLE_OPCODE(OP_CHECK_CAST_JUMBO /*vBBBB, class@AAAAAAAA*/)
+    {
+        ClassObject* clazz;
+        Object* obj;
+
+        EXPORT_PC();
+
+        ref = FETCH(1) | (u4)FETCH(2) << 16;     /* class to check against */
+        vsrc1 = FETCH(3);
+        ILOGV("|check-cast/jumbo v%d,class@0x%08x", vsrc1, ref);
+
+        obj = (Object*)GET_REGISTER(vsrc1);
+        if (obj != NULL) {
+#if defined(WITH_EXTRA_OBJECT_VALIDATION)
+            if (!checkForNull(obj))
+                GOTO_exceptionThrown();
+#endif
+            clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+            if (clazz == NULL) {
+                clazz = dvmResolveClass(curMethod->clazz, ref, false);
+                if (clazz == NULL)
+                    GOTO_exceptionThrown();
+            }
+            if (!dvmInstanceof(obj->clazz, clazz)) {
+                dvmThrowClassCastException(obj->clazz, clazz);
+                GOTO_exceptionThrown();
+            }
+        }
+    }
+    FINISH(4);
+OP_END
+
+/* File: c/OP_INSTANCE_OF_JUMBO.c */
+HANDLE_OPCODE(OP_INSTANCE_OF_JUMBO /*vBBBB, vCCCC, class@AAAAAAAA*/)
+    {
+        ClassObject* clazz;
+        Object* obj;
+
+        ref = FETCH(1) | (u4)FETCH(2) << 16;     /* class to check against */
+        vdst = FETCH(3);
+        vsrc1 = FETCH(4);   /* object to check */
+        ILOGV("|instance-of/jumbo v%d,v%d,class@0x%08x", vdst, vsrc1, ref);
+
+        obj = (Object*)GET_REGISTER(vsrc1);
+        if (obj == NULL) {
+            SET_REGISTER(vdst, 0);
+        } else {
+#if defined(WITH_EXTRA_OBJECT_VALIDATION)
+            if (!checkForNullExportPC(obj, fp, pc))
+                GOTO_exceptionThrown();
+#endif
+            clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+            if (clazz == NULL) {
+                EXPORT_PC();
+                clazz = dvmResolveClass(curMethod->clazz, ref, true);
+                if (clazz == NULL)
+                    GOTO_exceptionThrown();
+            }
+            SET_REGISTER(vdst, dvmInstanceof(obj->clazz, clazz));
+        }
+    }
+    FINISH(5);
+OP_END
+
+/* File: c/OP_NEW_INSTANCE_JUMBO.c */
+HANDLE_OPCODE(OP_NEW_INSTANCE_JUMBO /*vBBBB, class@AAAAAAAA*/)
+    {
+        ClassObject* clazz;
+        Object* newObj;
+
+        EXPORT_PC();
+
+        ref = FETCH(1) | (u4)FETCH(2) << 16;
+        vdst = FETCH(3);
+        ILOGV("|new-instance/jumbo v%d,class@0x%08x", vdst, ref);
+        clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+        if (clazz == NULL) {
+            clazz = dvmResolveClass(curMethod->clazz, ref, false);
+            if (clazz == NULL)
+                GOTO_exceptionThrown();
+        }
+
+        if (!dvmIsClassInitialized(clazz) && !dvmInitClass(clazz))
+            GOTO_exceptionThrown();
+
+        /*
+         * The JIT needs dvmDexGetResolvedClass() to return non-null.
+         * Since we use the portable interpreter to build the trace, this extra
+         * check is not needed for mterp.
+         */
+        if (!dvmDexGetResolvedClass(methodClassDex, ref)) {
+            /* Class initialization is still ongoing - end the trace */
+            END_JIT_TSELECT();
+        }
+
+        /*
+         * Verifier now tests for interface/abstract class.
+         */
+        //if (dvmIsInterfaceClass(clazz) || dvmIsAbstractClass(clazz)) {
+        //    dvmThrowExceptionWithClassMessage(gDvm.exInstantiationError,
+        //        clazz->descriptor);
+        //    GOTO_exceptionThrown();
+        //}
+        newObj = dvmAllocObject(clazz, ALLOC_DONT_TRACK);
+        if (newObj == NULL)
+            GOTO_exceptionThrown();
+        SET_REGISTER(vdst, (u4) newObj);
+    }
+    FINISH(4);
+OP_END
+
+/* File: c/OP_NEW_ARRAY_JUMBO.c */
+HANDLE_OPCODE(OP_NEW_ARRAY_JUMBO /*vBBBB, vCCCC, class@AAAAAAAA*/)
+    {
+        ClassObject* arrayClass;
+        ArrayObject* newArray;
+        s4 length;
+
+        EXPORT_PC();
+
+        ref = FETCH(1) | (u4)FETCH(2) << 16;
+        vdst = FETCH(3);
+        vsrc1 = FETCH(4);       /* length reg */
+        ILOGV("|new-array/jumbo v%d,v%d,class@0x%08x  (%d elements)",
+            vdst, vsrc1, ref, (s4) GET_REGISTER(vsrc1));
+        length = (s4) GET_REGISTER(vsrc1);
+        if (length < 0) {
+            dvmThrowNegativeArraySizeException(length);
+            GOTO_exceptionThrown();
+        }
+        arrayClass = dvmDexGetResolvedClass(methodClassDex, ref);
+        if (arrayClass == NULL) {
+            arrayClass = dvmResolveClass(curMethod->clazz, ref, false);
+            if (arrayClass == NULL)
+                GOTO_exceptionThrown();
+        }
+        /* verifier guarantees this is an array class */
+        assert(dvmIsArrayClass(arrayClass));
+        assert(dvmIsClassInitialized(arrayClass));
+
+        newArray = dvmAllocArrayByClass(arrayClass, length, ALLOC_DONT_TRACK);
+        if (newArray == NULL)
+            GOTO_exceptionThrown();
+        SET_REGISTER(vdst, (u4) newArray);
+    }
+    FINISH(5);
+OP_END
+
+/* File: c/OP_FILLED_NEW_ARRAY_JUMBO.c */
+HANDLE_OPCODE(OP_FILLED_NEW_ARRAY_JUMBO /*{vCCCC..v(CCCC+BBBB-1)}, class@AAAAAAAA*/)
+    GOTO_invoke(filledNewArray, true, true);
+OP_END
+
+/* File: c/OP_IGET_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_JUMBO,          "", Int, )
+OP_END
+
+/* File: c/OP_IGET_WIDE_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_WIDE_JUMBO,     "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_IGET_OBJECT_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_OBJECT_JUMBO,   "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_IGET_BOOLEAN_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_BOOLEAN_JUMBO,  "", Int, )
+OP_END
+
+/* File: c/OP_IGET_BYTE_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_BYTE_JUMBO,     "", Int, )
+OP_END
+
+/* File: c/OP_IGET_CHAR_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_CHAR_JUMBO,     "", Int, )
+OP_END
+
+/* File: c/OP_IGET_SHORT_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_SHORT_JUMBO,    "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_JUMBO,          "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_WIDE_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_WIDE_JUMBO,     "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_IPUT_OBJECT_JUMBO.c */
+/*
+ * The VM spec says we should verify that the reference being stored into
+ * the field is assignment compatible.  In practice, many popular VMs don't
+ * do this because it slows down a very common operation.  It's not so bad
+ * for us, since "dexopt" quickens it whenever possible, but it's still an
+ * issue.
+ *
+ * To make this spec-compliant, we'd need to add a ClassObject pointer to
+ * the Field struct, resolve the field's type descriptor at link or class
+ * init time, and then verify the type here.
+ */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_OBJECT_JUMBO,   "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_IPUT_BOOLEAN_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_BOOLEAN_JUMBO,  "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_BYTE_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_BYTE_JUMBO,     "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_CHAR_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_CHAR_JUMBO,     "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_SHORT_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_SHORT_JUMBO,    "", Int, )
+OP_END
+
+/* File: c/OP_SGET_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_JUMBO,          "", Int, )
+OP_END
+
+/* File: c/OP_SGET_WIDE_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_WIDE_JUMBO,     "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_SGET_OBJECT_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_OBJECT_JUMBO,   "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_SGET_BOOLEAN_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_BOOLEAN_JUMBO,  "", Int, )
+OP_END
+
+/* File: c/OP_SGET_BYTE_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_BYTE_JUMBO,     "", Int, )
+OP_END
+
+/* File: c/OP_SGET_CHAR_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_CHAR_JUMBO,     "", Int, )
+OP_END
+
+/* File: c/OP_SGET_SHORT_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_SHORT_JUMBO,    "", Int, )
+OP_END
+
+/* File: c/OP_SPUT_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_JUMBO,          "", Int, )
+OP_END
+
+/* File: c/OP_SPUT_WIDE_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_WIDE_JUMBO,     "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_SPUT_OBJECT_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_OBJECT_JUMBO,   "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_SPUT_BOOLEAN_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_BOOLEAN_JUMBO,          "", Int, )
+OP_END
+
+/* File: c/OP_SPUT_BYTE_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_BYTE_JUMBO,     "", Int, )
+OP_END
+
+/* File: c/OP_SPUT_CHAR_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_CHAR_JUMBO,     "", Int, )
+OP_END
+
+/* File: c/OP_SPUT_SHORT_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_SHORT_JUMBO,    "", Int, )
+OP_END
+
+/* File: c/OP_INVOKE_VIRTUAL_JUMBO.c */
+HANDLE_OPCODE(OP_INVOKE_VIRTUAL_JUMBO /*{vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA*/)
+    GOTO_invoke(invokeVirtual, true, true);
+OP_END
+
+/* File: c/OP_INVOKE_SUPER_JUMBO.c */
+HANDLE_OPCODE(OP_INVOKE_SUPER_JUMBO /*{vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA*/)
+    GOTO_invoke(invokeSuper, true, true);
+OP_END
+
+/* File: c/OP_INVOKE_DIRECT_JUMBO.c */
+HANDLE_OPCODE(OP_INVOKE_DIRECT_JUMBO /*{vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA*/)
+    GOTO_invoke(invokeDirect, true, true);
+OP_END
+
+/* File: c/OP_INVOKE_STATIC_JUMBO.c */
+HANDLE_OPCODE(OP_INVOKE_STATIC_JUMBO /*{vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA*/)
+    GOTO_invoke(invokeStatic, true, true);
+OP_END
+
+/* File: c/OP_INVOKE_INTERFACE_JUMBO.c */
+HANDLE_OPCODE(OP_INVOKE_INTERFACE_JUMBO /*{vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA*/)
+    GOTO_invoke(invokeInterface, true, true);
+OP_END
+
+/* File: c/OP_UNUSED_27FF.c */
+HANDLE_OPCODE(OP_UNUSED_27FF)
+OP_END
+
+/* File: c/OP_UNUSED_28FF.c */
+HANDLE_OPCODE(OP_UNUSED_28FF)
+OP_END
+
+/* File: c/OP_UNUSED_29FF.c */
+HANDLE_OPCODE(OP_UNUSED_29FF)
+OP_END
+
+/* File: c/OP_UNUSED_2AFF.c */
+HANDLE_OPCODE(OP_UNUSED_2AFF)
+OP_END
+
+/* File: c/OP_UNUSED_2BFF.c */
+HANDLE_OPCODE(OP_UNUSED_2BFF)
+OP_END
+
+/* File: c/OP_UNUSED_2CFF.c */
+HANDLE_OPCODE(OP_UNUSED_2CFF)
+OP_END
+
+/* File: c/OP_UNUSED_2DFF.c */
+HANDLE_OPCODE(OP_UNUSED_2DFF)
+OP_END
+
+/* File: c/OP_UNUSED_2EFF.c */
+HANDLE_OPCODE(OP_UNUSED_2EFF)
+OP_END
+
+/* File: c/OP_UNUSED_2FFF.c */
+HANDLE_OPCODE(OP_UNUSED_2FFF)
+OP_END
+
+/* File: c/OP_UNUSED_30FF.c */
+HANDLE_OPCODE(OP_UNUSED_30FF)
+OP_END
+
+/* File: c/OP_UNUSED_31FF.c */
+HANDLE_OPCODE(OP_UNUSED_31FF)
+OP_END
+
+/* File: c/OP_UNUSED_32FF.c */
+HANDLE_OPCODE(OP_UNUSED_32FF)
+OP_END
+
+/* File: c/OP_UNUSED_33FF.c */
+HANDLE_OPCODE(OP_UNUSED_33FF)
+OP_END
+
+/* File: c/OP_UNUSED_34FF.c */
+HANDLE_OPCODE(OP_UNUSED_34FF)
+OP_END
+
+/* File: c/OP_UNUSED_35FF.c */
+HANDLE_OPCODE(OP_UNUSED_35FF)
+OP_END
+
+/* File: c/OP_UNUSED_36FF.c */
+HANDLE_OPCODE(OP_UNUSED_36FF)
+OP_END
+
+/* File: c/OP_UNUSED_37FF.c */
+HANDLE_OPCODE(OP_UNUSED_37FF)
+OP_END
+
+/* File: c/OP_UNUSED_38FF.c */
+HANDLE_OPCODE(OP_UNUSED_38FF)
+OP_END
+
+/* File: c/OP_UNUSED_39FF.c */
+HANDLE_OPCODE(OP_UNUSED_39FF)
+OP_END
+
+/* File: c/OP_UNUSED_3AFF.c */
+HANDLE_OPCODE(OP_UNUSED_3AFF)
+OP_END
+
+/* File: c/OP_UNUSED_3BFF.c */
+HANDLE_OPCODE(OP_UNUSED_3BFF)
+OP_END
+
+/* File: c/OP_UNUSED_3CFF.c */
+HANDLE_OPCODE(OP_UNUSED_3CFF)
+OP_END
+
+/* File: c/OP_UNUSED_3DFF.c */
+HANDLE_OPCODE(OP_UNUSED_3DFF)
+OP_END
+
+/* File: c/OP_UNUSED_3EFF.c */
+HANDLE_OPCODE(OP_UNUSED_3EFF)
+OP_END
+
+/* File: c/OP_UNUSED_3FFF.c */
+HANDLE_OPCODE(OP_UNUSED_3FFF)
+OP_END
+
+/* File: c/OP_UNUSED_40FF.c */
+HANDLE_OPCODE(OP_UNUSED_40FF)
+OP_END
+
+/* File: c/OP_UNUSED_41FF.c */
+HANDLE_OPCODE(OP_UNUSED_41FF)
+OP_END
+
+/* File: c/OP_UNUSED_42FF.c */
+HANDLE_OPCODE(OP_UNUSED_42FF)
+OP_END
+
+/* File: c/OP_UNUSED_43FF.c */
+HANDLE_OPCODE(OP_UNUSED_43FF)
+OP_END
+
+/* File: c/OP_UNUSED_44FF.c */
+HANDLE_OPCODE(OP_UNUSED_44FF)
+OP_END
+
+/* File: c/OP_UNUSED_45FF.c */
+HANDLE_OPCODE(OP_UNUSED_45FF)
+OP_END
+
+/* File: c/OP_UNUSED_46FF.c */
+HANDLE_OPCODE(OP_UNUSED_46FF)
+OP_END
+
+/* File: c/OP_UNUSED_47FF.c */
+HANDLE_OPCODE(OP_UNUSED_47FF)
+OP_END
+
+/* File: c/OP_UNUSED_48FF.c */
+HANDLE_OPCODE(OP_UNUSED_48FF)
+OP_END
+
+/* File: c/OP_UNUSED_49FF.c */
+HANDLE_OPCODE(OP_UNUSED_49FF)
+OP_END
+
+/* File: c/OP_UNUSED_4AFF.c */
+HANDLE_OPCODE(OP_UNUSED_4AFF)
+OP_END
+
+/* File: c/OP_UNUSED_4BFF.c */
+HANDLE_OPCODE(OP_UNUSED_4BFF)
+OP_END
+
+/* File: c/OP_UNUSED_4CFF.c */
+HANDLE_OPCODE(OP_UNUSED_4CFF)
+OP_END
+
+/* File: c/OP_UNUSED_4DFF.c */
+HANDLE_OPCODE(OP_UNUSED_4DFF)
+OP_END
+
+/* File: c/OP_UNUSED_4EFF.c */
+HANDLE_OPCODE(OP_UNUSED_4EFF)
+OP_END
+
+/* File: c/OP_UNUSED_4FFF.c */
+HANDLE_OPCODE(OP_UNUSED_4FFF)
+OP_END
+
+/* File: c/OP_UNUSED_50FF.c */
+HANDLE_OPCODE(OP_UNUSED_50FF)
+OP_END
+
+/* File: c/OP_UNUSED_51FF.c */
+HANDLE_OPCODE(OP_UNUSED_51FF)
+OP_END
+
+/* File: c/OP_UNUSED_52FF.c */
+HANDLE_OPCODE(OP_UNUSED_52FF)
+OP_END
+
+/* File: c/OP_UNUSED_53FF.c */
+HANDLE_OPCODE(OP_UNUSED_53FF)
+OP_END
+
+/* File: c/OP_UNUSED_54FF.c */
+HANDLE_OPCODE(OP_UNUSED_54FF)
+OP_END
+
+/* File: c/OP_UNUSED_55FF.c */
+HANDLE_OPCODE(OP_UNUSED_55FF)
+OP_END
+
+/* File: c/OP_UNUSED_56FF.c */
+HANDLE_OPCODE(OP_UNUSED_56FF)
+OP_END
+
+/* File: c/OP_UNUSED_57FF.c */
+HANDLE_OPCODE(OP_UNUSED_57FF)
+OP_END
+
+/* File: c/OP_UNUSED_58FF.c */
+HANDLE_OPCODE(OP_UNUSED_58FF)
+OP_END
+
+/* File: c/OP_UNUSED_59FF.c */
+HANDLE_OPCODE(OP_UNUSED_59FF)
+OP_END
+
+/* File: c/OP_UNUSED_5AFF.c */
+HANDLE_OPCODE(OP_UNUSED_5AFF)
+OP_END
+
+/* File: c/OP_UNUSED_5BFF.c */
+HANDLE_OPCODE(OP_UNUSED_5BFF)
+OP_END
+
+/* File: c/OP_UNUSED_5CFF.c */
+HANDLE_OPCODE(OP_UNUSED_5CFF)
+OP_END
+
+/* File: c/OP_UNUSED_5DFF.c */
+HANDLE_OPCODE(OP_UNUSED_5DFF)
+OP_END
+
+/* File: c/OP_UNUSED_5EFF.c */
+HANDLE_OPCODE(OP_UNUSED_5EFF)
+OP_END
+
+/* File: c/OP_UNUSED_5FFF.c */
+HANDLE_OPCODE(OP_UNUSED_5FFF)
+OP_END
+
+/* File: c/OP_UNUSED_60FF.c */
+HANDLE_OPCODE(OP_UNUSED_60FF)
+OP_END
+
+/* File: c/OP_UNUSED_61FF.c */
+HANDLE_OPCODE(OP_UNUSED_61FF)
+OP_END
+
+/* File: c/OP_UNUSED_62FF.c */
+HANDLE_OPCODE(OP_UNUSED_62FF)
+OP_END
+
+/* File: c/OP_UNUSED_63FF.c */
+HANDLE_OPCODE(OP_UNUSED_63FF)
+OP_END
+
+/* File: c/OP_UNUSED_64FF.c */
+HANDLE_OPCODE(OP_UNUSED_64FF)
+OP_END
+
+/* File: c/OP_UNUSED_65FF.c */
+HANDLE_OPCODE(OP_UNUSED_65FF)
+OP_END
+
+/* File: c/OP_UNUSED_66FF.c */
+HANDLE_OPCODE(OP_UNUSED_66FF)
+OP_END
+
+/* File: c/OP_UNUSED_67FF.c */
+HANDLE_OPCODE(OP_UNUSED_67FF)
+OP_END
+
+/* File: c/OP_UNUSED_68FF.c */
+HANDLE_OPCODE(OP_UNUSED_68FF)
+OP_END
+
+/* File: c/OP_UNUSED_69FF.c */
+HANDLE_OPCODE(OP_UNUSED_69FF)
+OP_END
+
+/* File: c/OP_UNUSED_6AFF.c */
+HANDLE_OPCODE(OP_UNUSED_6AFF)
+OP_END
+
+/* File: c/OP_UNUSED_6BFF.c */
+HANDLE_OPCODE(OP_UNUSED_6BFF)
+OP_END
+
+/* File: c/OP_UNUSED_6CFF.c */
+HANDLE_OPCODE(OP_UNUSED_6CFF)
+OP_END
+
+/* File: c/OP_UNUSED_6DFF.c */
+HANDLE_OPCODE(OP_UNUSED_6DFF)
+OP_END
+
+/* File: c/OP_UNUSED_6EFF.c */
+HANDLE_OPCODE(OP_UNUSED_6EFF)
+OP_END
+
+/* File: c/OP_UNUSED_6FFF.c */
+HANDLE_OPCODE(OP_UNUSED_6FFF)
+OP_END
+
+/* File: c/OP_UNUSED_70FF.c */
+HANDLE_OPCODE(OP_UNUSED_70FF)
+OP_END
+
+/* File: c/OP_UNUSED_71FF.c */
+HANDLE_OPCODE(OP_UNUSED_71FF)
+OP_END
+
+/* File: c/OP_UNUSED_72FF.c */
+HANDLE_OPCODE(OP_UNUSED_72FF)
+OP_END
+
+/* File: c/OP_UNUSED_73FF.c */
+HANDLE_OPCODE(OP_UNUSED_73FF)
+OP_END
+
+/* File: c/OP_UNUSED_74FF.c */
+HANDLE_OPCODE(OP_UNUSED_74FF)
+OP_END
+
+/* File: c/OP_UNUSED_75FF.c */
+HANDLE_OPCODE(OP_UNUSED_75FF)
+OP_END
+
+/* File: c/OP_UNUSED_76FF.c */
+HANDLE_OPCODE(OP_UNUSED_76FF)
+OP_END
+
+/* File: c/OP_UNUSED_77FF.c */
+HANDLE_OPCODE(OP_UNUSED_77FF)
+OP_END
+
+/* File: c/OP_UNUSED_78FF.c */
+HANDLE_OPCODE(OP_UNUSED_78FF)
+OP_END
+
+/* File: c/OP_UNUSED_79FF.c */
+HANDLE_OPCODE(OP_UNUSED_79FF)
+OP_END
+
+/* File: c/OP_UNUSED_7AFF.c */
+HANDLE_OPCODE(OP_UNUSED_7AFF)
+OP_END
+
+/* File: c/OP_UNUSED_7BFF.c */
+HANDLE_OPCODE(OP_UNUSED_7BFF)
+OP_END
+
+/* File: c/OP_UNUSED_7CFF.c */
+HANDLE_OPCODE(OP_UNUSED_7CFF)
+OP_END
+
+/* File: c/OP_UNUSED_7DFF.c */
+HANDLE_OPCODE(OP_UNUSED_7DFF)
+OP_END
+
+/* File: c/OP_UNUSED_7EFF.c */
+HANDLE_OPCODE(OP_UNUSED_7EFF)
+OP_END
+
+/* File: c/OP_UNUSED_7FFF.c */
+HANDLE_OPCODE(OP_UNUSED_7FFF)
+OP_END
+
+/* File: c/OP_UNUSED_80FF.c */
+HANDLE_OPCODE(OP_UNUSED_80FF)
+OP_END
+
+/* File: c/OP_UNUSED_81FF.c */
+HANDLE_OPCODE(OP_UNUSED_81FF)
+OP_END
+
+/* File: c/OP_UNUSED_82FF.c */
+HANDLE_OPCODE(OP_UNUSED_82FF)
+OP_END
+
+/* File: c/OP_UNUSED_83FF.c */
+HANDLE_OPCODE(OP_UNUSED_83FF)
+OP_END
+
+/* File: c/OP_UNUSED_84FF.c */
+HANDLE_OPCODE(OP_UNUSED_84FF)
+OP_END
+
+/* File: c/OP_UNUSED_85FF.c */
+HANDLE_OPCODE(OP_UNUSED_85FF)
+OP_END
+
+/* File: c/OP_UNUSED_86FF.c */
+HANDLE_OPCODE(OP_UNUSED_86FF)
+OP_END
+
+/* File: c/OP_UNUSED_87FF.c */
+HANDLE_OPCODE(OP_UNUSED_87FF)
+OP_END
+
+/* File: c/OP_UNUSED_88FF.c */
+HANDLE_OPCODE(OP_UNUSED_88FF)
+OP_END
+
+/* File: c/OP_UNUSED_89FF.c */
+HANDLE_OPCODE(OP_UNUSED_89FF)
+OP_END
+
+/* File: c/OP_UNUSED_8AFF.c */
+HANDLE_OPCODE(OP_UNUSED_8AFF)
+OP_END
+
+/* File: c/OP_UNUSED_8BFF.c */
+HANDLE_OPCODE(OP_UNUSED_8BFF)
+OP_END
+
+/* File: c/OP_UNUSED_8CFF.c */
+HANDLE_OPCODE(OP_UNUSED_8CFF)
+OP_END
+
+/* File: c/OP_UNUSED_8DFF.c */
+HANDLE_OPCODE(OP_UNUSED_8DFF)
+OP_END
+
+/* File: c/OP_UNUSED_8EFF.c */
+HANDLE_OPCODE(OP_UNUSED_8EFF)
+OP_END
+
+/* File: c/OP_UNUSED_8FFF.c */
+HANDLE_OPCODE(OP_UNUSED_8FFF)
+OP_END
+
+/* File: c/OP_UNUSED_90FF.c */
+HANDLE_OPCODE(OP_UNUSED_90FF)
+OP_END
+
+/* File: c/OP_UNUSED_91FF.c */
+HANDLE_OPCODE(OP_UNUSED_91FF)
+OP_END
+
+/* File: c/OP_UNUSED_92FF.c */
+HANDLE_OPCODE(OP_UNUSED_92FF)
+OP_END
+
+/* File: c/OP_UNUSED_93FF.c */
+HANDLE_OPCODE(OP_UNUSED_93FF)
+OP_END
+
+/* File: c/OP_UNUSED_94FF.c */
+HANDLE_OPCODE(OP_UNUSED_94FF)
+OP_END
+
+/* File: c/OP_UNUSED_95FF.c */
+HANDLE_OPCODE(OP_UNUSED_95FF)
+OP_END
+
+/* File: c/OP_UNUSED_96FF.c */
+HANDLE_OPCODE(OP_UNUSED_96FF)
+OP_END
+
+/* File: c/OP_UNUSED_97FF.c */
+HANDLE_OPCODE(OP_UNUSED_97FF)
+OP_END
+
+/* File: c/OP_UNUSED_98FF.c */
+HANDLE_OPCODE(OP_UNUSED_98FF)
+OP_END
+
+/* File: c/OP_UNUSED_99FF.c */
+HANDLE_OPCODE(OP_UNUSED_99FF)
+OP_END
+
+/* File: c/OP_UNUSED_9AFF.c */
+HANDLE_OPCODE(OP_UNUSED_9AFF)
+OP_END
+
+/* File: c/OP_UNUSED_9BFF.c */
+HANDLE_OPCODE(OP_UNUSED_9BFF)
+OP_END
+
+/* File: c/OP_UNUSED_9CFF.c */
+HANDLE_OPCODE(OP_UNUSED_9CFF)
+OP_END
+
+/* File: c/OP_UNUSED_9DFF.c */
+HANDLE_OPCODE(OP_UNUSED_9DFF)
+OP_END
+
+/* File: c/OP_UNUSED_9EFF.c */
+HANDLE_OPCODE(OP_UNUSED_9EFF)
+OP_END
+
+/* File: c/OP_UNUSED_9FFF.c */
+HANDLE_OPCODE(OP_UNUSED_9FFF)
+OP_END
+
+/* File: c/OP_UNUSED_A0FF.c */
+HANDLE_OPCODE(OP_UNUSED_A0FF)
+OP_END
+
+/* File: c/OP_UNUSED_A1FF.c */
+HANDLE_OPCODE(OP_UNUSED_A1FF)
+OP_END
+
+/* File: c/OP_UNUSED_A2FF.c */
+HANDLE_OPCODE(OP_UNUSED_A2FF)
+OP_END
+
+/* File: c/OP_UNUSED_A3FF.c */
+HANDLE_OPCODE(OP_UNUSED_A3FF)
+OP_END
+
+/* File: c/OP_UNUSED_A4FF.c */
+HANDLE_OPCODE(OP_UNUSED_A4FF)
+OP_END
+
+/* File: c/OP_UNUSED_A5FF.c */
+HANDLE_OPCODE(OP_UNUSED_A5FF)
+OP_END
+
+/* File: c/OP_UNUSED_A6FF.c */
+HANDLE_OPCODE(OP_UNUSED_A6FF)
+OP_END
+
+/* File: c/OP_UNUSED_A7FF.c */
+HANDLE_OPCODE(OP_UNUSED_A7FF)
+OP_END
+
+/* File: c/OP_UNUSED_A8FF.c */
+HANDLE_OPCODE(OP_UNUSED_A8FF)
+OP_END
+
+/* File: c/OP_UNUSED_A9FF.c */
+HANDLE_OPCODE(OP_UNUSED_A9FF)
+OP_END
+
+/* File: c/OP_UNUSED_AAFF.c */
+HANDLE_OPCODE(OP_UNUSED_AAFF)
+OP_END
+
+/* File: c/OP_UNUSED_ABFF.c */
+HANDLE_OPCODE(OP_UNUSED_ABFF)
+OP_END
+
+/* File: c/OP_UNUSED_ACFF.c */
+HANDLE_OPCODE(OP_UNUSED_ACFF)
+OP_END
+
+/* File: c/OP_UNUSED_ADFF.c */
+HANDLE_OPCODE(OP_UNUSED_ADFF)
+OP_END
+
+/* File: c/OP_UNUSED_AEFF.c */
+HANDLE_OPCODE(OP_UNUSED_AEFF)
+OP_END
+
+/* File: c/OP_UNUSED_AFFF.c */
+HANDLE_OPCODE(OP_UNUSED_AFFF)
+OP_END
+
+/* File: c/OP_UNUSED_B0FF.c */
+HANDLE_OPCODE(OP_UNUSED_B0FF)
+OP_END
+
+/* File: c/OP_UNUSED_B1FF.c */
+HANDLE_OPCODE(OP_UNUSED_B1FF)
+OP_END
+
+/* File: c/OP_UNUSED_B2FF.c */
+HANDLE_OPCODE(OP_UNUSED_B2FF)
+OP_END
+
+/* File: c/OP_UNUSED_B3FF.c */
+HANDLE_OPCODE(OP_UNUSED_B3FF)
+OP_END
+
+/* File: c/OP_UNUSED_B4FF.c */
+HANDLE_OPCODE(OP_UNUSED_B4FF)
+OP_END
+
+/* File: c/OP_UNUSED_B5FF.c */
+HANDLE_OPCODE(OP_UNUSED_B5FF)
+OP_END
+
+/* File: c/OP_UNUSED_B6FF.c */
+HANDLE_OPCODE(OP_UNUSED_B6FF)
+OP_END
+
+/* File: c/OP_UNUSED_B7FF.c */
+HANDLE_OPCODE(OP_UNUSED_B7FF)
+OP_END
+
+/* File: c/OP_UNUSED_B8FF.c */
+HANDLE_OPCODE(OP_UNUSED_B8FF)
+OP_END
+
+/* File: c/OP_UNUSED_B9FF.c */
+HANDLE_OPCODE(OP_UNUSED_B9FF)
+OP_END
+
+/* File: c/OP_UNUSED_BAFF.c */
+HANDLE_OPCODE(OP_UNUSED_BAFF)
+OP_END
+
+/* File: c/OP_UNUSED_BBFF.c */
+HANDLE_OPCODE(OP_UNUSED_BBFF)
+OP_END
+
+/* File: c/OP_UNUSED_BCFF.c */
+HANDLE_OPCODE(OP_UNUSED_BCFF)
+OP_END
+
+/* File: c/OP_UNUSED_BDFF.c */
+HANDLE_OPCODE(OP_UNUSED_BDFF)
+OP_END
+
+/* File: c/OP_UNUSED_BEFF.c */
+HANDLE_OPCODE(OP_UNUSED_BEFF)
+OP_END
+
+/* File: c/OP_UNUSED_BFFF.c */
+HANDLE_OPCODE(OP_UNUSED_BFFF)
+OP_END
+
+/* File: c/OP_UNUSED_C0FF.c */
+HANDLE_OPCODE(OP_UNUSED_C0FF)
+OP_END
+
+/* File: c/OP_UNUSED_C1FF.c */
+HANDLE_OPCODE(OP_UNUSED_C1FF)
+OP_END
+
+/* File: c/OP_UNUSED_C2FF.c */
+HANDLE_OPCODE(OP_UNUSED_C2FF)
+OP_END
+
+/* File: c/OP_UNUSED_C3FF.c */
+HANDLE_OPCODE(OP_UNUSED_C3FF)
+OP_END
+
+/* File: c/OP_UNUSED_C4FF.c */
+HANDLE_OPCODE(OP_UNUSED_C4FF)
+OP_END
+
+/* File: c/OP_UNUSED_C5FF.c */
+HANDLE_OPCODE(OP_UNUSED_C5FF)
+OP_END
+
+/* File: c/OP_UNUSED_C6FF.c */
+HANDLE_OPCODE(OP_UNUSED_C6FF)
+OP_END
+
+/* File: c/OP_UNUSED_C7FF.c */
+HANDLE_OPCODE(OP_UNUSED_C7FF)
+OP_END
+
+/* File: c/OP_UNUSED_C8FF.c */
+HANDLE_OPCODE(OP_UNUSED_C8FF)
+OP_END
+
+/* File: c/OP_UNUSED_C9FF.c */
+HANDLE_OPCODE(OP_UNUSED_C9FF)
+OP_END
+
+/* File: c/OP_UNUSED_CAFF.c */
+HANDLE_OPCODE(OP_UNUSED_CAFF)
+OP_END
+
+/* File: c/OP_UNUSED_CBFF.c */
+HANDLE_OPCODE(OP_UNUSED_CBFF)
+OP_END
+
+/* File: c/OP_UNUSED_CCFF.c */
+HANDLE_OPCODE(OP_UNUSED_CCFF)
+OP_END
+
+/* File: c/OP_UNUSED_CDFF.c */
+HANDLE_OPCODE(OP_UNUSED_CDFF)
+OP_END
+
+/* File: c/OP_UNUSED_CEFF.c */
+HANDLE_OPCODE(OP_UNUSED_CEFF)
+OP_END
+
+/* File: c/OP_UNUSED_CFFF.c */
+HANDLE_OPCODE(OP_UNUSED_CFFF)
+OP_END
+
+/* File: c/OP_UNUSED_D0FF.c */
+HANDLE_OPCODE(OP_UNUSED_D0FF)
+OP_END
+
+/* File: c/OP_UNUSED_D1FF.c */
+HANDLE_OPCODE(OP_UNUSED_D1FF)
+OP_END
+
+/* File: c/OP_UNUSED_D2FF.c */
+HANDLE_OPCODE(OP_UNUSED_D2FF)
+OP_END
+
+/* File: c/OP_UNUSED_D3FF.c */
+HANDLE_OPCODE(OP_UNUSED_D3FF)
+OP_END
+
+/* File: c/OP_UNUSED_D4FF.c */
+HANDLE_OPCODE(OP_UNUSED_D4FF)
+OP_END
+
+/* File: c/OP_UNUSED_D5FF.c */
+HANDLE_OPCODE(OP_UNUSED_D5FF)
+OP_END
+
+/* File: c/OP_UNUSED_D6FF.c */
+HANDLE_OPCODE(OP_UNUSED_D6FF)
+OP_END
+
+/* File: c/OP_UNUSED_D7FF.c */
+HANDLE_OPCODE(OP_UNUSED_D7FF)
+OP_END
+
+/* File: c/OP_UNUSED_D8FF.c */
+HANDLE_OPCODE(OP_UNUSED_D8FF)
+OP_END
+
+/* File: c/OP_UNUSED_D9FF.c */
+HANDLE_OPCODE(OP_UNUSED_D9FF)
+OP_END
+
+/* File: c/OP_UNUSED_DAFF.c */
+HANDLE_OPCODE(OP_UNUSED_DAFF)
+OP_END
+
+/* File: c/OP_UNUSED_DBFF.c */
+HANDLE_OPCODE(OP_UNUSED_DBFF)
+OP_END
+
+/* File: c/OP_UNUSED_DCFF.c */
+HANDLE_OPCODE(OP_UNUSED_DCFF)
+OP_END
+
+/* File: c/OP_UNUSED_DDFF.c */
+HANDLE_OPCODE(OP_UNUSED_DDFF)
+OP_END
+
+/* File: c/OP_UNUSED_DEFF.c */
+HANDLE_OPCODE(OP_UNUSED_DEFF)
+OP_END
+
+/* File: c/OP_UNUSED_DFFF.c */
+HANDLE_OPCODE(OP_UNUSED_DFFF)
+OP_END
+
+/* File: c/OP_UNUSED_E0FF.c */
+HANDLE_OPCODE(OP_UNUSED_E0FF)
+OP_END
+
+/* File: c/OP_UNUSED_E1FF.c */
+HANDLE_OPCODE(OP_UNUSED_E1FF)
+OP_END
+
+/* File: c/OP_UNUSED_E2FF.c */
+HANDLE_OPCODE(OP_UNUSED_E2FF)
+OP_END
+
+/* File: c/OP_UNUSED_E3FF.c */
+HANDLE_OPCODE(OP_UNUSED_E3FF)
+OP_END
+
+/* File: c/OP_UNUSED_E4FF.c */
+HANDLE_OPCODE(OP_UNUSED_E4FF)
+OP_END
+
+/* File: c/OP_UNUSED_E5FF.c */
+HANDLE_OPCODE(OP_UNUSED_E5FF)
+OP_END
+
+/* File: c/OP_UNUSED_E6FF.c */
+HANDLE_OPCODE(OP_UNUSED_E6FF)
+OP_END
+
+/* File: c/OP_UNUSED_E7FF.c */
+HANDLE_OPCODE(OP_UNUSED_E7FF)
+OP_END
+
+/* File: c/OP_UNUSED_E8FF.c */
+HANDLE_OPCODE(OP_UNUSED_E8FF)
+OP_END
+
+/* File: c/OP_UNUSED_E9FF.c */
+HANDLE_OPCODE(OP_UNUSED_E9FF)
+OP_END
+
+/* File: c/OP_UNUSED_EAFF.c */
+HANDLE_OPCODE(OP_UNUSED_EAFF)
+OP_END
+
+/* File: c/OP_UNUSED_EBFF.c */
+HANDLE_OPCODE(OP_UNUSED_EBFF)
+OP_END
+
+/* File: c/OP_UNUSED_ECFF.c */
+HANDLE_OPCODE(OP_UNUSED_ECFF)
+OP_END
+
+/* File: c/OP_UNUSED_EDFF.c */
+HANDLE_OPCODE(OP_UNUSED_EDFF)
+OP_END
+
+/* File: c/OP_UNUSED_EEFF.c */
+HANDLE_OPCODE(OP_UNUSED_EEFF)
+OP_END
+
+/* File: c/OP_UNUSED_EFFF.c */
+HANDLE_OPCODE(OP_UNUSED_EFFF)
+OP_END
+
+/* File: c/OP_UNUSED_F0FF.c */
+HANDLE_OPCODE(OP_UNUSED_F0FF)
+OP_END
+
+/* File: c/OP_UNUSED_F1FF.c */
+HANDLE_OPCODE(OP_UNUSED_F1FF)
+    /*
      * In portable interp, most unused opcodes will fall through to here.
      */
-    LOGE("unknown opcode 0x%02x\n", INST_INST(inst));
+    LOGE("unknown opcode 0x%04x\n", inst);
     dvmAbort();
     FINISH(1);
 OP_END
 
+/* File: c/OP_INVOKE_OBJECT_INIT_JUMBO.c */
+HANDLE_OPCODE(OP_INVOKE_OBJECT_INIT_JUMBO /*{vCCCC..vNNNN}, meth@AAAAAAAA*/)
+    {
+        Object* obj;
+
+        vsrc1 = FETCH(4);               /* reg number of "this" pointer */
+        obj = GET_REGISTER_AS_OBJECT(vsrc1);
+
+        if (!checkForNullExportPC(obj, fp, pc))
+            GOTO_exceptionThrown();
+
+        /*
+         * The object should be marked "finalizable" when Object.<init>
+         * completes normally.  We're going to assume it does complete
+         * (by virtue of being nothing but a return-void) and set it now.
+         */
+        if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE)) {
+            dvmSetFinalizable(obj);
+        }
+
+#if INTERP_TYPE == INTERP_DBG
+        if (DEBUGGER_ACTIVE) {
+            /* behave like OP_INVOKE_DIRECT_RANGE */
+            GOTO_invoke(invokeDirect, true, true);
+        }
+#endif
+        FINISH(5);
+    }
+OP_END
+
+/* File: c/OP_IGET_VOLATILE_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_VOLATILE_JUMBO, "-volatile/jumbo", IntVolatile, )
+OP_END
+
+/* File: c/OP_IGET_WIDE_VOLATILE_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE)
+OP_END
+
+/* File: c/OP_IGET_OBJECT_VOLATILE_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_IPUT_VOLATILE_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_VOLATILE_JUMBO, "-volatile/jumbo", IntVolatile, )
+OP_END
+
+/* File: c/OP_IPUT_WIDE_VOLATILE_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE)
+OP_END
+
+/* File: c/OP_IPUT_OBJECT_VOLATILE_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_SGET_VOLATILE_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_VOLATILE_JUMBO, "-volatile/jumbo", IntVolatile, )
+OP_END
+
+/* File: c/OP_SGET_WIDE_VOLATILE_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE)
+OP_END
+
+/* File: c/OP_SGET_OBJECT_VOLATILE_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_SPUT_VOLATILE_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_VOLATILE_JUMBO, "-volatile/jumbo", IntVolatile, )
+OP_END
+
+/* File: c/OP_SPUT_WIDE_VOLATILE_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE)
+OP_END
+
+/* File: c/OP_SPUT_OBJECT_VOLATILE_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_THROW_VERIFICATION_ERROR_JUMBO.c */
+HANDLE_OPCODE(OP_THROW_VERIFICATION_ERROR_JUMBO)
+    EXPORT_PC();
+    vsrc1 = FETCH(3);
+    ref = FETCH(1) | (u4)FETCH(2) << 16;      /* class/field/method ref */
+    dvmThrowVerificationError(curMethod, vsrc1, ref);
+    GOTO_exceptionThrown();
+OP_END
+
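[Editorial note, not part of the patch.] The /jumbo handlers above all share one decoding step: the 32-bit class/field/method reference is assembled from two 16-bit code units, as in "ref = FETCH(1) | (u4)FETCH(2) << 16", with the count and first register fetched from the following units. A minimal stand-alone sketch of that decoding, with illustrative values and names (nothing here is taken from the VM sources):

#include <stdio.h>
#include <stdint.h>

typedef uint16_t u2;
typedef uint32_t u4;

int main(void)
{
    /* hypothetical code units following a jumbo opcode unit:
       AAAA(lo), AAAA(hi), BBBB (count), CCCC (first register) */
    u2 units[] = { 0x1234, 0x0002, 0x0003, 0x0010 };

    u4 ref   = units[0] | (u4)units[1] << 16;   /* 32-bit index, low unit first */
    u2 count = units[2];
    u2 vdst  = units[3];

    printf("ref=0x%08x count=%u first reg=v%u\n",
        (unsigned)ref, (unsigned)count, (unsigned)vdst);
    return 0;
}
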
 /* File: cstubs/entry.c */
 /*
  * Handler function table, one entry per opcode.
@@ -3066,12 +4398,12 @@
  *
  * This is only used for the "allstubs" variant.
  */
-bool dvmMterpStdRun(MterpGlue* glue)
+bool dvmMterpStdRun(Thread* self)
 {
     jmp_buf jmpBuf;
     int changeInterp;
 
-    glue->bailPtr = &jmpBuf;
+    self->bailPtr = &jmpBuf;
 
     /*
      * We want to return "changeInterp" as a boolean, but we can't return
@@ -3089,18 +4421,18 @@
      * We need to pick up where the other interpreter left off.
      *
      * In some cases we need to call into a throw/return handler which
-     * will do some processing and then either return to us (updating "glue")
+     * will do some processing and then either return to us (updating "self")
      * or longjmp back out.
      */
-    switch (glue->entryPoint) {
+    switch (self->entryPoint) {
     case kInterpEntryInstr:
         /* just start at the start */
         break;
     case kInterpEntryReturn:
-        dvmMterp_returnFromMethod(glue);
+        dvmMterp_returnFromMethod(self);
         break;
     case kInterpEntryThrow:
-        dvmMterp_exceptionThrown(glue);
+        dvmMterp_exceptionThrown(self);
         break;
     default:
         dvmAbort();
@@ -3108,23 +4440,23 @@
 
     /* run until somebody longjmp()s out */
     while (true) {
-        typedef void (*Handler)(MterpGlue* glue);
+        typedef void (*Handler)(Thread* self);
 
-        u2 inst = /*glue->*/pc[0];
+        u2 inst = /*self->*/pc[0];
         Handler handler = (Handler) gDvmMterpHandlers[inst & 0xff];
         (void) gDvmMterpHandlerNames;   /* avoid gcc "defined but not used" */
         LOGVV("handler %p %s\n",
             handler, (const char*) gDvmMterpHandlerNames[inst & 0xff]);
-        (*handler)(glue);
+        (*handler)(self);
     }
 }
 
 /*
  * C mterp exit point.  Call here to bail out of the interpreter.
  */
-void dvmMterpStdBail(MterpGlue* glue, bool changeInterp)
+void dvmMterpStdBail(Thread* self, bool changeInterp)
 {
-    jmp_buf* pJmpBuf = glue->bailPtr;
+    jmp_buf* pJmpBuf = self->bailPtr;
     longjmp(*pJmpBuf, ((int)changeInterp)+1);
 }
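
[Editorial note, not part of the patch.] dvmMterpStdRun/dvmMterpStdBail above drive the "allstubs" dispatch loop with a setjmp/longjmp bail-out; since longjmp cannot deliver 0, the boolean is encoded as 1 or 2 and decoded by subtracting 1. A compilable sketch of that mechanism under simplified assumptions (no handler table or Thread state; run() and bail() are stand-ins, not real VM functions):

#include <setjmp.h>
#include <stdio.h>
#include <stdbool.h>

static jmp_buf* bailPtr;

static void bail(bool changeInterp)
{
    /* encode the boolean as 1 or 2 so longjmp never passes 0 */
    longjmp(*bailPtr, ((int)changeInterp) + 1);
}

static void someHandler(void)
{
    puts("handling one opcode, then bailing");
    bail(true);
}

static bool run(void)
{
    jmp_buf jmpBuf;
    int changeInterp;

    bailPtr = &jmpBuf;
    changeInterp = setjmp(jmpBuf) - 1;
    if (changeInterp >= 0)          /* longjmp landed here */
        return changeInterp != 0;

    while (true)                    /* dispatch until somebody bails */
        someHandler();
}

int main(void)
{
    printf("changeInterp=%d\n", run());
    return 0;
}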
 
@@ -3139,7 +4471,7 @@
  * next instruction.  Here, these are subroutines that return to the caller.
  */
 
-GOTO_TARGET(filledNewArray, bool methodCallRange)
+GOTO_TARGET(filledNewArray, bool methodCallRange, bool jumboFormat)
     {
         ClassObject* arrayClass;
         ArrayObject* newArray;
@@ -3150,19 +4482,28 @@
 
         EXPORT_PC();
 
-        ref = FETCH(1);             /* class ref */
-        vdst = FETCH(2);            /* first 4 regs -or- range base */
-
-        if (methodCallRange) {
-            vsrc1 = INST_AA(inst);  /* #of elements */
-            arg5 = -1;              /* silence compiler warning */
-            ILOGV("|filled-new-array-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* class ref */
+            vsrc1 = FETCH(3);                     /* #of elements */
+            vdst = FETCH(4);                      /* range base */
+            arg5 = -1;                            /* silence compiler warning */
+            ILOGV("|filled-new-array/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
         } else {
-            arg5 = INST_A(inst);
-            vsrc1 = INST_B(inst);   /* #of elements */
-            ILOGV("|filled-new-array args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1, ref, vdst, arg5);
+            ref = FETCH(1);             /* class ref */
+            vdst = FETCH(2);            /* first 4 regs -or- range base */
+
+            if (methodCallRange) {
+                vsrc1 = INST_AA(inst);  /* #of elements */
+                arg5 = -1;              /* silence compiler warning */
+                ILOGV("|filled-new-array-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+            } else {
+                arg5 = INST_A(inst);
+                vsrc1 = INST_B(inst);   /* #of elements */
+                ILOGV("|filled-new-array args=%d @0x%04x {regs=0x%04x %x}",
+                   vsrc1, ref, vdst, arg5);
+            }
         }
 
         /*
@@ -3176,7 +4517,7 @@
         }
         /*
         if (!dvmIsArrayClass(arrayClass)) {
-            dvmThrowException("Ljava/lang/RuntimeError;",
+            dvmThrowRuntimeException(
                 "filled-new-array needs array class");
             GOTO_exceptionThrown();
         }
@@ -3192,13 +4533,12 @@
         typeCh = arrayClass->descriptor[1];
         if (typeCh == 'D' || typeCh == 'J') {
             /* category 2 primitives not allowed */
-            dvmThrowException("Ljava/lang/RuntimeError;",
-                "bad filled array req");
+            dvmThrowRuntimeException("bad filled array req");
             GOTO_exceptionThrown();
         } else if (typeCh != 'L' && typeCh != '[' && typeCh != 'I') {
             /* TODO: requires multiple "fill in" loops with different widths */
             LOGE("non-int primitives not implemented\n");
-            dvmThrowException("Ljava/lang/InternalError;",
+            dvmThrowInternalError(
                 "filled-new-array not implemented for anything but 'int'");
             GOTO_exceptionThrown();
         }
@@ -3231,35 +4571,49 @@
 
         retval.l = newArray;
     }
-    FINISH(3);
+    if (jumboFormat) {
+        FINISH(5);
+    } else {
+        FINISH(3);
+    }
 GOTO_TARGET_END
 
 
-GOTO_TARGET(invokeVirtual, bool methodCallRange)
+GOTO_TARGET(invokeVirtual, bool methodCallRange, bool jumboFormat)
     {
         Method* baseMethod;
         Object* thisPtr;
 
         EXPORT_PC();
 
-        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-        ref = FETCH(1);             /* method ref */
-        vdst = FETCH(2);            /* 4 regs -or- first reg */
-
-        /*
-         * The object against which we are executing a method is always
-         * in the first argument.
-         */
-        if (methodCallRange) {
-            assert(vsrc1 > 0);
-            ILOGV("|invoke-virtual-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+            vsrc1 = FETCH(3);                     /* count */
+            vdst = FETCH(4);                      /* first reg */
+            ADJUST_PC(2);     /* advance pc partially to make returns easier */
+            ILOGV("|invoke-virtual/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
             thisPtr = (Object*) GET_REGISTER(vdst);
         } else {
-            assert((vsrc1>>4) > 0);
-            ILOGV("|invoke-virtual args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
-            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+            ref = FETCH(1);             /* method ref */
+            vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            if (methodCallRange) {
+                assert(vsrc1 > 0);
+                ILOGV("|invoke-virtual-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+                thisPtr = (Object*) GET_REGISTER(vdst);
+            } else {
+                assert((vsrc1>>4) > 0);
+                ILOGV("|invoke-virtual args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+                thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+            }
         }
 
         if (!checkForNull(thisPtr))
@@ -3300,8 +4654,7 @@
              * Works fine unless Sub stops providing an implementation of
              * the method.
              */
-            dvmThrowException("Ljava/lang/AbstractMethodError;",
-                "abstract method not implemented");
+            dvmThrowAbstractMethodError("abstract method not implemented");
             GOTO_exceptionThrown();
         }
 #else
@@ -3331,26 +4684,37 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeSuper, bool methodCallRange)
+GOTO_TARGET(invokeSuper, bool methodCallRange, bool jumboFormat)
     {
         Method* baseMethod;
         u2 thisReg;
 
         EXPORT_PC();
 
-        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-        ref = FETCH(1);             /* method ref */
-        vdst = FETCH(2);            /* 4 regs -or- first reg */
-
-        if (methodCallRange) {
-            ILOGV("|invoke-super-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+            vsrc1 = FETCH(3);                     /* count */
+            vdst = FETCH(4);                      /* first reg */
+            ADJUST_PC(2);     /* advance pc partially to make returns easier */
+            ILOGV("|invoke-super/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
             thisReg = vdst;
         } else {
-            ILOGV("|invoke-super args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
-            thisReg = vdst & 0x0f;
+            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+            ref = FETCH(1);             /* method ref */
+            vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+            if (methodCallRange) {
+                ILOGV("|invoke-super-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+                thisReg = vdst;
+            } else {
+                ILOGV("|invoke-super args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+                thisReg = vdst & 0x0f;
+            }
         }
+
         /* impossible in well-formed code, but we must check nevertheless */
         if (!checkForNull((Object*) GET_REGISTER(thisReg)))
             GOTO_exceptionThrown();
@@ -3385,15 +4749,13 @@
              * Method does not exist in the superclass.  Could happen if
              * superclass gets updated.
              */
-            dvmThrowException("Ljava/lang/NoSuchMethodError;",
-                baseMethod->name);
+            dvmThrowNoSuchMethodError(baseMethod->name);
             GOTO_exceptionThrown();
         }
         methodToCall = curMethod->clazz->super->vtable[baseMethod->methodIndex];
 #if 0
         if (dvmIsAbstractMethod(methodToCall)) {
-            dvmThrowException("Ljava/lang/AbstractMethodError;",
-                "abstract method not implemented");
+            dvmThrowAbstractMethodError("abstract method not implemented");
             GOTO_exceptionThrown();
         }
 #else
@@ -3409,32 +4771,43 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeInterface, bool methodCallRange)
+GOTO_TARGET(invokeInterface, bool methodCallRange, bool jumboFormat)
     {
         Object* thisPtr;
         ClassObject* thisClass;
 
         EXPORT_PC();
 
-        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-        ref = FETCH(1);             /* method ref */
-        vdst = FETCH(2);            /* 4 regs -or- first reg */
-
-        /*
-         * The object against which we are executing a method is always
-         * in the first argument.
-         */
-        if (methodCallRange) {
-            assert(vsrc1 > 0);
-            ILOGV("|invoke-interface-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+            vsrc1 = FETCH(3);                     /* count */
+            vdst = FETCH(4);                      /* first reg */
+            ADJUST_PC(2);     /* advance pc partially to make returns easier */
+            ILOGV("|invoke-interface/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
             thisPtr = (Object*) GET_REGISTER(vdst);
         } else {
-            assert((vsrc1>>4) > 0);
-            ILOGV("|invoke-interface args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
-            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+            ref = FETCH(1);             /* method ref */
+            vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            if (methodCallRange) {
+                assert(vsrc1 > 0);
+                ILOGV("|invoke-interface-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+                thisPtr = (Object*) GET_REGISTER(vdst);
+            } else {
+                assert((vsrc1>>4) > 0);
+                ILOGV("|invoke-interface args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+                thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+            }
         }
+
         if (!checkForNull(thisPtr))
             GOTO_exceptionThrown();
 
@@ -3459,25 +4832,36 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeDirect, bool methodCallRange)
+GOTO_TARGET(invokeDirect, bool methodCallRange, bool jumboFormat)
     {
         u2 thisReg;
 
-        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-        ref = FETCH(1);             /* method ref */
-        vdst = FETCH(2);            /* 4 regs -or- first reg */
-
         EXPORT_PC();
 
-        if (methodCallRange) {
-            ILOGV("|invoke-direct-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+            vsrc1 = FETCH(3);                     /* count */
+            vdst = FETCH(4);                      /* first reg */
+            ADJUST_PC(2);     /* advance pc partially to make returns easier */
+            ILOGV("|invoke-direct/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
             thisReg = vdst;
         } else {
-            ILOGV("|invoke-direct args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
-            thisReg = vdst & 0x0f;
+            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+            ref = FETCH(1);             /* method ref */
+            vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+            if (methodCallRange) {
+                ILOGV("|invoke-direct-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+                thisReg = vdst;
+            } else {
+                ILOGV("|invoke-direct args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+                thisReg = vdst & 0x0f;
+            }
         }
+
         if (!checkForNull((Object*) GET_REGISTER(thisReg)))
             GOTO_exceptionThrown();
 
@@ -3494,19 +4878,28 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeStatic, bool methodCallRange)
-    vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-    ref = FETCH(1);             /* method ref */
-    vdst = FETCH(2);            /* 4 regs -or- first reg */
-
+GOTO_TARGET(invokeStatic, bool methodCallRange, bool jumboFormat)
     EXPORT_PC();
 
-    if (methodCallRange)
-        ILOGV("|invoke-static-range args=%d @0x%04x {regs=v%d-v%d}",
+    if (jumboFormat) {
+        ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+        vsrc1 = FETCH(3);                     /* count */
+        vdst = FETCH(4);                      /* first reg */
+        ADJUST_PC(2);     /* advance pc partially to make returns easier */
+        ILOGV("|invoke-static/jumbo args=%d @0x%08x {regs=v%d-v%d}",
             vsrc1, ref, vdst, vdst+vsrc1-1);
-    else
-        ILOGV("|invoke-static args=%d @0x%04x {regs=0x%04x %x}",
-            vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+    } else {
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* method ref */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        if (methodCallRange)
+            ILOGV("|invoke-static-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+        else
+            ILOGV("|invoke-static args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+    }
 
     methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
     if (methodToCall == NULL) {
@@ -3523,13 +4916,13 @@
          */
         if (dvmDexGetResolvedMethod(methodClassDex, ref) == NULL) {
             /* Class initialization is still ongoing */
-            ABORT_JIT_TSELECT();
+            END_JIT_TSELECT();
         }
     }
     GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeVirtualQuick, bool methodCallRange)
+GOTO_TARGET(invokeVirtualQuick, bool methodCallRange, bool jumboFormat)
     {
         Object* thisPtr;
 
@@ -3566,13 +4959,12 @@
          * Combine the object we found with the vtable offset in the
          * method.
          */
-        assert(ref < thisPtr->clazz->vtableCount);
+        assert(ref < (unsigned int) thisPtr->clazz->vtableCount);
         methodToCall = thisPtr->clazz->vtable[ref];
 
 #if 0
         if (dvmIsAbstractMethod(methodToCall)) {
-            dvmThrowException("Ljava/lang/AbstractMethodError;",
-                "abstract method not implemented");
+            dvmThrowAbstractMethodError("abstract method not implemented");
             GOTO_exceptionThrown();
         }
 #else
@@ -3588,7 +4980,7 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeSuperQuick, bool methodCallRange)
+GOTO_TARGET(invokeSuperQuick, bool methodCallRange, bool jumboFormat)
     {
         u2 thisReg;
 
@@ -3613,11 +5005,11 @@
 
 #if 0   /* impossible in optimized + verified code */
         if (ref >= curMethod->clazz->super->vtableCount) {
-            dvmThrowException("Ljava/lang/NoSuchMethodError;", NULL);
+            dvmThrowNoSuchMethodError(NULL);
             GOTO_exceptionThrown();
         }
 #else
-        assert(ref < curMethod->clazz->super->vtableCount);
+        assert(ref < (unsigned int) curMethod->clazz->super->vtableCount);
 #endif
 
         /*
@@ -3633,8 +5025,7 @@
 
 #if 0
         if (dvmIsAbstractMethod(methodToCall)) {
-            dvmThrowException("Ljava/lang/AbstractMethodError;",
-                "abstract method not implemented");
+            dvmThrowAbstractMethodError("abstract method not implemented");
             GOTO_exceptionThrown();
         }
 #else
@@ -3682,7 +5073,7 @@
 #endif
 
         /* back up to previous frame and see if we hit a break */
-        fp = saveArea->prevFrame;
+        fp = (u4*)saveArea->prevFrame;
         assert(fp != NULL);
         if (dvmIsBreakFrame(fp)) {
             /* bail without popping the method frame from stack */
@@ -3736,8 +5127,8 @@
         PERIODIC_CHECKS(kInterpEntryThrow, 0);
 
 #if defined(WITH_JIT)
-        // Something threw during trace selection - abort the current trace
-        ABORT_JIT_TSELECT();
+        // Something threw during trace selection - end the current trace
+        END_JIT_TSELECT();
 #endif
         /*
          * We save off the exception and clear the exception status.  While
@@ -3769,7 +5160,7 @@
          * here, and have the JNI exception code do the reporting to the
          * debugger.
          */
-        if (gDvm.debuggerActive) {
+        if (DEBUGGER_ACTIVE) {
             void* catchFrame;
             catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
                         exception, true, &catchFrame);
@@ -3794,7 +5185,7 @@
          * the "catch" blocks.
          */
         catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
-                    exception, false, (void*)&fp);
+                    exception, false, (void**)(void*)&fp);
 
         /*
          * Restore the stack bounds after an overflow.  This isn't going to
@@ -4027,7 +5418,7 @@
             curMethod = methodToCall;
             methodClassDex = curMethod->clazz->pDvmDex;
             pc = methodToCall->insns;
-            fp = self->curFrame = newFp;
+            self->curFrame = fp = newFp;
 #ifdef EASY_GDB
             debugSaveArea = SAVEAREA_FROM_FP(newFp);
 #endif
@@ -4040,18 +5431,14 @@
             FINISH(0);                              // jump to method start
         } else {
             /* set this up for JNI locals, even if not a JNI native */
-#ifdef USE_INDIRECT_REF
             newSaveArea->xtra.localRefCookie = self->jniLocalRefTable.segmentState.all;
-#else
-            newSaveArea->xtra.localRefCookie = self->jniLocalRefTable.nextEntry;
-#endif
 
             self->curFrame = newFp;
 
             DUMP_REGS(methodToCall, newFp, true);   // show input args
 
 #if (INTERP_TYPE == INTERP_DBG)
-            if (gDvm.debuggerActive) {
+            if (DEBUGGER_ACTIVE) {
                 dvmDbgPostLocationEvent(methodToCall, -1,
                     dvmGetThisPtr(curMethod, fp), DBG_METHOD_ENTRY);
             }
@@ -4078,7 +5465,7 @@
             (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
 
 #if (INTERP_TYPE == INTERP_DBG)
-            if (gDvm.debuggerActive) {
+            if (DEBUGGER_ACTIVE) {
                 dvmDbgPostLocationEvent(methodToCall, -1,
                     dvmGetThisPtr(curMethod, fp), DBG_METHOD_EXIT);
             }
diff --git a/vm/mterp/out/InterpC-armv5te-vfp.c b/vm/mterp/out/InterpC-armv5te-vfp.c
index 2ccc43c..a0894d2 100644
--- a/vm/mterp/out/InterpC-armv5te-vfp.c
+++ b/vm/mterp/out/InterpC-armv5te-vfp.c
@@ -58,24 +58,31 @@
 #endif
 
 /*
- * ARM EABI requires 64-bit alignment for access to 64-bit data types.  We
- * can't just use pointers to copy 64-bit values out of our interpreted
- * register set, because gcc will generate ldrd/strd.
+ * Some architectures require 64-bit alignment for access to 64-bit data
+ * types.  We can't just use pointers to copy 64-bit values out of our
+ * interpreted register set, because gcc may assume the pointer target is
+ * aligned and generate invalid code.
  *
- * The __UNION version copies data in and out of a union.  The __MEMCPY
- * version uses a memcpy() call to do the transfer; gcc is smart enough to
- * not actually call memcpy().  The __UNION version is very bad on ARM;
- * it only uses one more instruction than __MEMCPY, but for some reason
- * gcc thinks it needs separate storage for every instance of the union.
- * On top of that, it feels the need to zero them out at the start of the
- * method.  Net result is we zero out ~700 bytes of stack space at the top
- * of the interpreter using ARM STM instructions.
+ * There are two common approaches:
+ *  (1) Use a union that defines a 32-bit pair and a 64-bit value.
+ *  (2) Call memcpy().
+ *
+ * Depending upon what compiler you're using and what options are specified,
+ * one may be faster than the other.  For example, the compiler might
+ * convert a memcpy() of 8 bytes into a series of instructions and omit
+ * the call.  The union version could cause some strange side-effects,
+ * e.g. for a while ARM gcc thought it needed separate storage for each
+ * inlined instance, and generated instructions to zero out ~700 bytes of
+ * stack space at the top of the interpreter.
+ *
+ * The default is to use memcpy().  The current gcc for ARM seems to do
+ * better with the union.
  */
 #if defined(__ARM_EABI__)
-//# define NO_UNALIGN_64__UNION
-# define NO_UNALIGN_64__MEMCPY
+# define NO_UNALIGN_64__UNION
 #endif
 
+
 //#define LOG_INSTR                   /* verbose debugging */
 /* set and adjust ANDROID_LOG_TAGS='*:i jdwp:i dalvikvm:i dalvikvmi:i' */
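
[Editorial note, not part of the patch.] The two approaches described in the comment above can be seen side by side in this stand-alone sketch, assuming 32-bit register slots; the function names are illustrative, not the VM's:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

typedef uint32_t u4;
typedef int64_t  s8;

/* (1) union approach: copy through a pair of 32-bit words */
static s8 getLongViaUnion(const u4* ptr)
{
    union { s8 ll; u4 parts[2]; } conv;
    conv.parts[0] = ptr[0];
    conv.parts[1] = ptr[1];
    return conv.ll;
}

/* (2) memcpy approach: the compiler may inline this into plain loads */
static s8 getLongViaMemcpy(const u4* ptr)
{
    s8 val;
    memcpy(&val, ptr, sizeof(val));
    return val;
}

int main(void)
{
    u4 regs[2] = { 0x89abcdefu, 0x01234567u };
    printf("union=0x%llx memcpy=0x%llx\n",
        (long long) getLongViaUnion(regs),
        (long long) getLongViaMemcpy(regs));
    return 0;
}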
 
@@ -171,12 +178,10 @@
     conv.parts[0] = ptr[0];
     conv.parts[1] = ptr[1];
     return conv.ll;
-#elif defined(NO_UNALIGN_64__MEMCPY)
+#else
     s8 val;
     memcpy(&val, &ptr[idx], 8);
     return val;
-#else
-    return *((s8*) &ptr[idx]);
 #endif
 }
 
@@ -190,10 +195,8 @@
     conv.ll = val;
     ptr[0] = conv.parts[0];
     ptr[1] = conv.parts[1];
-#elif defined(NO_UNALIGN_64__MEMCPY)
-    memcpy(&ptr[idx], &val, 8);
 #else
-    *((s8*) &ptr[idx]) = val;
+    memcpy(&ptr[idx], &val, 8);
 #endif
 }
 
@@ -207,12 +210,10 @@
     conv.parts[0] = ptr[0];
     conv.parts[1] = ptr[1];
     return conv.d;
-#elif defined(NO_UNALIGN_64__MEMCPY)
+#else
     double dval;
     memcpy(&dval, &ptr[idx], 8);
     return dval;
-#else
-    return *((double*) &ptr[idx]);
 #endif
 }
 
@@ -226,10 +227,8 @@
     conv.d = dval;
     ptr[0] = conv.parts[0];
     ptr[1] = conv.parts[1];
-#elif defined(NO_UNALIGN_64__MEMCPY)
-    memcpy(&ptr[idx], &dval, 8);
 #else
-    *((double*) &ptr[idx]) = dval;
+    memcpy(&ptr[idx], &dval, 8);
 #endif
 }
 
@@ -318,10 +317,10 @@
 
 /*
  * The current PC must be available to Throwable constructors, e.g.
- * those created by dvmThrowException(), so that the exception stack
- * trace can be generated correctly.  If we don't do this, the offset
- * within the current method won't be shown correctly.  See the notes
- * in Exception.c.
+ * those created by the various exception throw routines, so that the
+ * exception stack trace can be generated correctly.  If we don't do this,
+ * the offset within the current method won't be shown correctly.  See the
+ * notes in Exception.c.
  *
  * This is also used to determine the address for precise GC.
  *
@@ -360,7 +359,7 @@
 static inline bool checkForNull(Object* obj)
 {
     if (obj == NULL) {
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         return false;
     }
 #ifdef WITH_EXTRA_OBJECT_VALIDATION
@@ -392,7 +391,7 @@
 {
     if (obj == NULL) {
         EXPORT_PC();
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         return false;
     }
 #ifdef WITH_EXTRA_OBJECT_VALIDATION
@@ -418,7 +417,7 @@
 # define CHECK_TRACKED_REFS() ((void)0)
 #define CHECK_JIT_BOOL() (false)
 #define CHECK_JIT_VOID()
-#define ABORT_JIT_TSELECT() ((void)0)
+#define END_JIT_TSELECT() ((void)0)
 
 /*
  * In the C mterp stubs, "goto" is a function call followed immediately
@@ -426,11 +425,11 @@
  */
 
 #define GOTO_TARGET_DECL(_target, ...)                                      \
-    void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__);
+    void dvmMterp_##_target(Thread* self, ## __VA_ARGS__);
 
 /* (void)xxx to quiet unused variable compiler warnings. */
 #define GOTO_TARGET(_target, ...)                                           \
-    void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__) {              \
+    void dvmMterp_##_target(Thread* self, ## __VA_ARGS__) {                 \
         u2 ref, vsrc1, vsrc2, vdst;                                         \
         u2 inst = FETCH(0);                                                 \
         const Method* methodToCall;                                         \
@@ -441,16 +440,15 @@
 #define GOTO_TARGET_END }
 
 /*
- * Redefine what used to be local variable accesses into MterpGlue struct
+ * Redefine what used to be local variable accesses into Thread struct
  * references.  (These are undefined down in "footer.c".)
  */
-#define retval                  glue->retval
-#define pc                      glue->pc
-#define fp                      glue->fp
-#define curMethod               glue->method
-#define methodClassDex          glue->methodClassDex
-#define self                    glue->self
-#define debugTrackedRefStart    glue->debugTrackedRefStart
+#define retval                  self->retval
+#define pc                      self->interpSave.pc
+#define fp                      self->interpSave.fp
+#define curMethod               self->interpSave.method
+#define methodClassDex          self->interpSave.methodClassDex
+#define debugTrackedRefStart    self->interpSave.debugTrackedRefStart
 
 /* ugh */
 #define STUB_HACK(x) x
@@ -458,12 +456,12 @@
 
 /*
  * Opcode handler framing macros.  Here, each opcode is a separate function
- * that takes a "glue" argument and returns void.  We can't declare
+ * that takes a "self" argument and returns void.  We can't declare
  * these "static" because they may be called from an assembly stub.
  * (void)xxx to quiet unused variable compiler warnings.
  */
 #define HANDLE_OPCODE(_op)                                                  \
-    void dvmMterp_##_op(MterpGlue* glue) {                                  \
+    void dvmMterp_##_op(Thread* self) {                                     \
         u2 ref, vsrc1, vsrc2, vdst;                                         \
         u2 inst = FETCH(0);                                                 \
         (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst;
@@ -490,25 +488,25 @@
 
 #define GOTO_exceptionThrown()                                              \
     do {                                                                    \
-        dvmMterp_exceptionThrown(glue);                                     \
+        dvmMterp_exceptionThrown(self);                                     \
         return;                                                             \
     } while(false)
 
 #define GOTO_returnFromMethod()                                             \
     do {                                                                    \
-        dvmMterp_returnFromMethod(glue);                                    \
+        dvmMterp_returnFromMethod(self);                                    \
         return;                                                             \
     } while(false)
 
-#define GOTO_invoke(_target, _methodCallRange)                              \
+#define GOTO_invoke(_target, _methodCallRange, _jumboFormat)                \
     do {                                                                    \
-        dvmMterp_##_target(glue, _methodCallRange);                         \
+        dvmMterp_##_target(self, _methodCallRange, _jumboFormat);           \
         return;                                                             \
     } while(false)
 
 #define GOTO_invokeMethod(_methodCallRange, _methodToCall, _vsrc1, _vdst)   \
     do {                                                                    \
-        dvmMterp_invokeMethod(glue, _methodCallRange, _methodToCall,        \
+        dvmMterp_invokeMethod(self, _methodCallRange, _methodToCall,        \
             _vsrc1, _vdst);                                                 \
         return;                                                             \
     } while(false)
@@ -518,9 +516,9 @@
  * if we need to switch to the other interpreter upon our return.
  */
 #define GOTO_bail()                                                         \
-    dvmMterpStdBail(glue, false);
+    dvmMterpStdBail(self, false);
 #define GOTO_bail_switch()                                                  \
-    dvmMterpStdBail(glue, true);
+    dvmMterpStdBail(self, true);
 
 /*
  * Periodically check for thread suspension.
@@ -535,7 +533,7 @@
         }                                                                   \
         if (NEED_INTERP_SWITCH(INTERP_TYPE)) {                              \
             ADJUST_PC(_pcadj);                                              \
-            glue->entryPoint = _entryPoint;                                 \
+            self->entryPoint = _entryPoint;                                 \
             LOGVV("threadid=%d: switch to STD ep=%d adj=%d\n",              \
                 self->threadId, (_entryPoint), (_pcadj));                   \
             GOTO_bail_switch();                                             \
@@ -544,14 +542,14 @@
 
 /* File: c/opcommon.c */
 /* forward declarations of goto targets */
-GOTO_TARGET_DECL(filledNewArray, bool methodCallRange);
-GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange);
-GOTO_TARGET_DECL(invokeSuper, bool methodCallRange);
-GOTO_TARGET_DECL(invokeInterface, bool methodCallRange);
-GOTO_TARGET_DECL(invokeDirect, bool methodCallRange);
-GOTO_TARGET_DECL(invokeStatic, bool methodCallRange);
-GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange);
-GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange);
+GOTO_TARGET_DECL(filledNewArray, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeSuper, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeInterface, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeDirect, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeStatic, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange, bool jumboFormat);
 GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall,
     u2 count, u2 regs);
 GOTO_TARGET_DECL(returnFromMethod);
@@ -694,8 +692,7 @@
             secondVal = GET_REGISTER(vsrc2);                                \
             if (secondVal == 0) {                                           \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && secondVal == -1) {            \
@@ -741,9 +738,8 @@
             firstVal = GET_REGISTER(vsrc1);                                 \
             if ((s2) vsrc2 == 0) {                                          \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
-                GOTO_exceptionThrown();                                      \
+                dvmThrowArithmeticException("divide by zero");              \
+                GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && ((s2) vsrc2) == -1) {         \
                 /* won't generate /lit16 instr for this; check anyway */    \
@@ -776,8 +772,7 @@
             firstVal = GET_REGISTER(vsrc1);                                 \
             if ((s1) vsrc2 == 0) {                                          \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && ((s1) vsrc2) == -1) {         \
@@ -822,8 +817,7 @@
             secondVal = GET_REGISTER(vsrc1);                                \
             if (secondVal == 0) {                                           \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && secondVal == -1) {            \
@@ -865,8 +859,7 @@
             secondVal = GET_REGISTER_WIDE(vsrc2);                           \
             if (secondVal == 0LL) {                                         \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u8)firstVal == 0x8000000000000000ULL &&                    \
@@ -912,8 +905,7 @@
             secondVal = GET_REGISTER_WIDE(vsrc1);                           \
             if (secondVal == 0LL) {                                         \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u8)firstVal == 0x8000000000000000ULL &&                    \
@@ -1003,7 +995,8 @@
         if (!checkForNull((Object*) arrayObj))                              \
             GOTO_exceptionThrown();                                         \
         if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
-            dvmThrowAIOOBE(GET_REGISTER(vsrc2), arrayObj->length);          \
+            dvmThrowArrayIndexOutOfBoundsException(                         \
+                arrayObj->length, GET_REGISTER(vsrc2));                     \
             GOTO_exceptionThrown();                                         \
         }                                                                   \
         SET_REGISTER##_regsize(vdst,                                        \
@@ -1027,7 +1020,8 @@
         if (!checkForNull((Object*) arrayObj))                              \
             GOTO_exceptionThrown();                                         \
         if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
-            dvmThrowAIOOBE(GET_REGISTER(vsrc2), arrayObj->length);          \
+            dvmThrowArrayIndexOutOfBoundsException(                         \
+                arrayObj->length, GET_REGISTER(vsrc2));                     \
             GOTO_exceptionThrown();                                         \
         }                                                                   \
         ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\
@@ -1080,6 +1074,34 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_IGET_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, vCCCC, class@AAAAAAAA*/)                 \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        vsrc1 = FETCH(4);                      /* object ptr */             \
+        ILOGV("|iget%s/jumbo v%d,v%d,field@0x%08x",                         \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO_exceptionThrown();                                         \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(curMethod->clazz, ref);            \
+            if (ifield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst,                                        \
+            dvmGetField##_ftype(obj, ifield->byteOffset));                  \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+        UPDATE_FIELD_GET(&ifield->field);                                   \
+    }                                                                       \
+    FINISH(5);
+
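The iget/jumbo handler above (and the iput/jumbo one that follows) decodes a five-code-unit format: the opcode unit, a 32-bit field index split low/high across the next two units, then two 16-bit register numbers. A standalone sketch of that decode, assuming a u2* cursor into the instruction stream (u2/u4 being the usual Dalvik 16/32-bit unsigned types; the function name is made up for illustration):

    /* Illustrative decode of iget/iput-jumbo, mirroring the FETCH() calls above. */
    static void decodeFieldOpJumbo(const u2* insns,
        u4* pFieldIdx, u2* pDstReg, u2* pObjReg)
    {
        *pFieldIdx = insns[1] | ((u4) insns[2] << 16);  /* field@AAAAAAAA */
        *pDstReg   = insns[3];                          /* vBBBB */
        *pObjReg   = insns[4];                          /* vCCCC */
        /* five code units consumed in total, hence FINISH(5) */
    }
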
 #define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
     HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
     {                                                                       \
@@ -1125,6 +1147,34 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_IPUT_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, vCCCC, class@AAAAAAAA*/)                 \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        vsrc1 = FETCH(4);                      /* object ptr */             \
+        ILOGV("|iput%s/jumbo v%d,v%d,field@0x%08x",                         \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO_exceptionThrown();                                         \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(curMethod->clazz, ref);            \
+            if (ifield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+        }                                                                   \
+        dvmSetField##_ftype(obj, ifield->byteOffset,                        \
+            GET_REGISTER##_regsize(vdst));                                  \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+        UPDATE_FIELD_PUT(&ifield->field);                                   \
+    }                                                                       \
+    FINISH(5);
+
 #define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
     HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
     {                                                                       \
@@ -1162,7 +1212,7 @@
             if (sfield == NULL)                                             \
                 GOTO_exceptionThrown();                                     \
             if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
-                ABORT_JIT_TSELECT();                                        \
+                END_JIT_TSELECT();                                          \
             }                                                               \
         }                                                                   \
         SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
@@ -1172,6 +1222,30 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_SGET_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, class@AAAAAAAA*/)                        \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        ILOGV("|sget%s/jumbo v%d,sfield@0x%08x", (_opname), vdst, ref);     \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(curMethod->clazz, ref);          \
+            if (sfield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+            if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
+                END_JIT_TSELECT();                                          \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
+        ILOGV("+ SGET '%s'=0x%08llx",                                       \
+            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+        UPDATE_FIELD_GET(&sfield->field);                                   \
+    }                                                                       \
+    FINISH(4);
+
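The static-field /jumbo handlers (sget above, sput below) use the same split 32-bit field index but carry only one register, so the instruction occupies four code units and the handler ends with FINISH(4). A matching sketch, again with an illustrative name:

    /* Illustrative decode of sget/sput-jumbo. */
    static void decodeStaticFieldOpJumbo(const u2* insns,
        u4* pFieldIdx, u2* pValueReg)
    {
        *pFieldIdx = insns[1] | ((u4) insns[2] << 16);  /* field@AAAAAAAA */
        *pValueReg = insns[3];                          /* vBBBB */
        /* four code units consumed in total, hence FINISH(4) */
    }
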
 #define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize)                   \
     HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/)                              \
     {                                                                       \
@@ -1186,7 +1260,7 @@
             if (sfield == NULL)                                             \
                 GOTO_exceptionThrown();                                     \
             if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
-                ABORT_JIT_TSELECT();                                        \
+                END_JIT_TSELECT();                                          \
             }                                                               \
         }                                                                   \
         dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
@@ -1196,6 +1270,30 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_SPUT_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, class@AAAAAAAA*/)                        \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        ILOGV("|sput%s/jumbo v%d,sfield@0x%08x", (_opname), vdst, ref);     \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(curMethod->clazz, ref);          \
+            if (sfield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+            if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
+                END_JIT_TSELECT();                                          \
+            }                                                               \
+        }                                                                   \
+        dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
+        ILOGV("+ SPUT '%s'=0x%08llx",                                       \
+            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+        UPDATE_FIELD_PUT(&sfield->field);                                   \
+    }                                                                       \
+    FINISH(4);
+
 /* File: cstubs/enddefs.c */
 
 /* undefine "magic" name remapping */
@@ -1222,7 +1320,7 @@
 {
     register uint32_t rPC       asm("r4");
     register uint32_t rFP       asm("r5");
-    register uint32_t rGLUE     asm("r6");
+    register uint32_t rSELF     asm("r6");
     register uint32_t rINST     asm("r7");
     register uint32_t rIBASE    asm("r8");
     register uint32_t r9        asm("r9");
@@ -1231,12 +1329,12 @@
     //extern char dvmAsmInstructionStart[];
 
     printf("REGS: r0=%08x r1=%08x r2=%08x r3=%08x\n", r0, r1, r2, r3);
-    printf("    : rPC=%08x rFP=%08x rGLUE=%08x rINST=%08x\n",
-        rPC, rFP, rGLUE, rINST);
+    printf("    : rPC=%08x rFP=%08x rSELF=%08x rINST=%08x\n",
+        rPC, rFP, rSELF, rINST);
     printf("    : rIBASE=%08x r9=%08x r10=%08x\n", rIBASE, r9, r10);
 
-    //MterpGlue* glue = (MterpGlue*) rGLUE;
-    //const Method* method = glue->method;
+    //Thread* self = (Thread*) rSELF;
+    //const Method* method = self->method;
     printf("    + self is %p\n", dvmThreadSelf());
     //printf("    + currently in %s.%s %s\n",
     //    method->clazz->descriptor, method->name, method->shorty);
diff --git a/vm/mterp/out/InterpC-armv5te.c b/vm/mterp/out/InterpC-armv5te.c
index fcd2182..e189ca3 100644
--- a/vm/mterp/out/InterpC-armv5te.c
+++ b/vm/mterp/out/InterpC-armv5te.c
@@ -58,24 +58,31 @@
 #endif
 
 /*
- * ARM EABI requires 64-bit alignment for access to 64-bit data types.  We
- * can't just use pointers to copy 64-bit values out of our interpreted
- * register set, because gcc will generate ldrd/strd.
+ * Some architectures require 64-bit alignment for access to 64-bit data
+ * types.  We can't just use pointers to copy 64-bit values out of our
+ * interpreted register set, because gcc may assume the pointer target is
+ * aligned and generate invalid code.
  *
- * The __UNION version copies data in and out of a union.  The __MEMCPY
- * version uses a memcpy() call to do the transfer; gcc is smart enough to
- * not actually call memcpy().  The __UNION version is very bad on ARM;
- * it only uses one more instruction than __MEMCPY, but for some reason
- * gcc thinks it needs separate storage for every instance of the union.
- * On top of that, it feels the need to zero them out at the start of the
- * method.  Net result is we zero out ~700 bytes of stack space at the top
- * of the interpreter using ARM STM instructions.
+ * There are two common approaches:
+ *  (1) Use a union that defines a 32-bit pair and a 64-bit value.
+ *  (2) Call memcpy().
+ *
+ * Depending upon what compiler you're using and what options are specified,
+ * one may be faster than the other.  For example, the compiler might
+ * convert a memcpy() of 8 bytes into a series of instructions and omit
+ * the call.  The union version could cause some strange side-effects,
+ * e.g. for a while ARM gcc thought it needed separate storage for each
+ * inlined instance, and generated instructions to zero out ~700 bytes of
+ * stack space at the top of the interpreter.
+ *
+ * The default is to use memcpy().  The current gcc for ARM seems to do
+ * better with the union.
  */
 #if defined(__ARM_EABI__)
-//# define NO_UNALIGN_64__UNION
-# define NO_UNALIGN_64__MEMCPY
+# define NO_UNALIGN_64__UNION
 #endif
 
+
 //#define LOG_INSTR                   /* verbose debugging */
 /* set and adjust ANDROID_LOG_TAGS='*:i jdwp:i dalvikvm:i dalvikvmi:i' */
 
@@ -171,12 +178,10 @@
     conv.parts[0] = ptr[0];
     conv.parts[1] = ptr[1];
     return conv.ll;
-#elif defined(NO_UNALIGN_64__MEMCPY)
+#else
     s8 val;
     memcpy(&val, &ptr[idx], 8);
     return val;
-#else
-    return *((s8*) &ptr[idx]);
 #endif
 }
 
@@ -190,10 +195,8 @@
     conv.ll = val;
     ptr[0] = conv.parts[0];
     ptr[1] = conv.parts[1];
-#elif defined(NO_UNALIGN_64__MEMCPY)
-    memcpy(&ptr[idx], &val, 8);
 #else
-    *((s8*) &ptr[idx]) = val;
+    memcpy(&ptr[idx], &val, 8);
 #endif
 }
 
@@ -207,12 +210,10 @@
     conv.parts[0] = ptr[0];
     conv.parts[1] = ptr[1];
     return conv.d;
-#elif defined(NO_UNALIGN_64__MEMCPY)
+#else
     double dval;
     memcpy(&dval, &ptr[idx], 8);
     return dval;
-#else
-    return *((double*) &ptr[idx]);
 #endif
 }
 
@@ -226,10 +227,8 @@
     conv.d = dval;
     ptr[0] = conv.parts[0];
     ptr[1] = conv.parts[1];
-#elif defined(NO_UNALIGN_64__MEMCPY)
-    memcpy(&ptr[idx], &dval, 8);
 #else
-    *((double*) &ptr[idx]) = dval;
+    memcpy(&ptr[idx], &dval, 8);
 #endif
 }
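The rewritten comment and the hunks above settle on two viable ways to move a 64-bit value through the 32-bit virtual register array; which is faster depends on the compiler, as the comment says. A self-contained sketch of both approaches, with illustrative helper names, so the trade-off is concrete:

    #include <stdint.h>
    #include <string.h>

    typedef int64_t  s8;
    typedef uint32_t u4;

    /* (1) union approach: assemble the value from a 32-bit pair */
    static s8 getLongViaUnion(const u4* ptr)
    {
        union { s8 ll; u4 parts[2]; } conv;
        conv.parts[0] = ptr[0];
        conv.parts[1] = ptr[1];
        return conv.ll;
    }

    /* (2) memcpy approach: a good compiler inlines this into a few loads */
    static s8 getLongViaMemcpy(const u4* ptr)
    {
        s8 val;
        memcpy(&val, ptr, sizeof(val));
        return val;
    }
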
 
@@ -318,10 +317,10 @@
 
 /*
  * The current PC must be available to Throwable constructors, e.g.
- * those created by dvmThrowException(), so that the exception stack
- * trace can be generated correctly.  If we don't do this, the offset
- * within the current method won't be shown correctly.  See the notes
- * in Exception.c.
+ * those created by the various exception throw routines, so that the
+ * exception stack trace can be generated correctly.  If we don't do this,
+ * the offset within the current method won't be shown correctly.  See the
+ * notes in Exception.c.
  *
  * This is also used to determine the address for precise GC.
  *
@@ -360,7 +359,7 @@
 static inline bool checkForNull(Object* obj)
 {
     if (obj == NULL) {
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         return false;
     }
 #ifdef WITH_EXTRA_OBJECT_VALIDATION
@@ -392,7 +391,7 @@
 {
     if (obj == NULL) {
         EXPORT_PC();
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         return false;
     }
 #ifdef WITH_EXTRA_OBJECT_VALIDATION
@@ -418,7 +417,7 @@
 # define CHECK_TRACKED_REFS() ((void)0)
 #define CHECK_JIT_BOOL() (false)
 #define CHECK_JIT_VOID()
-#define ABORT_JIT_TSELECT() ((void)0)
+#define END_JIT_TSELECT() ((void)0)
 
 /*
  * In the C mterp stubs, "goto" is a function call followed immediately
@@ -426,11 +425,11 @@
  */
 
 #define GOTO_TARGET_DECL(_target, ...)                                      \
-    void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__);
+    void dvmMterp_##_target(Thread* self, ## __VA_ARGS__);
 
 /* (void)xxx to quiet unused variable compiler warnings. */
 #define GOTO_TARGET(_target, ...)                                           \
-    void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__) {              \
+    void dvmMterp_##_target(Thread* self, ## __VA_ARGS__) {                 \
         u2 ref, vsrc1, vsrc2, vdst;                                         \
         u2 inst = FETCH(0);                                                 \
         const Method* methodToCall;                                         \
@@ -441,16 +440,15 @@
 #define GOTO_TARGET_END }
 
 /*
- * Redefine what used to be local variable accesses into MterpGlue struct
+ * Redefine what used to be local variable accesses into Thread struct
  * references.  (These are undefined down in "footer.c".)
  */
-#define retval                  glue->retval
-#define pc                      glue->pc
-#define fp                      glue->fp
-#define curMethod               glue->method
-#define methodClassDex          glue->methodClassDex
-#define self                    glue->self
-#define debugTrackedRefStart    glue->debugTrackedRefStart
+#define retval                  self->retval
+#define pc                      self->interpSave.pc
+#define fp                      self->interpSave.fp
+#define curMethod               self->interpSave.method
+#define methodClassDex          self->interpSave.methodClassDex
+#define debugTrackedRefStart    self->interpSave.debugTrackedRefStart
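With this remapping, each stub reaches its interpreter state through the Thread pointer instead of a separate MterpGlue argument. The fields the macros assume look roughly like the sketch below (layout and types are illustrative, not the actual Thread.h definition):

    /* Hedged sketch of the state the #define remapping relies on. */
    struct InterpSaveState {
        const u2*     pc;              /* Dalvik PC */
        u4*           fp;              /* frame pointer */
        const Method* method;          /* currently executing method */
        DvmDex*       methodClassDex;  /* DEX file the method came from */
        int           debugTrackedRefStart;
    };

    struct Thread {
        struct InterpSaveState interpSave;  /* saved interpreter registers */
        JValue                 retval;      /* method return value */
        /* ...entryPoint, threadId, and the rest of the real Thread... */
    };
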
 
 /* ugh */
 #define STUB_HACK(x) x
@@ -458,12 +456,12 @@
 
 /*
  * Opcode handler framing macros.  Here, each opcode is a separate function
- * that takes a "glue" argument and returns void.  We can't declare
+ * that takes a "self" argument and returns void.  We can't declare
  * these "static" because they may be called from an assembly stub.
  * (void)xxx to quiet unused variable compiler warnings.
  */
 #define HANDLE_OPCODE(_op)                                                  \
-    void dvmMterp_##_op(MterpGlue* glue) {                                  \
+    void dvmMterp_##_op(Thread* self) {                                     \
         u2 ref, vsrc1, vsrc2, vdst;                                         \
         u2 inst = FETCH(0);                                                 \
         (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst;
@@ -490,25 +488,25 @@
 
 #define GOTO_exceptionThrown()                                              \
     do {                                                                    \
-        dvmMterp_exceptionThrown(glue);                                     \
+        dvmMterp_exceptionThrown(self);                                     \
         return;                                                             \
     } while(false)
 
 #define GOTO_returnFromMethod()                                             \
     do {                                                                    \
-        dvmMterp_returnFromMethod(glue);                                    \
+        dvmMterp_returnFromMethod(self);                                    \
         return;                                                             \
     } while(false)
 
-#define GOTO_invoke(_target, _methodCallRange)                              \
+#define GOTO_invoke(_target, _methodCallRange, _jumboFormat)                \
     do {                                                                    \
-        dvmMterp_##_target(glue, _methodCallRange);                         \
+        dvmMterp_##_target(self, _methodCallRange, _jumboFormat);           \
         return;                                                             \
     } while(false)
 
 #define GOTO_invokeMethod(_methodCallRange, _methodToCall, _vsrc1, _vdst)   \
     do {                                                                    \
-        dvmMterp_invokeMethod(glue, _methodCallRange, _methodToCall,        \
+        dvmMterp_invokeMethod(self, _methodCallRange, _methodToCall,        \
             _vsrc1, _vdst);                                                 \
         return;                                                             \
     } while(false)
@@ -518,9 +516,9 @@
  * if we need to switch to the other interpreter upon our return.
  */
 #define GOTO_bail()                                                         \
-    dvmMterpStdBail(glue, false);
+    dvmMterpStdBail(self, false);
 #define GOTO_bail_switch()                                                  \
-    dvmMterpStdBail(glue, true);
+    dvmMterpStdBail(self, true);
 
 /*
  * Periodically check for thread suspension.
@@ -535,7 +533,7 @@
         }                                                                   \
         if (NEED_INTERP_SWITCH(INTERP_TYPE)) {                              \
             ADJUST_PC(_pcadj);                                              \
-            glue->entryPoint = _entryPoint;                                 \
+            self->entryPoint = _entryPoint;                                 \
             LOGVV("threadid=%d: switch to STD ep=%d adj=%d\n",              \
                 self->threadId, (_entryPoint), (_pcadj));                   \
             GOTO_bail_switch();                                             \
@@ -544,14 +542,14 @@
 
 /* File: c/opcommon.c */
 /* forward declarations of goto targets */
-GOTO_TARGET_DECL(filledNewArray, bool methodCallRange);
-GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange);
-GOTO_TARGET_DECL(invokeSuper, bool methodCallRange);
-GOTO_TARGET_DECL(invokeInterface, bool methodCallRange);
-GOTO_TARGET_DECL(invokeDirect, bool methodCallRange);
-GOTO_TARGET_DECL(invokeStatic, bool methodCallRange);
-GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange);
-GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange);
+GOTO_TARGET_DECL(filledNewArray, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeSuper, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeInterface, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeDirect, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeStatic, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange, bool jumboFormat);
 GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall,
     u2 count, u2 regs);
 GOTO_TARGET_DECL(returnFromMethod);
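The extra _jumboFormat argument on GOTO_invoke and these forward declarations lets each invoke target handle both the classic encodings and the new jumbo one. Roughly how a target might branch on the flag when assembling the method reference, as a hedged sketch rather than the actual generated stub body:

    /* Illustrative only: relies on the FETCH()/u4 definitions above. */
    void dvmMterp_invokeVirtual(Thread* self, bool methodCallRange, bool jumboFormat)
    {
        u4 ref;

        if (jumboFormat) {
            ref = FETCH(1) | (u4) FETCH(2) << 16;   /* method@AAAAAAAA */
        } else {
            ref = FETCH(1);                         /* method@BBBB */
        }
        /* ...resolve the Method* from ref, then GOTO_invokeMethod(...)... */
        (void) self; (void) methodCallRange; (void) ref;
    }
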
@@ -694,8 +692,7 @@
             secondVal = GET_REGISTER(vsrc2);                                \
             if (secondVal == 0) {                                           \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && secondVal == -1) {            \
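Alongside the divide-by-zero throw, the surrounding context keeps the existing guard for the one other case integer division can trap on: INT_MIN divided by -1, whose true quotient does not fit in 32 bits. A small standalone illustration of why that guard exists (plain C, not handler code):

    #include <limits.h>

    /* INT_MIN / -1 overflows: the mathematically correct result, 2^31, is not
     * representable in a 32-bit signed int, so the handlers special-case it
     * rather than letting the hardware divide trap. */
    int divLikeDalvik(int num, int den)
    {
        if (num == INT_MIN && den == -1)
            return num;     /* Java/Dalvik semantics: result is INT_MIN again */
        return num / den;   /* caller must have rejected den == 0 already */
    }
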
@@ -741,9 +738,8 @@
             firstVal = GET_REGISTER(vsrc1);                                 \
             if ((s2) vsrc2 == 0) {                                          \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
-                GOTO_exceptionThrown();                                      \
+                dvmThrowArithmeticException("divide by zero");              \
+                GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && ((s2) vsrc2) == -1) {         \
                 /* won't generate /lit16 instr for this; check anyway */    \
@@ -776,8 +772,7 @@
             firstVal = GET_REGISTER(vsrc1);                                 \
             if ((s1) vsrc2 == 0) {                                          \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && ((s1) vsrc2) == -1) {         \
@@ -822,8 +817,7 @@
             secondVal = GET_REGISTER(vsrc1);                                \
             if (secondVal == 0) {                                           \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && secondVal == -1) {            \
@@ -865,8 +859,7 @@
             secondVal = GET_REGISTER_WIDE(vsrc2);                           \
             if (secondVal == 0LL) {                                         \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u8)firstVal == 0x8000000000000000ULL &&                    \
@@ -912,8 +905,7 @@
             secondVal = GET_REGISTER_WIDE(vsrc1);                           \
             if (secondVal == 0LL) {                                         \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u8)firstVal == 0x8000000000000000ULL &&                    \
@@ -1003,7 +995,8 @@
         if (!checkForNull((Object*) arrayObj))                              \
             GOTO_exceptionThrown();                                         \
         if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
-            dvmThrowAIOOBE(GET_REGISTER(vsrc2), arrayObj->length);          \
+            dvmThrowArrayIndexOutOfBoundsException(                         \
+                arrayObj->length, GET_REGISTER(vsrc2));                     \
             GOTO_exceptionThrown();                                         \
         }                                                                   \
         SET_REGISTER##_regsize(vdst,                                        \
@@ -1027,7 +1020,8 @@
         if (!checkForNull((Object*) arrayObj))                              \
             GOTO_exceptionThrown();                                         \
         if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
-            dvmThrowAIOOBE(GET_REGISTER(vsrc2), arrayObj->length);          \
+            dvmThrowArrayIndexOutOfBoundsException(                         \
+                arrayObj->length, GET_REGISTER(vsrc2));                     \
             GOTO_exceptionThrown();                                         \
         }                                                                   \
         ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\
@@ -1080,6 +1074,34 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_IGET_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, vCCCC, class@AAAAAAAA*/)                 \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        vsrc1 = FETCH(4);                      /* object ptr */             \
+        ILOGV("|iget%s/jumbo v%d,v%d,field@0x%08x",                         \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO_exceptionThrown();                                         \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(curMethod->clazz, ref);            \
+            if (ifield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst,                                        \
+            dvmGetField##_ftype(obj, ifield->byteOffset));                  \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+        UPDATE_FIELD_GET(&ifield->field);                                   \
+    }                                                                       \
+    FINISH(5);
+
 #define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
     HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
     {                                                                       \
@@ -1125,6 +1147,34 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_IPUT_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, vCCCC, class@AAAAAAAA*/)                 \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        vsrc1 = FETCH(4);                      /* object ptr */             \
+        ILOGV("|iput%s/jumbo v%d,v%d,field@0x%08x",                         \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO_exceptionThrown();                                         \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(curMethod->clazz, ref);            \
+            if (ifield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+        }                                                                   \
+        dvmSetField##_ftype(obj, ifield->byteOffset,                        \
+            GET_REGISTER##_regsize(vdst));                                  \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+        UPDATE_FIELD_PUT(&ifield->field);                                   \
+    }                                                                       \
+    FINISH(5);
+
 #define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
     HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
     {                                                                       \
@@ -1162,7 +1212,7 @@
             if (sfield == NULL)                                             \
                 GOTO_exceptionThrown();                                     \
             if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
-                ABORT_JIT_TSELECT();                                        \
+                END_JIT_TSELECT();                                          \
             }                                                               \
         }                                                                   \
         SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
@@ -1172,6 +1222,30 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_SGET_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, class@AAAAAAAA*/)                        \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        ILOGV("|sget%s/jumbo v%d,sfield@0x%08x", (_opname), vdst, ref);     \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(curMethod->clazz, ref);          \
+            if (sfield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+            if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
+                END_JIT_TSELECT();                                          \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
+        ILOGV("+ SGET '%s'=0x%08llx",                                       \
+            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+        UPDATE_FIELD_GET(&sfield->field);                                   \
+    }                                                                       \
+    FINISH(4);
+
 #define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize)                   \
     HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/)                              \
     {                                                                       \
@@ -1186,7 +1260,7 @@
             if (sfield == NULL)                                             \
                 GOTO_exceptionThrown();                                     \
             if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
-                ABORT_JIT_TSELECT();                                        \
+                END_JIT_TSELECT();                                          \
             }                                                               \
         }                                                                   \
         dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
@@ -1196,6 +1270,30 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_SPUT_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, class@AAAAAAAA*/)                        \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        ILOGV("|sput%s/jumbo v%d,sfield@0x%08x", (_opname), vdst, ref);     \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(curMethod->clazz, ref);          \
+            if (sfield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+            if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
+                END_JIT_TSELECT();                                          \
+            }                                                               \
+        }                                                                   \
+        dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
+        ILOGV("+ SPUT '%s'=0x%08llx",                                       \
+            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+        UPDATE_FIELD_PUT(&sfield->field);                                   \
+    }                                                                       \
+    FINISH(4);
+
 /* File: cstubs/enddefs.c */
 
 /* undefine "magic" name remapping */
@@ -1222,7 +1320,7 @@
 {
     register uint32_t rPC       asm("r4");
     register uint32_t rFP       asm("r5");
-    register uint32_t rGLUE     asm("r6");
+    register uint32_t rSELF     asm("r6");
     register uint32_t rINST     asm("r7");
     register uint32_t rIBASE    asm("r8");
     register uint32_t r9        asm("r9");
@@ -1231,12 +1329,12 @@
     //extern char dvmAsmInstructionStart[];
 
     printf("REGS: r0=%08x r1=%08x r2=%08x r3=%08x\n", r0, r1, r2, r3);
-    printf("    : rPC=%08x rFP=%08x rGLUE=%08x rINST=%08x\n",
-        rPC, rFP, rGLUE, rINST);
+    printf("    : rPC=%08x rFP=%08x rSELF=%08x rINST=%08x\n",
+        rPC, rFP, rSELF, rINST);
     printf("    : rIBASE=%08x r9=%08x r10=%08x\n", rIBASE, r9, r10);
 
-    //MterpGlue* glue = (MterpGlue*) rGLUE;
-    //const Method* method = glue->method;
+    //Thread* self = (Thread*) rSELF;
+    //const Method* method = self->method;
     printf("    + self is %p\n", dvmThreadSelf());
     //printf("    + currently in %s.%s %s\n",
     //    method->clazz->descriptor, method->name, method->shorty);
diff --git a/vm/mterp/out/InterpC-armv7-a-neon.c b/vm/mterp/out/InterpC-armv7-a-neon.c
index edb59f9..e63e577 100644
--- a/vm/mterp/out/InterpC-armv7-a-neon.c
+++ b/vm/mterp/out/InterpC-armv7-a-neon.c
@@ -58,24 +58,31 @@
 #endif
 
 /*
- * ARM EABI requires 64-bit alignment for access to 64-bit data types.  We
- * can't just use pointers to copy 64-bit values out of our interpreted
- * register set, because gcc will generate ldrd/strd.
+ * Some architectures require 64-bit alignment for access to 64-bit data
+ * types.  We can't just use pointers to copy 64-bit values out of our
+ * interpreted register set, because gcc may assume the pointer target is
+ * aligned and generate invalid code.
  *
- * The __UNION version copies data in and out of a union.  The __MEMCPY
- * version uses a memcpy() call to do the transfer; gcc is smart enough to
- * not actually call memcpy().  The __UNION version is very bad on ARM;
- * it only uses one more instruction than __MEMCPY, but for some reason
- * gcc thinks it needs separate storage for every instance of the union.
- * On top of that, it feels the need to zero them out at the start of the
- * method.  Net result is we zero out ~700 bytes of stack space at the top
- * of the interpreter using ARM STM instructions.
+ * There are two common approaches:
+ *  (1) Use a union that defines a 32-bit pair and a 64-bit value.
+ *  (2) Call memcpy().
+ *
+ * Depending upon what compiler you're using and what options are specified,
+ * one may be faster than the other.  For example, the compiler might
+ * convert a memcpy() of 8 bytes into a series of instructions and omit
+ * the call.  The union version could cause some strange side-effects,
+ * e.g. for a while ARM gcc thought it needed separate storage for each
+ * inlined instance, and generated instructions to zero out ~700 bytes of
+ * stack space at the top of the interpreter.
+ *
+ * The default is to use memcpy().  The current gcc for ARM seems to do
+ * better with the union.
  */
 #if defined(__ARM_EABI__)
-//# define NO_UNALIGN_64__UNION
-# define NO_UNALIGN_64__MEMCPY
+# define NO_UNALIGN_64__UNION
 #endif
 
+
 //#define LOG_INSTR                   /* verbose debugging */
 /* set and adjust ANDROID_LOG_TAGS='*:i jdwp:i dalvikvm:i dalvikvmi:i' */
 
@@ -171,12 +178,10 @@
     conv.parts[0] = ptr[0];
     conv.parts[1] = ptr[1];
     return conv.ll;
-#elif defined(NO_UNALIGN_64__MEMCPY)
+#else
     s8 val;
     memcpy(&val, &ptr[idx], 8);
     return val;
-#else
-    return *((s8*) &ptr[idx]);
 #endif
 }
 
@@ -190,10 +195,8 @@
     conv.ll = val;
     ptr[0] = conv.parts[0];
     ptr[1] = conv.parts[1];
-#elif defined(NO_UNALIGN_64__MEMCPY)
-    memcpy(&ptr[idx], &val, 8);
 #else
-    *((s8*) &ptr[idx]) = val;
+    memcpy(&ptr[idx], &val, 8);
 #endif
 }
 
@@ -207,12 +210,10 @@
     conv.parts[0] = ptr[0];
     conv.parts[1] = ptr[1];
     return conv.d;
-#elif defined(NO_UNALIGN_64__MEMCPY)
+#else
     double dval;
     memcpy(&dval, &ptr[idx], 8);
     return dval;
-#else
-    return *((double*) &ptr[idx]);
 #endif
 }
 
@@ -226,10 +227,8 @@
     conv.d = dval;
     ptr[0] = conv.parts[0];
     ptr[1] = conv.parts[1];
-#elif defined(NO_UNALIGN_64__MEMCPY)
-    memcpy(&ptr[idx], &dval, 8);
 #else
-    *((double*) &ptr[idx]) = dval;
+    memcpy(&ptr[idx], &dval, 8);
 #endif
 }
 
@@ -318,10 +317,10 @@
 
 /*
  * The current PC must be available to Throwable constructors, e.g.
- * those created by dvmThrowException(), so that the exception stack
- * trace can be generated correctly.  If we don't do this, the offset
- * within the current method won't be shown correctly.  See the notes
- * in Exception.c.
+ * those created by the various exception throw routines, so that the
+ * exception stack trace can be generated correctly.  If we don't do this,
+ * the offset within the current method won't be shown correctly.  See the
+ * notes in Exception.c.
  *
  * This is also used to determine the address for precise GC.
  *
@@ -360,7 +359,7 @@
 static inline bool checkForNull(Object* obj)
 {
     if (obj == NULL) {
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         return false;
     }
 #ifdef WITH_EXTRA_OBJECT_VALIDATION
@@ -392,7 +391,7 @@
 {
     if (obj == NULL) {
         EXPORT_PC();
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         return false;
     }
 #ifdef WITH_EXTRA_OBJECT_VALIDATION
@@ -418,7 +417,7 @@
 # define CHECK_TRACKED_REFS() ((void)0)
 #define CHECK_JIT_BOOL() (false)
 #define CHECK_JIT_VOID()
-#define ABORT_JIT_TSELECT() ((void)0)
+#define END_JIT_TSELECT() ((void)0)
 
 /*
  * In the C mterp stubs, "goto" is a function call followed immediately
@@ -426,11 +425,11 @@
  */
 
 #define GOTO_TARGET_DECL(_target, ...)                                      \
-    void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__);
+    void dvmMterp_##_target(Thread* self, ## __VA_ARGS__);
 
 /* (void)xxx to quiet unused variable compiler warnings. */
 #define GOTO_TARGET(_target, ...)                                           \
-    void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__) {              \
+    void dvmMterp_##_target(Thread* self, ## __VA_ARGS__) {                 \
         u2 ref, vsrc1, vsrc2, vdst;                                         \
         u2 inst = FETCH(0);                                                 \
         const Method* methodToCall;                                         \
@@ -441,16 +440,15 @@
 #define GOTO_TARGET_END }
 
 /*
- * Redefine what used to be local variable accesses into MterpGlue struct
+ * Redefine what used to be local variable accesses into Thread struct
  * references.  (These are undefined down in "footer.c".)
  */
-#define retval                  glue->retval
-#define pc                      glue->pc
-#define fp                      glue->fp
-#define curMethod               glue->method
-#define methodClassDex          glue->methodClassDex
-#define self                    glue->self
-#define debugTrackedRefStart    glue->debugTrackedRefStart
+#define retval                  self->retval
+#define pc                      self->interpSave.pc
+#define fp                      self->interpSave.fp
+#define curMethod               self->interpSave.method
+#define methodClassDex          self->interpSave.methodClassDex
+#define debugTrackedRefStart    self->interpSave.debugTrackedRefStart
 
 /* ugh */
 #define STUB_HACK(x) x
@@ -458,12 +456,12 @@
 
 /*
  * Opcode handler framing macros.  Here, each opcode is a separate function
- * that takes a "glue" argument and returns void.  We can't declare
+ * that takes a "self" argument and returns void.  We can't declare
  * these "static" because they may be called from an assembly stub.
  * (void)xxx to quiet unused variable compiler warnings.
  */
 #define HANDLE_OPCODE(_op)                                                  \
-    void dvmMterp_##_op(MterpGlue* glue) {                                  \
+    void dvmMterp_##_op(Thread* self) {                                     \
         u2 ref, vsrc1, vsrc2, vdst;                                         \
         u2 inst = FETCH(0);                                                 \
         (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst;
@@ -490,25 +488,25 @@
 
 #define GOTO_exceptionThrown()                                              \
     do {                                                                    \
-        dvmMterp_exceptionThrown(glue);                                     \
+        dvmMterp_exceptionThrown(self);                                     \
         return;                                                             \
     } while(false)
 
 #define GOTO_returnFromMethod()                                             \
     do {                                                                    \
-        dvmMterp_returnFromMethod(glue);                                    \
+        dvmMterp_returnFromMethod(self);                                    \
         return;                                                             \
     } while(false)
 
-#define GOTO_invoke(_target, _methodCallRange)                              \
+#define GOTO_invoke(_target, _methodCallRange, _jumboFormat)                \
     do {                                                                    \
-        dvmMterp_##_target(glue, _methodCallRange);                         \
+        dvmMterp_##_target(self, _methodCallRange, _jumboFormat);           \
         return;                                                             \
     } while(false)
 
 #define GOTO_invokeMethod(_methodCallRange, _methodToCall, _vsrc1, _vdst)   \
     do {                                                                    \
-        dvmMterp_invokeMethod(glue, _methodCallRange, _methodToCall,        \
+        dvmMterp_invokeMethod(self, _methodCallRange, _methodToCall,        \
             _vsrc1, _vdst);                                                 \
         return;                                                             \
     } while(false)
@@ -518,9 +516,9 @@
  * if we need to switch to the other interpreter upon our return.
  */
 #define GOTO_bail()                                                         \
-    dvmMterpStdBail(glue, false);
+    dvmMterpStdBail(self, false);
 #define GOTO_bail_switch()                                                  \
-    dvmMterpStdBail(glue, true);
+    dvmMterpStdBail(self, true);
 
 /*
  * Periodically check for thread suspension.
@@ -535,7 +533,7 @@
         }                                                                   \
         if (NEED_INTERP_SWITCH(INTERP_TYPE)) {                              \
             ADJUST_PC(_pcadj);                                              \
-            glue->entryPoint = _entryPoint;                                 \
+            self->entryPoint = _entryPoint;                                 \
             LOGVV("threadid=%d: switch to STD ep=%d adj=%d\n",              \
                 self->threadId, (_entryPoint), (_pcadj));                   \
             GOTO_bail_switch();                                             \
@@ -544,14 +542,14 @@
 
 /* File: c/opcommon.c */
 /* forward declarations of goto targets */
-GOTO_TARGET_DECL(filledNewArray, bool methodCallRange);
-GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange);
-GOTO_TARGET_DECL(invokeSuper, bool methodCallRange);
-GOTO_TARGET_DECL(invokeInterface, bool methodCallRange);
-GOTO_TARGET_DECL(invokeDirect, bool methodCallRange);
-GOTO_TARGET_DECL(invokeStatic, bool methodCallRange);
-GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange);
-GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange);
+GOTO_TARGET_DECL(filledNewArray, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeSuper, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeInterface, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeDirect, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeStatic, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange, bool jumboFormat);
 GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall,
     u2 count, u2 regs);
 GOTO_TARGET_DECL(returnFromMethod);
@@ -694,8 +692,7 @@
             secondVal = GET_REGISTER(vsrc2);                                \
             if (secondVal == 0) {                                           \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && secondVal == -1) {            \
@@ -741,9 +738,8 @@
             firstVal = GET_REGISTER(vsrc1);                                 \
             if ((s2) vsrc2 == 0) {                                          \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
-                GOTO_exceptionThrown();                                      \
+                dvmThrowArithmeticException("divide by zero");              \
+                GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && ((s2) vsrc2) == -1) {         \
                 /* won't generate /lit16 instr for this; check anyway */    \
@@ -776,8 +772,7 @@
             firstVal = GET_REGISTER(vsrc1);                                 \
             if ((s1) vsrc2 == 0) {                                          \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && ((s1) vsrc2) == -1) {         \
@@ -822,8 +817,7 @@
             secondVal = GET_REGISTER(vsrc1);                                \
             if (secondVal == 0) {                                           \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && secondVal == -1) {            \
@@ -865,8 +859,7 @@
             secondVal = GET_REGISTER_WIDE(vsrc2);                           \
             if (secondVal == 0LL) {                                         \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u8)firstVal == 0x8000000000000000ULL &&                    \
@@ -912,8 +905,7 @@
             secondVal = GET_REGISTER_WIDE(vsrc1);                           \
             if (secondVal == 0LL) {                                         \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u8)firstVal == 0x8000000000000000ULL &&                    \
@@ -1003,7 +995,8 @@
         if (!checkForNull((Object*) arrayObj))                              \
             GOTO_exceptionThrown();                                         \
         if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
-            dvmThrowAIOOBE(GET_REGISTER(vsrc2), arrayObj->length);          \
+            dvmThrowArrayIndexOutOfBoundsException(                         \
+                arrayObj->length, GET_REGISTER(vsrc2));                     \
             GOTO_exceptionThrown();                                         \
         }                                                                   \
         SET_REGISTER##_regsize(vdst,                                        \
@@ -1027,7 +1020,8 @@
         if (!checkForNull((Object*) arrayObj))                              \
             GOTO_exceptionThrown();                                         \
         if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
-            dvmThrowAIOOBE(GET_REGISTER(vsrc2), arrayObj->length);          \
+            dvmThrowArrayIndexOutOfBoundsException(                         \
+                arrayObj->length, GET_REGISTER(vsrc2));                     \
             GOTO_exceptionThrown();                                         \
         }                                                                   \
         ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\
@@ -1080,6 +1074,34 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_IGET_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, vCCCC, class@AAAAAAAA*/)                 \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        vsrc1 = FETCH(4);                      /* object ptr */             \
+        ILOGV("|iget%s/jumbo v%d,v%d,field@0x%08x",                         \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO_exceptionThrown();                                         \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(curMethod->clazz, ref);            \
+            if (ifield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst,                                        \
+            dvmGetField##_ftype(obj, ifield->byteOffset));                  \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+        UPDATE_FIELD_GET(&ifield->field);                                   \
+    }                                                                       \
+    FINISH(5);
+
 #define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
     HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
     {                                                                       \
@@ -1125,6 +1147,34 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_IPUT_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, vCCCC, class@AAAAAAAA*/)                 \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        vsrc1 = FETCH(4);                      /* object ptr */             \
+        ILOGV("|iput%s/jumbo v%d,v%d,field@0x%08x",                         \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO_exceptionThrown();                                         \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(curMethod->clazz, ref);            \
+            if (ifield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+        }                                                                   \
+        dvmSetField##_ftype(obj, ifield->byteOffset,                        \
+            GET_REGISTER##_regsize(vdst));                                  \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+        UPDATE_FIELD_PUT(&ifield->field);                                   \
+    }                                                                       \
+    FINISH(5);
+
 #define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
     HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
     {                                                                       \
@@ -1162,7 +1212,7 @@
             if (sfield == NULL)                                             \
                 GOTO_exceptionThrown();                                     \
             if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
-                ABORT_JIT_TSELECT();                                        \
+                END_JIT_TSELECT();                                          \
             }                                                               \
         }                                                                   \
         SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
@@ -1172,6 +1222,30 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_SGET_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, class@AAAAAAAA*/)                        \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        ILOGV("|sget%s/jumbo v%d,sfield@0x%08x", (_opname), vdst, ref);     \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(curMethod->clazz, ref);          \
+            if (sfield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+            if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
+                END_JIT_TSELECT();                                          \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
+        ILOGV("+ SGET '%s'=0x%08llx",                                       \
+            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+        UPDATE_FIELD_GET(&sfield->field);                                   \
+    }                                                                       \
+    FINISH(4);
+
 #define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize)                   \
     HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/)                              \
     {                                                                       \
@@ -1186,7 +1260,7 @@
             if (sfield == NULL)                                             \
                 GOTO_exceptionThrown();                                     \
             if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
-                ABORT_JIT_TSELECT();                                        \
+                END_JIT_TSELECT();                                          \
             }                                                               \
         }                                                                   \
         dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
@@ -1196,6 +1270,30 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_SPUT_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, class@AAAAAAAA*/)                        \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        ILOGV("|sput%s/jumbo v%d,sfield@0x%08x", (_opname), vdst, ref);     \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(curMethod->clazz, ref);          \
+            if (sfield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+            if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
+                END_JIT_TSELECT();                                          \
+            }                                                               \
+        }                                                                   \
+        dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
+        ILOGV("+ SPUT '%s'=0x%08llx",                                       \
+            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+        UPDATE_FIELD_PUT(&sfield->field);                                   \
+    }                                                                       \
+    FINISH(4);
+
 /* File: cstubs/enddefs.c */
 
 /* undefine "magic" name remapping */
@@ -1222,7 +1320,7 @@
 {
     register uint32_t rPC       asm("r4");
     register uint32_t rFP       asm("r5");
-    register uint32_t rGLUE     asm("r6");
+    register uint32_t rSELF     asm("r6");
     register uint32_t rINST     asm("r7");
     register uint32_t rIBASE    asm("r8");
     register uint32_t r9        asm("r9");
@@ -1231,12 +1329,12 @@
     //extern char dvmAsmInstructionStart[];
 
     printf("REGS: r0=%08x r1=%08x r2=%08x r3=%08x\n", r0, r1, r2, r3);
-    printf("    : rPC=%08x rFP=%08x rGLUE=%08x rINST=%08x\n",
-        rPC, rFP, rGLUE, rINST);
+    printf("    : rPC=%08x rFP=%08x rSELF=%08x rINST=%08x\n",
+        rPC, rFP, rSELF, rINST);
     printf("    : rIBASE=%08x r9=%08x r10=%08x\n", rIBASE, r9, r10);
 
-    //MterpGlue* glue = (MterpGlue*) rGLUE;
-    //const Method* method = glue->method;
+    //Thread* self = (Thread*) rSELF;
+    //const Method* method = self->method;
     printf("    + self is %p\n", dvmThreadSelf());
     //printf("    + currently in %s.%s %s\n",
     //    method->clazz->descriptor, method->name, method->shorty);
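
The iget/iput-jumbo and sget/sput-jumbo handlers added above all decode the
extended field-access format the same way: a 32-bit field reference split
across two 16-bit code units, followed by 16-bit register numbers, for a
total of five code units (four for the static-field forms).  A minimal,
stand-alone sketch of that decoding outside the interpreter macros; the
struct and function names here are illustrative only, not part of the patch:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint16_t u2;
    typedef uint32_t u4;

    /* Illustrative holder for a decoded iget/iput-jumbo instruction. */
    typedef struct {
        u4 fieldRef;   /* class@AAAAAAAA: 32-bit field index */
        u2 vdst;       /* vBBBB: destination/source register */
        u2 vobj;       /* vCCCC: object-pointer register */
    } JumboFieldInsn;

    /*
     * Decode the five code units of an iget/iput-jumbo instruction,
     * mirroring the handler macros:
     *     ref   = FETCH(1) | (u4)FETCH(2) << 16;
     *     vdst  = FETCH(3);
     *     vsrc1 = FETCH(4);
     * Returns the instruction width in code units (cf. FINISH(5)).
     */
    static int decodeJumboField(const u2* insns, JumboFieldInsn* out)
    {
        out->fieldRef = insns[1] | (u4)insns[2] << 16;   /* low unit first */
        out->vdst     = insns[3];
        out->vobj     = insns[4];
        return 5;
    }

    int main(void)
    {
        /* opcode unit, ref low, ref high, vBBBB, vCCCC */
        const u2 insns[5] = { 0x00ff, 0x5678, 0x0001, 0x0010, 0x0002 };
        JumboFieldInsn insn;
        int width = decodeJumboField(insns, &insn);
        printf("field@0x%08x vdst=%u vobj=%u width=%d\n",
            insn.fieldRef, (unsigned)insn.vdst, (unsigned)insn.vobj, width);
        return 0;
    }
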
diff --git a/vm/mterp/out/InterpC-armv7-a.c b/vm/mterp/out/InterpC-armv7-a.c
index 80a8320..f978a7e 100644
--- a/vm/mterp/out/InterpC-armv7-a.c
+++ b/vm/mterp/out/InterpC-armv7-a.c
@@ -58,24 +58,31 @@
 #endif
 
 /*
- * ARM EABI requires 64-bit alignment for access to 64-bit data types.  We
- * can't just use pointers to copy 64-bit values out of our interpreted
- * register set, because gcc will generate ldrd/strd.
+ * Some architectures require 64-bit alignment for access to 64-bit data
+ * types.  We can't just use pointers to copy 64-bit values out of our
+ * interpreted register set, because gcc may assume the pointer target is
+ * aligned and generate invalid code.
  *
- * The __UNION version copies data in and out of a union.  The __MEMCPY
- * version uses a memcpy() call to do the transfer; gcc is smart enough to
- * not actually call memcpy().  The __UNION version is very bad on ARM;
- * it only uses one more instruction than __MEMCPY, but for some reason
- * gcc thinks it needs separate storage for every instance of the union.
- * On top of that, it feels the need to zero them out at the start of the
- * method.  Net result is we zero out ~700 bytes of stack space at the top
- * of the interpreter using ARM STM instructions.
+ * There are two common approaches:
+ *  (1) Use a union that defines a 32-bit pair and a 64-bit value.
+ *  (2) Call memcpy().
+ *
+ * Depending upon what compiler you're using and what options are specified,
+ * one may be faster than the other.  For example, the compiler might
+ * convert a memcpy() of 8 bytes into a series of instructions and omit
+ * the call.  The union version could cause some strange side-effects,
+ * e.g. for a while ARM gcc thought it needed separate storage for each
+ * inlined instance, and generated instructions to zero out ~700 bytes of
+ * stack space at the top of the interpreter.
+ *
+ * The default is to use memcpy().  The current gcc for ARM seems to do
+ * better with the union.
  */
 #if defined(__ARM_EABI__)
-//# define NO_UNALIGN_64__UNION
-# define NO_UNALIGN_64__MEMCPY
+# define NO_UNALIGN_64__UNION
 #endif
 
+
 //#define LOG_INSTR                   /* verbose debugging */
 /* set and adjust ANDROID_LOG_TAGS='*:i jdwp:i dalvikvm:i dalvikvmi:i' */
 
@@ -171,12 +178,10 @@
     conv.parts[0] = ptr[0];
     conv.parts[1] = ptr[1];
     return conv.ll;
-#elif defined(NO_UNALIGN_64__MEMCPY)
+#else
     s8 val;
     memcpy(&val, &ptr[idx], 8);
     return val;
-#else
-    return *((s8*) &ptr[idx]);
 #endif
 }
 
@@ -190,10 +195,8 @@
     conv.ll = val;
     ptr[0] = conv.parts[0];
     ptr[1] = conv.parts[1];
-#elif defined(NO_UNALIGN_64__MEMCPY)
-    memcpy(&ptr[idx], &val, 8);
 #else
-    *((s8*) &ptr[idx]) = val;
+    memcpy(&ptr[idx], &val, 8);
 #endif
 }
 
@@ -207,12 +210,10 @@
     conv.parts[0] = ptr[0];
     conv.parts[1] = ptr[1];
     return conv.d;
-#elif defined(NO_UNALIGN_64__MEMCPY)
+#else
     double dval;
     memcpy(&dval, &ptr[idx], 8);
     return dval;
-#else
-    return *((double*) &ptr[idx]);
 #endif
 }
 
@@ -226,10 +227,8 @@
     conv.d = dval;
     ptr[0] = conv.parts[0];
     ptr[1] = conv.parts[1];
-#elif defined(NO_UNALIGN_64__MEMCPY)
-    memcpy(&ptr[idx], &dval, 8);
 #else
-    *((double*) &ptr[idx]) = dval;
+    memcpy(&ptr[idx], &dval, 8);
 #endif
 }
 
@@ -318,10 +317,10 @@
 
 /*
  * The current PC must be available to Throwable constructors, e.g.
- * those created by dvmThrowException(), so that the exception stack
- * trace can be generated correctly.  If we don't do this, the offset
- * within the current method won't be shown correctly.  See the notes
- * in Exception.c.
+ * those created by the various exception throw routines, so that the
+ * exception stack trace can be generated correctly.  If we don't do this,
+ * the offset within the current method won't be shown correctly.  See the
+ * notes in Exception.c.
  *
  * This is also used to determine the address for precise GC.
  *
@@ -360,7 +359,7 @@
 static inline bool checkForNull(Object* obj)
 {
     if (obj == NULL) {
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         return false;
     }
 #ifdef WITH_EXTRA_OBJECT_VALIDATION
@@ -392,7 +391,7 @@
 {
     if (obj == NULL) {
         EXPORT_PC();
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         return false;
     }
 #ifdef WITH_EXTRA_OBJECT_VALIDATION
@@ -418,7 +417,7 @@
 # define CHECK_TRACKED_REFS() ((void)0)
 #define CHECK_JIT_BOOL() (false)
 #define CHECK_JIT_VOID()
-#define ABORT_JIT_TSELECT() ((void)0)
+#define END_JIT_TSELECT() ((void)0)
 
 /*
  * In the C mterp stubs, "goto" is a function call followed immediately
@@ -426,11 +425,11 @@
  */
 
 #define GOTO_TARGET_DECL(_target, ...)                                      \
-    void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__);
+    void dvmMterp_##_target(Thread* self, ## __VA_ARGS__);
 
 /* (void)xxx to quiet unused variable compiler warnings. */
 #define GOTO_TARGET(_target, ...)                                           \
-    void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__) {              \
+    void dvmMterp_##_target(Thread* self, ## __VA_ARGS__) {                 \
         u2 ref, vsrc1, vsrc2, vdst;                                         \
         u2 inst = FETCH(0);                                                 \
         const Method* methodToCall;                                         \
@@ -441,16 +440,15 @@
 #define GOTO_TARGET_END }
 
 /*
- * Redefine what used to be local variable accesses into MterpGlue struct
+ * Redefine what used to be local variable accesses into Thread struct
  * references.  (These are undefined down in "footer.c".)
  */
-#define retval                  glue->retval
-#define pc                      glue->pc
-#define fp                      glue->fp
-#define curMethod               glue->method
-#define methodClassDex          glue->methodClassDex
-#define self                    glue->self
-#define debugTrackedRefStart    glue->debugTrackedRefStart
+#define retval                  self->retval
+#define pc                      self->interpSave.pc
+#define fp                      self->interpSave.fp
+#define curMethod               self->interpSave.method
+#define methodClassDex          self->interpSave.methodClassDex
+#define debugTrackedRefStart    self->interpSave.debugTrackedRefStart
 
 /* ugh */
 #define STUB_HACK(x) x
@@ -458,12 +456,12 @@
 
 /*
  * Opcode handler framing macros.  Here, each opcode is a separate function
- * that takes a "glue" argument and returns void.  We can't declare
+ * that takes a "self" argument and returns void.  We can't declare
  * these "static" because they may be called from an assembly stub.
  * (void)xxx to quiet unused variable compiler warnings.
  */
 #define HANDLE_OPCODE(_op)                                                  \
-    void dvmMterp_##_op(MterpGlue* glue) {                                  \
+    void dvmMterp_##_op(Thread* self) {                                     \
         u2 ref, vsrc1, vsrc2, vdst;                                         \
         u2 inst = FETCH(0);                                                 \
         (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst;
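
The framing macros above depend on a small trick: names that read like
locals in the handler bodies (pc, fp, curMethod, retval, ...) are
macro-remapped onto the Thread's saved interpreter state, so each opcode
handler can be an ordinary function that receives only the Thread pointer.
A reduced sketch of the pattern, with simplified stand-ins for the
Thread/interpSave layout; the types, fields, and handler name below only
illustrate the idea and are not the real structures:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint16_t u2;

    /* Simplified stand-ins for the saved interpreter state and Thread. */
    typedef struct {
        const u2* pc;
        uint32_t* fp;
    } InterpSave;

    typedef struct {
        InterpSave interpSave;
    } Thread;

    /* Remap bare names onto the Thread, as the stubs do. */
    #define pc  self->interpSave.pc
    #define fp  self->interpSave.fp

    /* An "opcode handler": a plain function of Thread*, whose body is
     * written as if pc and fp were local variables. */
    static void dvmMterp_OP_NOP_demo(Thread* self)
    {
        pc += 1;            /* really: self->interpSave.pc += 1 */
        (void)fp;
    }

    #undef pc
    #undef fp

    int main(void)
    {
        static const u2 insns[2] = { 0, 0 };
        Thread t = { { insns, NULL } };
        dvmMterp_OP_NOP_demo(&t);
        printf("pc advanced by %ld code units\n",
            (long)(t.interpSave.pc - insns));
        return 0;
    }
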
@@ -490,25 +488,25 @@
 
 #define GOTO_exceptionThrown()                                              \
     do {                                                                    \
-        dvmMterp_exceptionThrown(glue);                                     \
+        dvmMterp_exceptionThrown(self);                                     \
         return;                                                             \
     } while(false)
 
 #define GOTO_returnFromMethod()                                             \
     do {                                                                    \
-        dvmMterp_returnFromMethod(glue);                                    \
+        dvmMterp_returnFromMethod(self);                                    \
         return;                                                             \
     } while(false)
 
-#define GOTO_invoke(_target, _methodCallRange)                              \
+#define GOTO_invoke(_target, _methodCallRange, _jumboFormat)                \
     do {                                                                    \
-        dvmMterp_##_target(glue, _methodCallRange);                         \
+        dvmMterp_##_target(self, _methodCallRange, _jumboFormat);           \
         return;                                                             \
     } while(false)
 
 #define GOTO_invokeMethod(_methodCallRange, _methodToCall, _vsrc1, _vdst)   \
     do {                                                                    \
-        dvmMterp_invokeMethod(glue, _methodCallRange, _methodToCall,        \
+        dvmMterp_invokeMethod(self, _methodCallRange, _methodToCall,        \
             _vsrc1, _vdst);                                                 \
         return;                                                             \
     } while(false)
@@ -518,9 +516,9 @@
  * if we need to switch to the other interpreter upon our return.
  */
 #define GOTO_bail()                                                         \
-    dvmMterpStdBail(glue, false);
+    dvmMterpStdBail(self, false);
 #define GOTO_bail_switch()                                                  \
-    dvmMterpStdBail(glue, true);
+    dvmMterpStdBail(self, true);
 
 /*
  * Periodically check for thread suspension.
@@ -535,7 +533,7 @@
         }                                                                   \
         if (NEED_INTERP_SWITCH(INTERP_TYPE)) {                              \
             ADJUST_PC(_pcadj);                                              \
-            glue->entryPoint = _entryPoint;                                 \
+            self->entryPoint = _entryPoint;                                 \
             LOGVV("threadid=%d: switch to STD ep=%d adj=%d\n",              \
                 self->threadId, (_entryPoint), (_pcadj));                   \
             GOTO_bail_switch();                                             \
@@ -544,14 +542,14 @@
 
 /* File: c/opcommon.c */
 /* forward declarations of goto targets */
-GOTO_TARGET_DECL(filledNewArray, bool methodCallRange);
-GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange);
-GOTO_TARGET_DECL(invokeSuper, bool methodCallRange);
-GOTO_TARGET_DECL(invokeInterface, bool methodCallRange);
-GOTO_TARGET_DECL(invokeDirect, bool methodCallRange);
-GOTO_TARGET_DECL(invokeStatic, bool methodCallRange);
-GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange);
-GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange);
+GOTO_TARGET_DECL(filledNewArray, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeSuper, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeInterface, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeDirect, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeStatic, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange, bool jumboFormat);
 GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall,
     u2 count, u2 regs);
 GOTO_TARGET_DECL(returnFromMethod);
@@ -694,8 +692,7 @@
             secondVal = GET_REGISTER(vsrc2);                                \
             if (secondVal == 0) {                                           \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && secondVal == -1) {            \
@@ -741,9 +738,8 @@
             firstVal = GET_REGISTER(vsrc1);                                 \
             if ((s2) vsrc2 == 0) {                                          \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
-                GOTO_exceptionThrown();                                      \
+                dvmThrowArithmeticException("divide by zero");              \
+                GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && ((s2) vsrc2) == -1) {         \
                 /* won't generate /lit16 instr for this; check anyway */    \
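
After the divide-by-zero test, the handlers above check one more hazardous
operand pair: the most negative 32-bit value divided by -1.  In C that
quotient overflows (undefined behavior, and a trap on some hosts), while
the Dalvik/Java result is defined to wrap back to the dividend, so the
interpreter special-cases it before doing the native division.  A small
stand-alone illustration of the defined semantics; the helper name is
illustrative, not taken from the interpreter:

    #include <stdint.h>
    #include <stdio.h>

    /* 32-bit division with Java/Dalvik semantics.  Division by zero is
     * assumed to have been rejected already, as the handlers above do. */
    static int32_t javaIntDiv(int32_t dividend, int32_t divisor)
    {
        if ((uint32_t)dividend == 0x80000000u && divisor == -1)
            return dividend;           /* overflow case: wraps to INT32_MIN */
        return dividend / divisor;     /* otherwise C and Java agree */
    }

    int main(void)
    {
        printf("%d\n", javaIntDiv(INT32_MIN, -1));   /* -2147483648 */
        printf("%d\n", javaIntDiv(7, -2));           /* -3, truncates toward zero */
        return 0;
    }
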
@@ -776,8 +772,7 @@
             firstVal = GET_REGISTER(vsrc1);                                 \
             if ((s1) vsrc2 == 0) {                                          \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && ((s1) vsrc2) == -1) {         \
@@ -822,8 +817,7 @@
             secondVal = GET_REGISTER(vsrc1);                                \
             if (secondVal == 0) {                                           \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && secondVal == -1) {            \
@@ -865,8 +859,7 @@
             secondVal = GET_REGISTER_WIDE(vsrc2);                           \
             if (secondVal == 0LL) {                                         \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u8)firstVal == 0x8000000000000000ULL &&                    \
@@ -912,8 +905,7 @@
             secondVal = GET_REGISTER_WIDE(vsrc1);                           \
             if (secondVal == 0LL) {                                         \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u8)firstVal == 0x8000000000000000ULL &&                    \
@@ -1003,7 +995,8 @@
         if (!checkForNull((Object*) arrayObj))                              \
             GOTO_exceptionThrown();                                         \
         if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
-            dvmThrowAIOOBE(GET_REGISTER(vsrc2), arrayObj->length);          \
+            dvmThrowArrayIndexOutOfBoundsException(                         \
+                arrayObj->length, GET_REGISTER(vsrc2));                     \
             GOTO_exceptionThrown();                                         \
         }                                                                   \
         SET_REGISTER##_regsize(vdst,                                        \
@@ -1027,7 +1020,8 @@
         if (!checkForNull((Object*) arrayObj))                              \
             GOTO_exceptionThrown();                                         \
         if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
-            dvmThrowAIOOBE(GET_REGISTER(vsrc2), arrayObj->length);          \
+            dvmThrowArrayIndexOutOfBoundsException(                         \
+                arrayObj->length, GET_REGISTER(vsrc2));                     \
             GOTO_exceptionThrown();                                         \
         }                                                                   \
         ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\
@@ -1080,6 +1074,34 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_IGET_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, vCCCC, class@AAAAAAAA*/)                 \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        vsrc1 = FETCH(4);                      /* object ptr */             \
+        ILOGV("|iget%s/jumbo v%d,v%d,field@0x%08x",                         \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO_exceptionThrown();                                         \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(curMethod->clazz, ref);            \
+            if (ifield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst,                                        \
+            dvmGetField##_ftype(obj, ifield->byteOffset));                  \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+        UPDATE_FIELD_GET(&ifield->field);                                   \
+    }                                                                       \
+    FINISH(5);
+
 #define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
     HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
     {                                                                       \
@@ -1125,6 +1147,34 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_IPUT_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, vCCCC, class@AAAAAAAA*/)                 \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        vsrc1 = FETCH(4);                      /* object ptr */             \
+        ILOGV("|iput%s/jumbo v%d,v%d,field@0x%08x",                         \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO_exceptionThrown();                                         \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(curMethod->clazz, ref);            \
+            if (ifield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+        }                                                                   \
+        dvmSetField##_ftype(obj, ifield->byteOffset,                        \
+            GET_REGISTER##_regsize(vdst));                                  \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+        UPDATE_FIELD_PUT(&ifield->field);                                   \
+    }                                                                       \
+    FINISH(5);
+
 #define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
     HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
     {                                                                       \
@@ -1162,7 +1212,7 @@
             if (sfield == NULL)                                             \
                 GOTO_exceptionThrown();                                     \
             if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
-                ABORT_JIT_TSELECT();                                        \
+                END_JIT_TSELECT();                                          \
             }                                                               \
         }                                                                   \
         SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
@@ -1172,6 +1222,30 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_SGET_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, class@AAAAAAAA*/)                        \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        ILOGV("|sget%s/jumbo v%d,sfield@0x%08x", (_opname), vdst, ref);     \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(curMethod->clazz, ref);          \
+            if (sfield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+            if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
+                END_JIT_TSELECT();                                          \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
+        ILOGV("+ SGET '%s'=0x%08llx",                                       \
+            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+        UPDATE_FIELD_GET(&sfield->field);                                   \
+    }                                                                       \
+    FINISH(4);
+
 #define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize)                   \
     HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/)                              \
     {                                                                       \
@@ -1186,7 +1260,7 @@
             if (sfield == NULL)                                             \
                 GOTO_exceptionThrown();                                     \
             if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
-                ABORT_JIT_TSELECT();                                        \
+                END_JIT_TSELECT();                                          \
             }                                                               \
         }                                                                   \
         dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
@@ -1196,6 +1270,30 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_SPUT_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, class@AAAAAAAA*/)                        \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        ILOGV("|sput%s/jumbo v%d,sfield@0x%08x", (_opname), vdst, ref);     \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(curMethod->clazz, ref);          \
+            if (sfield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+            if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
+                END_JIT_TSELECT();                                          \
+            }                                                               \
+        }                                                                   \
+        dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
+        ILOGV("+ SPUT '%s'=0x%08llx",                                       \
+            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+        UPDATE_FIELD_PUT(&sfield->field);                                   \
+    }                                                                       \
+    FINISH(4);
+
 /* File: cstubs/enddefs.c */
 
 /* undefine "magic" name remapping */
@@ -1222,7 +1320,7 @@
 {
     register uint32_t rPC       asm("r4");
     register uint32_t rFP       asm("r5");
-    register uint32_t rGLUE     asm("r6");
+    register uint32_t rSELF     asm("r6");
     register uint32_t rINST     asm("r7");
     register uint32_t rIBASE    asm("r8");
     register uint32_t r9        asm("r9");
@@ -1231,12 +1329,12 @@
     //extern char dvmAsmInstructionStart[];
 
     printf("REGS: r0=%08x r1=%08x r2=%08x r3=%08x\n", r0, r1, r2, r3);
-    printf("    : rPC=%08x rFP=%08x rGLUE=%08x rINST=%08x\n",
-        rPC, rFP, rGLUE, rINST);
+    printf("    : rPC=%08x rFP=%08x rSELF=%08x rINST=%08x\n",
+        rPC, rFP, rSELF, rINST);
     printf("    : rIBASE=%08x r9=%08x r10=%08x\n", rIBASE, r9, r10);
 
-    //MterpGlue* glue = (MterpGlue*) rGLUE;
-    //const Method* method = glue->method;
+    //Thread* self = (Thread*) rSELF;
+    //const Method* method = self->method;
     printf("    + self is %p\n", dvmThreadSelf());
     //printf("    + currently in %s.%s %s\n",
     //    method->clazz->descriptor, method->name, method->shorty);
diff --git a/vm/mterp/out/InterpC-portdbg.c b/vm/mterp/out/InterpC-portdbg.c
index bf1825c..001ebc3 100644
--- a/vm/mterp/out/InterpC-portdbg.c
+++ b/vm/mterp/out/InterpC-portdbg.c
@@ -58,24 +58,31 @@
 #endif
 
 /*
- * ARM EABI requires 64-bit alignment for access to 64-bit data types.  We
- * can't just use pointers to copy 64-bit values out of our interpreted
- * register set, because gcc will generate ldrd/strd.
+ * Some architectures require 64-bit alignment for access to 64-bit data
+ * types.  We can't just use pointers to copy 64-bit values out of our
+ * interpreted register set, because gcc may assume the pointer target is
+ * aligned and generate invalid code.
  *
- * The __UNION version copies data in and out of a union.  The __MEMCPY
- * version uses a memcpy() call to do the transfer; gcc is smart enough to
- * not actually call memcpy().  The __UNION version is very bad on ARM;
- * it only uses one more instruction than __MEMCPY, but for some reason
- * gcc thinks it needs separate storage for every instance of the union.
- * On top of that, it feels the need to zero them out at the start of the
- * method.  Net result is we zero out ~700 bytes of stack space at the top
- * of the interpreter using ARM STM instructions.
+ * There are two common approaches:
+ *  (1) Use a union that defines a 32-bit pair and a 64-bit value.
+ *  (2) Call memcpy().
+ *
+ * Depending upon what compiler you're using and what options are specified,
+ * one may be faster than the other.  For example, the compiler might
+ * convert a memcpy() of 8 bytes into a series of instructions and omit
+ * the call.  The union version could cause some strange side-effects,
+ * e.g. for a while ARM gcc thought it needed separate storage for each
+ * inlined instance, and generated instructions to zero out ~700 bytes of
+ * stack space at the top of the interpreter.
+ *
+ * The default is to use memcpy().  The current gcc for ARM seems to do
+ * better with the union.
  */
 #if defined(__ARM_EABI__)
-//# define NO_UNALIGN_64__UNION
-# define NO_UNALIGN_64__MEMCPY
+# define NO_UNALIGN_64__UNION
 #endif
 
+
 //#define LOG_INSTR                   /* verbose debugging */
 /* set and adjust ANDROID_LOG_TAGS='*:i jdwp:i dalvikvm:i dalvikvmi:i' */
 
@@ -171,12 +178,10 @@
     conv.parts[0] = ptr[0];
     conv.parts[1] = ptr[1];
     return conv.ll;
-#elif defined(NO_UNALIGN_64__MEMCPY)
+#else
     s8 val;
     memcpy(&val, &ptr[idx], 8);
     return val;
-#else
-    return *((s8*) &ptr[idx]);
 #endif
 }
 
@@ -190,10 +195,8 @@
     conv.ll = val;
     ptr[0] = conv.parts[0];
     ptr[1] = conv.parts[1];
-#elif defined(NO_UNALIGN_64__MEMCPY)
-    memcpy(&ptr[idx], &val, 8);
 #else
-    *((s8*) &ptr[idx]) = val;
+    memcpy(&ptr[idx], &val, 8);
 #endif
 }
 
@@ -207,12 +210,10 @@
     conv.parts[0] = ptr[0];
     conv.parts[1] = ptr[1];
     return conv.d;
-#elif defined(NO_UNALIGN_64__MEMCPY)
+#else
     double dval;
     memcpy(&dval, &ptr[idx], 8);
     return dval;
-#else
-    return *((double*) &ptr[idx]);
 #endif
 }
 
@@ -226,10 +227,8 @@
     conv.d = dval;
     ptr[0] = conv.parts[0];
     ptr[1] = conv.parts[1];
-#elif defined(NO_UNALIGN_64__MEMCPY)
-    memcpy(&ptr[idx], &dval, 8);
 #else
-    *((double*) &ptr[idx]) = dval;
+    memcpy(&ptr[idx], &dval, 8);
 #endif
 }
 
@@ -318,10 +317,10 @@
 
 /*
  * The current PC must be available to Throwable constructors, e.g.
- * those created by dvmThrowException(), so that the exception stack
- * trace can be generated correctly.  If we don't do this, the offset
- * within the current method won't be shown correctly.  See the notes
- * in Exception.c.
+ * those created by the various exception throw routines, so that the
+ * exception stack trace can be generated correctly.  If we don't do this,
+ * the offset within the current method won't be shown correctly.  See the
+ * notes in Exception.c.
  *
  * This is also used to determine the address for precise GC.
  *
@@ -360,7 +359,7 @@
 static inline bool checkForNull(Object* obj)
 {
     if (obj == NULL) {
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         return false;
     }
 #ifdef WITH_EXTRA_OBJECT_VALIDATION
@@ -392,7 +391,7 @@
 {
     if (obj == NULL) {
         EXPORT_PC();
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         return false;
     }
 #ifdef WITH_EXTRA_OBJECT_VALIDATION
@@ -419,15 +418,15 @@
     checkDebugAndProf(pc, fp, self, curMethod, &debugIsMethodEntry)
 
 #if defined(WITH_JIT)
-#define CHECK_JIT_BOOL() (dvmCheckJit(pc, self, interpState, callsiteClass,\
+#define CHECK_JIT_BOOL() (dvmCheckJit(pc, self, callsiteClass,\
                           methodToCall))
-#define CHECK_JIT_VOID() (dvmCheckJit(pc, self, interpState, callsiteClass,\
+#define CHECK_JIT_VOID() (dvmCheckJit(pc, self, callsiteClass,\
                           methodToCall))
-#define ABORT_JIT_TSELECT() (dvmJitAbortTraceSelect(interpState))
+#define END_JIT_TSELECT() (dvmJitEndTraceSelect(self))
 #else
 #define CHECK_JIT_BOOL() (false)
 #define CHECK_JIT_VOID()
-#define ABORT_JIT_TSELECT(x) ((void)0)
+#define END_JIT_TSELECT(x) ((void)0)
 #endif
 
 /* File: portable/stubdefs.c */
@@ -469,10 +468,14 @@
 # define FINISH_BKPT(_opcode) {                                             \
         goto *handlerTable[_opcode];                                        \
     }
+# define DISPATCH_EXTENDED(_opcode) {                                       \
+        goto *handlerTable[0x100 + _opcode];                                \
+    }
 #else
 # define HANDLE_OPCODE(_op) case _op:
 # define FINISH(_offset)    { ADJUST_PC(_offset); break; }
 # define FINISH_BKPT(opcode) { > not implemented < }
+# define DISPATCH_EXTENDED(opcode) goto case (0x100 + opcode);
 #endif
 
 #define OP_END
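
The portable-interpreter stubs above gain DISPATCH_EXTENDED, which indexes
the computed-goto handler table at 0x100 plus the extended opcode number,
giving the 0xff-prefixed (jumbo) opcodes their own slots after the 256
regular ones.  A toy dispatch loop showing the same labels-as-values
pattern (a GCC/Clang extension, used when the portable interpreter is built
with threaded dispatch); the opcodes and handler names here are made up for
illustration:

    #include <stdio.h>

    int main(void)
    {
        /* Toy program: regular opcode 0x00, then extended opcode 0xff 0x01. */
        static const unsigned char program[] = { 0x00, 0xff, 0x01 };

        /* 256 regular slots, plus extended opcodes starting at 0x100. */
        static void* handlerTable[0x200];
        for (int i = 0; i < 0x200; i++)
            handlerTable[i] = &&op_unused;
        handlerTable[0x00]  = &&op_add_one;
        handlerTable[0xff]  = &&op_dispatch_ff;
        handlerTable[0x101] = &&op_extended_demo;

        const unsigned char* pc = program;
        int acc = 0;

        goto *handlerTable[*pc];              /* initial dispatch */

    op_add_one:                               /* a regular opcode */
        acc += 1;
        pc += 1;
        goto *handlerTable[*pc];

    op_dispatch_ff:                           /* 0xff prefix: extended opcode */
        pc += 1;
        goto *handlerTable[0x100 + *pc];      /* DISPATCH_EXTENDED analogue */

    op_extended_demo:                         /* lives at table slot 0x101 */
        acc += 100;
        printf("acc=%d\n", acc);
        return 0;

    op_unused:
        printf("unhandled opcode 0x%02x\n", *pc);
        return 1;
    }
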
@@ -494,9 +497,10 @@
 
 #define GOTO_returnFromMethod() goto returnFromMethod;
 
-#define GOTO_invoke(_target, _methodCallRange)                              \
+#define GOTO_invoke(_target, _methodCallRange, _jumboFormat)                \
     do {                                                                    \
         methodCallRange = _methodCallRange;                                 \
+        jumboFormat = _jumboFormat;                                         \
         goto _target;                                                       \
     } while(false)
 
@@ -519,10 +523,10 @@
         }                                                                   \
         if (NEED_INTERP_SWITCH(INTERP_TYPE)) {                              \
             ADJUST_PC(_pcadj);                                              \
-            interpState->entryPoint = _entryPoint;                          \
+            self->entryPoint = _entryPoint;                          \
             LOGVV("threadid=%d: switch to %s ep=%d adj=%d\n",               \
                 self->threadId,                                             \
-                (interpState->nextMode == INTERP_STD) ? "STD" : "DBG",      \
+                (self->nextMode == INTERP_STD) ? "STD" : "DBG",      \
                 (_entryPoint), (_pcadj));                                   \
             GOTO_bail_switch();                                             \
         }                                                                   \
@@ -530,14 +534,14 @@
 
 /* File: c/opcommon.c */
 /* forward declarations of goto targets */
-GOTO_TARGET_DECL(filledNewArray, bool methodCallRange);
-GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange);
-GOTO_TARGET_DECL(invokeSuper, bool methodCallRange);
-GOTO_TARGET_DECL(invokeInterface, bool methodCallRange);
-GOTO_TARGET_DECL(invokeDirect, bool methodCallRange);
-GOTO_TARGET_DECL(invokeStatic, bool methodCallRange);
-GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange);
-GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange);
+GOTO_TARGET_DECL(filledNewArray, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeSuper, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeInterface, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeDirect, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeStatic, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange, bool jumboFormat);
 GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall,
     u2 count, u2 regs);
 GOTO_TARGET_DECL(returnFromMethod);
@@ -680,8 +684,7 @@
             secondVal = GET_REGISTER(vsrc2);                                \
             if (secondVal == 0) {                                           \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && secondVal == -1) {            \
@@ -727,9 +730,8 @@
             firstVal = GET_REGISTER(vsrc1);                                 \
             if ((s2) vsrc2 == 0) {                                          \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
-                GOTO_exceptionThrown();                                      \
+                dvmThrowArithmeticException("divide by zero");              \
+                GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && ((s2) vsrc2) == -1) {         \
                 /* won't generate /lit16 instr for this; check anyway */    \
@@ -762,8 +764,7 @@
             firstVal = GET_REGISTER(vsrc1);                                 \
             if ((s1) vsrc2 == 0) {                                          \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && ((s1) vsrc2) == -1) {         \
@@ -808,8 +809,7 @@
             secondVal = GET_REGISTER(vsrc1);                                \
             if (secondVal == 0) {                                           \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && secondVal == -1) {            \
@@ -851,8 +851,7 @@
             secondVal = GET_REGISTER_WIDE(vsrc2);                           \
             if (secondVal == 0LL) {                                         \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u8)firstVal == 0x8000000000000000ULL &&                    \
@@ -898,8 +897,7 @@
             secondVal = GET_REGISTER_WIDE(vsrc1);                           \
             if (secondVal == 0LL) {                                         \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u8)firstVal == 0x8000000000000000ULL &&                    \
@@ -989,7 +987,8 @@
         if (!checkForNull((Object*) arrayObj))                              \
             GOTO_exceptionThrown();                                         \
         if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
-            dvmThrowAIOOBE(GET_REGISTER(vsrc2), arrayObj->length);          \
+            dvmThrowArrayIndexOutOfBoundsException(                         \
+                arrayObj->length, GET_REGISTER(vsrc2));                     \
             GOTO_exceptionThrown();                                         \
         }                                                                   \
         SET_REGISTER##_regsize(vdst,                                        \
@@ -1013,7 +1012,8 @@
         if (!checkForNull((Object*) arrayObj))                              \
             GOTO_exceptionThrown();                                         \
         if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
-            dvmThrowAIOOBE(GET_REGISTER(vsrc2), arrayObj->length);          \
+            dvmThrowArrayIndexOutOfBoundsException(                         \
+                arrayObj->length, GET_REGISTER(vsrc2));                     \
             GOTO_exceptionThrown();                                         \
         }                                                                   \
         ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\
@@ -1066,6 +1066,34 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_IGET_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, vCCCC, class@AAAAAAAA*/)                 \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        vsrc1 = FETCH(4);                      /* object ptr */             \
+        ILOGV("|iget%s/jumbo v%d,v%d,field@0x%08x",                         \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO_exceptionThrown();                                         \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(curMethod->clazz, ref);            \
+            if (ifield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst,                                        \
+            dvmGetField##_ftype(obj, ifield->byteOffset));                  \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+        UPDATE_FIELD_GET(&ifield->field);                                   \
+    }                                                                       \
+    FINISH(5);
+
 #define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
     HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
     {                                                                       \
@@ -1111,6 +1139,34 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_IPUT_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, vCCCC, field@AAAAAAAA*/)                 \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        vsrc1 = FETCH(4);                      /* object ptr */             \
+        ILOGV("|iput%s/jumbo v%d,v%d,field@0x%08x",                         \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO_exceptionThrown();                                         \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(curMethod->clazz, ref);            \
+            if (ifield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+        }                                                                   \
+        dvmSetField##_ftype(obj, ifield->byteOffset,                        \
+            GET_REGISTER##_regsize(vdst));                                  \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+        UPDATE_FIELD_PUT(&ifield->field);                                   \
+    }                                                                       \
+    FINISH(5);
+
 #define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
     HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
     {                                                                       \
@@ -1148,7 +1204,7 @@
             if (sfield == NULL)                                             \
                 GOTO_exceptionThrown();                                     \
             if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
-                ABORT_JIT_TSELECT();                                        \
+                END_JIT_TSELECT();                                          \
             }                                                               \
         }                                                                   \
         SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
@@ -1158,6 +1214,30 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_SGET_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, field@AAAAAAAA*/)                        \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        ILOGV("|sget%s/jumbo v%d,sfield@0x%08x", (_opname), vdst, ref);     \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(curMethod->clazz, ref);          \
+            if (sfield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+            if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
+                END_JIT_TSELECT();                                          \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
+        ILOGV("+ SGET '%s'=0x%08llx",                                       \
+            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+        UPDATE_FIELD_GET(&sfield->field);                                   \
+    }                                                                       \
+    FINISH(4);
+
 #define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize)                   \
     HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/)                              \
     {                                                                       \
@@ -1172,7 +1252,7 @@
             if (sfield == NULL)                                             \
                 GOTO_exceptionThrown();                                     \
             if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
-                ABORT_JIT_TSELECT();                                        \
+                END_JIT_TSELECT();                                          \
             }                                                               \
         }                                                                   \
         dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
@@ -1182,6 +1262,30 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_SPUT_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, field@AAAAAAAA*/)                        \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        ILOGV("|sput%s/jumbo v%d,sfield@0x%08x", (_opname), vdst, ref);     \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(curMethod->clazz, ref);          \
+            if (sfield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+            if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
+                END_JIT_TSELECT();                                          \
+            }                                                               \
+        }                                                                   \
+        dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
+        ILOGV("+ SPUT '%s'=0x%08llx",                                       \
+            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+        UPDATE_FIELD_PUT(&sfield->field);                                   \
+    }                                                                       \
+    FINISH(4);
+
 /* File: portable/debug.c */
 /* code in here is only included in portable-debug interpreter */
 
@@ -1379,7 +1483,7 @@
         static const char* mn = "shiftTest2";
         static const char* sg = "()V";
 
-        if (/*gDvm.debuggerActive &&*/
+        if (/*DEBUGGER_ACTIVE &&*/
             strcmp(method->clazz->descriptor, cd) == 0 &&
             strcmp(method->name, mn) == 0 &&
             strcmp(method->shorty, sg) == 0)
@@ -1390,7 +1494,7 @@
             dumpRegs(method, fp, true);
         }
 
-        if (!gDvm.debuggerActive)
+        if (!DEBUGGER_ACTIVE)
             *pIsMethodEntry = false;
     }
 #endif
@@ -1407,7 +1511,7 @@
         *pIsMethodEntry = false;
         TRACE_METHOD_ENTER(self, method);
     }
-    if (gDvm.debuggerActive) {
+    if (DEBUGGER_ACTIVE) {
         updateDebugger(method, pc, fp, isEntry, self);
     }
     if (gDvm.instructionCountEnableCount != 0) {
@@ -1429,17 +1533,17 @@
  *
  * This was written with an ARM implementation in mind.
  */
-bool INTERP_FUNC_NAME(Thread* self, InterpState* interpState)
+bool INTERP_FUNC_NAME(Thread* self)
 {
 #if defined(EASY_GDB)
     StackSaveArea* debugSaveArea = SAVEAREA_FROM_FP(self->curFrame);
 #endif
 #if INTERP_TYPE == INTERP_DBG
     bool debugIsMethodEntry = false;
-    debugIsMethodEntry = interpState->debugIsMethodEntry;
+    debugIsMethodEntry = self->debugIsMethodEntry;
 #endif
 #if defined(WITH_TRACKREF_CHECKS)
-    int debugTrackedRefStart = interpState->debugTrackedRefStart;
+    int debugTrackedRefStart = self->interpSave.debugTrackedRefStart;
 #endif
     DvmDex* methodClassDex;     // curMethod->clazz->pDvmDex
     JValue retval;
@@ -1450,11 +1554,12 @@
     u4* fp;                     // frame pointer
     u2 inst;                    // current instruction
     /* instruction decoding */
-    u2 ref;                     // 16-bit quantity fetched directly
+    u4 ref;                     // 16 or 32-bit quantity fetched directly
     u2 vsrc1, vsrc2, vdst;      // usually used for register indexes
     /* method call setup */
     const Method* methodToCall;
     bool methodCallRange;
+    bool jumboFormat;
 
 
 #if defined(THREADED_INTERP)
@@ -1465,16 +1570,16 @@
 #if defined(WITH_JIT)
 #if 0
     LOGD("*DebugInterp - entrypoint is %d, tgt is 0x%x, %s\n",
-         interpState->entryPoint,
-         interpState->pc,
-         interpState->method->name);
+         self->entryPoint,
+         self->interpSave.pc,
+         self->interpSave.method->name);
 #endif
 #if INTERP_TYPE == INTERP_DBG
     const ClassObject* callsiteClass = NULL;
 
 #if defined(WITH_SELF_VERIFICATION)
-    if (interpState->jitState != kJitSelfVerification) {
-        interpState->self->shadowSpace->jitExitState = kSVSIdle;
+    if (self->jitState != kJitSelfVerification) {
+        self->shadowSpace->jitExitState = kSVSIdle;
     }
 #endif
 
@@ -1487,11 +1592,11 @@
           * dvmJitCheckTraceRequest will change the jitState to kJitDone but
           * but stay in the dbg interpreter.
           */
-         (interpState->entryPoint == kInterpEntryInstr) &&
-         (interpState->jitState == kJitTSelectRequest ||
-          interpState->jitState == kJitTSelectRequestHot) &&
-         dvmJitCheckTraceRequest(self, interpState)) {
-        interpState->nextMode = INTERP_STD;
+         (self->entryPoint == kInterpEntryInstr) &&
+         (self->jitState == kJitTSelectRequest ||
+          self->jitState == kJitTSelectRequestHot) &&
+         dvmJitCheckTraceRequest(self)) {
+        self->nextMode = INTERP_STD;
         //LOGD("Invalid trace request, exiting\n");
         return true;
     }
@@ -1499,17 +1604,17 @@
 #endif /* WITH_JIT */
 
     /* copy state in */
-    curMethod = interpState->method;
-    pc = interpState->pc;
-    fp = interpState->fp;
-    retval = interpState->retval;   /* only need for kInterpEntryReturn? */
+    curMethod = self->interpSave.method;
+    pc = self->interpSave.pc;
+    fp = self->interpSave.fp;
+    retval = self->retval;   /* only need for kInterpEntryReturn? */
 
     methodClassDex = curMethod->clazz->pDvmDex;
 
     LOGVV("threadid=%d: entry(%s) %s.%s pc=0x%x fp=%p ep=%d\n",
-        self->threadId, (interpState->nextMode == INTERP_STD) ? "STD" : "DBG",
+        self->threadId, (self->nextMode == INTERP_STD) ? "STD" : "DBG",
         curMethod->clazz->descriptor, curMethod->name, pc - curMethod->insns,
-        fp, interpState->entryPoint);
+        fp, self->entryPoint);
 
     /*
      * DEBUG: scramble this to ensure we're not relying on it.
@@ -1520,11 +1625,11 @@
     if (debugIsMethodEntry) {
         ILOGD("|-- Now interpreting %s.%s", curMethod->clazz->descriptor,
                 curMethod->name);
-        DUMP_REGS(curMethod, interpState->fp, false);
+        DUMP_REGS(curMethod, self->interpSave.fp, false);
     }
 #endif
 
-    switch (interpState->entryPoint) {
+    switch (self->entryPoint) {
     case kInterpEntryInstr:
         /* just fall through to instruction loop or threaded kickstart */
         break;
@@ -1905,12 +2010,8 @@
         if (!checkForNullExportPC(obj, fp, pc))
             GOTO_exceptionThrown();
         ILOGV("+ locking %p %s\n", obj, obj->clazz->descriptor);
-        EXPORT_PC();    /* need for precise GC, also WITH_MONITOR_TRACKING */
+        EXPORT_PC();    /* need for precise GC */
         dvmLockObject(self, obj);
-#ifdef WITH_DEADLOCK_PREDICTION
-        if (dvmCheckException(self))
-            GOTO_exceptionThrown();
-#endif
     }
     FINISH(1);
 OP_END
@@ -2056,15 +2157,15 @@
          * check is not needed for mterp.
          */
         if (!dvmDexGetResolvedClass(methodClassDex, ref)) {
-            /* Class initialization is still ongoing - abandon the trace */
-            ABORT_JIT_TSELECT();
+            /* Class initialization is still ongoing - end the trace */
+            END_JIT_TSELECT();
         }
 
         /*
          * Verifier now tests for interface/abstract class.
          */
         //if (dvmIsInterfaceClass(clazz) || dvmIsAbstractClass(clazz)) {
-        //    dvmThrowExceptionWithClassMessage("Ljava/lang/InstantiationError;",
+        //    dvmThrowExceptionWithClassMessage(gDvm.exInstantiationError,
         //        clazz->descriptor);
         //    GOTO_exceptionThrown();
         //}
@@ -2092,7 +2193,7 @@
             vdst, vsrc1, ref, (s4) GET_REGISTER(vsrc1));
         length = (s4) GET_REGISTER(vsrc1);
         if (length < 0) {
-            dvmThrowException("Ljava/lang/NegativeArraySizeException;", NULL);
+            dvmThrowNegativeArraySizeException(length);
             GOTO_exceptionThrown();
         }
         arrayClass = dvmDexGetResolvedClass(methodClassDex, ref);
@@ -2115,12 +2216,12 @@
 
 /* File: c/OP_FILLED_NEW_ARRAY.c */
 HANDLE_OPCODE(OP_FILLED_NEW_ARRAY /*vB, {vD, vE, vF, vG, vA}, class@CCCC*/)
-    GOTO_invoke(filledNewArray, false);
+    GOTO_invoke(filledNewArray, false, false);
 OP_END
 
 /* File: c/OP_FILLED_NEW_ARRAY_RANGE.c */
 HANDLE_OPCODE(OP_FILLED_NEW_ARRAY_RANGE /*{vCCCC..v(CCCC+AA-1)}, class@BBBB*/)
-    GOTO_invoke(filledNewArray, true);
+    GOTO_invoke(filledNewArray, true, false);
 OP_END
 
 /* File: c/OP_FILL_ARRAY_DATA.c */
@@ -2140,8 +2241,7 @@
             arrayData >= curMethod->insns + dvmGetMethodInsnsSize(curMethod))
         {
             /* should have been caught in verifier */
-            dvmThrowException("Ljava/lang/InternalError;",
-                              "bad fill array data");
+            dvmThrowInternalError("bad fill array data");
             GOTO_exceptionThrown();
         }
 #endif
@@ -2242,7 +2342,7 @@
         {
             /* should have been caught in verifier */
             EXPORT_PC();
-            dvmThrowException("Ljava/lang/InternalError;", "bad packed switch");
+            dvmThrowInternalError("bad packed switch");
             GOTO_exceptionThrown();
         }
 #endif
@@ -2273,7 +2373,7 @@
         {
             /* should have been caught in verifier */
             EXPORT_PC();
-            dvmThrowException("Ljava/lang/InternalError;", "bad sparse switch");
+            dvmThrowInternalError("bad sparse switch");
             GOTO_exceptionThrown();
         }
 #endif
@@ -2431,7 +2531,8 @@
         if (!checkForNull((Object*) arrayObj))
             GOTO_exceptionThrown();
         if (GET_REGISTER(vsrc2) >= arrayObj->length) {
-            dvmThrowAIOOBE(GET_REGISTER(vsrc2), arrayObj->length);
+            dvmThrowArrayIndexOutOfBoundsException(
+                arrayObj->length, GET_REGISTER(vsrc2));
             GOTO_exceptionThrown();
         }
         obj = (Object*) GET_REGISTER(vdst);
@@ -2595,27 +2696,27 @@
 
 /* File: c/OP_INVOKE_VIRTUAL.c */
 HANDLE_OPCODE(OP_INVOKE_VIRTUAL /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-    GOTO_invoke(invokeVirtual, false);
+    GOTO_invoke(invokeVirtual, false, false);
 OP_END
 
 /* File: c/OP_INVOKE_SUPER.c */
 HANDLE_OPCODE(OP_INVOKE_SUPER /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-    GOTO_invoke(invokeSuper, false);
+    GOTO_invoke(invokeSuper, false, false);
 OP_END
 
 /* File: c/OP_INVOKE_DIRECT.c */
 HANDLE_OPCODE(OP_INVOKE_DIRECT /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-    GOTO_invoke(invokeDirect, false);
+    GOTO_invoke(invokeDirect, false, false);
 OP_END
 
 /* File: c/OP_INVOKE_STATIC.c */
 HANDLE_OPCODE(OP_INVOKE_STATIC /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-    GOTO_invoke(invokeStatic, false);
+    GOTO_invoke(invokeStatic, false, false);
 OP_END
 
 /* File: c/OP_INVOKE_INTERFACE.c */
 HANDLE_OPCODE(OP_INVOKE_INTERFACE /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-    GOTO_invoke(invokeInterface, false);
+    GOTO_invoke(invokeInterface, false, false);
 OP_END
 
 /* File: c/OP_UNUSED_73.c */
@@ -2624,27 +2725,27 @@
 
 /* File: c/OP_INVOKE_VIRTUAL_RANGE.c */
 HANDLE_OPCODE(OP_INVOKE_VIRTUAL_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
-    GOTO_invoke(invokeVirtual, true);
+    GOTO_invoke(invokeVirtual, true, false);
 OP_END
 
 /* File: c/OP_INVOKE_SUPER_RANGE.c */
 HANDLE_OPCODE(OP_INVOKE_SUPER_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
-    GOTO_invoke(invokeSuper, true);
+    GOTO_invoke(invokeSuper, true, false);
 OP_END
 
 /* File: c/OP_INVOKE_DIRECT_RANGE.c */
 HANDLE_OPCODE(OP_INVOKE_DIRECT_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
-    GOTO_invoke(invokeDirect, true);
+    GOTO_invoke(invokeDirect, true, false);
 OP_END
 
 /* File: c/OP_INVOKE_STATIC_RANGE.c */
 HANDLE_OPCODE(OP_INVOKE_STATIC_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
-    GOTO_invoke(invokeStatic, true);
+    GOTO_invoke(invokeStatic, true, false);
 OP_END
 
 /* File: c/OP_INVOKE_INTERFACE_RANGE.c */
 HANDLE_OPCODE(OP_INVOKE_INTERFACE_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
-    GOTO_invoke(invokeInterface, true);
+    GOTO_invoke(invokeInterface, true, false);
 OP_END
 
 /* File: c/OP_UNUSED_79.c */
@@ -3309,21 +3410,34 @@
     FINISH(3);
 OP_END
 
-/* File: c/OP_INVOKE_DIRECT_EMPTY.c */
-HANDLE_OPCODE(OP_INVOKE_DIRECT_EMPTY /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-#if INTERP_TYPE != INTERP_DBG
-    //LOGI("Ignoring empty\n");
-    FINISH(3);
-#else
-    if (!gDvm.debuggerActive) {
-        //LOGI("Skipping empty\n");
-        FINISH(3);      // don't want it to show up in profiler output
-    } else {
-        //LOGI("Running empty\n");
-        /* fall through to OP_INVOKE_DIRECT */
-        GOTO_invoke(invokeDirect, false);
-    }
+/* File: c/OP_INVOKE_OBJECT_INIT_RANGE.c */
+HANDLE_OPCODE(OP_INVOKE_OBJECT_INIT_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+    {
+        Object* obj;
+
+        vsrc1 = FETCH(2);               /* reg number of "this" pointer */
+        obj = GET_REGISTER_AS_OBJECT(vsrc1);
+
+        if (!checkForNullExportPC(obj, fp, pc))
+            GOTO_exceptionThrown();
+
+        /*
+         * The object should be marked "finalizable" when Object.<init>
+         * completes normally.  We're going to assume it does complete
+         * (by virtue of being nothing but a return-void) and set it now.
+         */
+        if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE)) {
+            dvmSetFinalizable(obj);
+        }
+
+#if INTERP_TYPE == INTERP_DBG
+        if (DEBUGGER_ACTIVE) {
+            /* behave like OP_INVOKE_DIRECT_RANGE */
+            GOTO_invoke(invokeDirect, true, false);
+        }
 #endif
+        FINISH(3);
+    }
 OP_END
 
 /* File: c/OP_RETURN_VOID_BARRIER.c */
@@ -3362,22 +3476,22 @@
 
 /* File: c/OP_INVOKE_VIRTUAL_QUICK.c */
 HANDLE_OPCODE(OP_INVOKE_VIRTUAL_QUICK /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-    GOTO_invoke(invokeVirtualQuick, false);
+    GOTO_invoke(invokeVirtualQuick, false, false);
 OP_END
 
 /* File: c/OP_INVOKE_VIRTUAL_QUICK_RANGE.c */
 HANDLE_OPCODE(OP_INVOKE_VIRTUAL_QUICK_RANGE/*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
-    GOTO_invoke(invokeVirtualQuick, true);
+    GOTO_invoke(invokeVirtualQuick, true, false);
 OP_END
 
 /* File: c/OP_INVOKE_SUPER_QUICK.c */
 HANDLE_OPCODE(OP_INVOKE_SUPER_QUICK /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-    GOTO_invoke(invokeSuperQuick, false);
+    GOTO_invoke(invokeSuperQuick, false, false);
 OP_END
 
 /* File: c/OP_INVOKE_SUPER_QUICK_RANGE.c */
 HANDLE_OPCODE(OP_INVOKE_SUPER_QUICK_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
-    GOTO_invoke(invokeSuperQuick, true);
+    GOTO_invoke(invokeSuperQuick, true, false);
 OP_END
 
 /* File: c/OP_IPUT_OBJECT_VOLATILE.c */
@@ -3395,13 +3509,1238 @@
 /* File: c/OP_DISPATCH_FF.c */
 HANDLE_OPCODE(OP_DISPATCH_FF)
     /*
+     * Indicates extended opcode.  Use next 8 bits to choose where to branch.
+     */
+    DISPATCH_EXTENDED(INST_AA(inst));
+OP_END
+
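The OP_DISPATCH_FF handler above is the gateway to all of the jumbo opcodes that follow: when the low byte of the opcode unit is 0xFF, the high byte (INST_AA(inst)) selects a second-level handler. A minimal sketch of that two-level dispatch, with hypothetical table contents (the handler names and the slot assignment are assumptions for this example):

    /*
     * Illustrative sketch: second-level dispatch on the high byte of an
     * 0xFF opcode unit, analogous to DISPATCH_EXTENDED(INST_AA(inst)).
     */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint16_t u2;
    typedef void (*ExtendedHandler)(void);

    static void handleConstClassJumbo(void) { puts("const-class/jumbo"); }
    static void handleUnusedExtended(void)  { puts("unused extended opcode"); }

    int main(void)
    {
        ExtendedHandler table[256];
        for (int i = 0; i < 256; i++)
            table[i] = handleUnusedExtended;
        table[0x00] = handleConstClassJumbo;   /* hypothetical slot */

        u2 inst = 0x00ff;                      /* low byte 0xff => extended */
        if ((inst & 0xff) == 0xff)
            table[inst >> 8]();                /* INST_AA(inst): high byte */
        return 0;
    }
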
+/* File: c/OP_CONST_CLASS_JUMBO.c */
+HANDLE_OPCODE(OP_CONST_CLASS_JUMBO /*vBBBB, class@AAAAAAAA*/)
+    {
+        ClassObject* clazz;
+
+        ref = FETCH(1) | (u4)FETCH(2) << 16;
+        vdst = FETCH(3);
+        ILOGV("|const-class/jumbo v%d class@0x%08x", vdst, ref);
+        clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+        if (clazz == NULL) {
+            EXPORT_PC();
+            clazz = dvmResolveClass(curMethod->clazz, ref, true);
+            if (clazz == NULL)
+                GOTO_exceptionThrown();
+        }
+        SET_REGISTER(vdst, (u4) clazz);
+    }
+    FINISH(4);
+OP_END
+
+/* File: c/OP_CHECK_CAST_JUMBO.c */
+HANDLE_OPCODE(OP_CHECK_CAST_JUMBO /*vBBBB, class@AAAAAAAA*/)
+    {
+        ClassObject* clazz;
+        Object* obj;
+
+        EXPORT_PC();
+
+        ref = FETCH(1) | (u4)FETCH(2) << 16;     /* class to check against */
+        vsrc1 = FETCH(3);
+        ILOGV("|check-cast/jumbo v%d,class@0x%08x", vsrc1, ref);
+
+        obj = (Object*)GET_REGISTER(vsrc1);
+        if (obj != NULL) {
+#if defined(WITH_EXTRA_OBJECT_VALIDATION)
+            if (!checkForNull(obj))
+                GOTO_exceptionThrown();
+#endif
+            clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+            if (clazz == NULL) {
+                clazz = dvmResolveClass(curMethod->clazz, ref, false);
+                if (clazz == NULL)
+                    GOTO_exceptionThrown();
+            }
+            if (!dvmInstanceof(obj->clazz, clazz)) {
+                dvmThrowClassCastException(obj->clazz, clazz);
+                GOTO_exceptionThrown();
+            }
+        }
+    }
+    FINISH(4);
+OP_END
+
+/* File: c/OP_INSTANCE_OF_JUMBO.c */
+HANDLE_OPCODE(OP_INSTANCE_OF_JUMBO /*vBBBB, vCCCC, class@AAAAAAAA*/)
+    {
+        ClassObject* clazz;
+        Object* obj;
+
+        ref = FETCH(1) | (u4)FETCH(2) << 16;     /* class to check against */
+        vdst = FETCH(3);
+        vsrc1 = FETCH(4);   /* object to check */
+        ILOGV("|instance-of/jumbo v%d,v%d,class@0x%08x", vdst, vsrc1, ref);
+
+        obj = (Object*)GET_REGISTER(vsrc1);
+        if (obj == NULL) {
+            SET_REGISTER(vdst, 0);
+        } else {
+#if defined(WITH_EXTRA_OBJECT_VALIDATION)
+            if (!checkForNullExportPC(obj, fp, pc))
+                GOTO_exceptionThrown();
+#endif
+            clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+            if (clazz == NULL) {
+                EXPORT_PC();
+                clazz = dvmResolveClass(curMethod->clazz, ref, true);
+                if (clazz == NULL)
+                    GOTO_exceptionThrown();
+            }
+            SET_REGISTER(vdst, dvmInstanceof(obj->clazz, clazz));
+        }
+    }
+    FINISH(5);
+OP_END
+
+/* File: c/OP_NEW_INSTANCE_JUMBO.c */
+HANDLE_OPCODE(OP_NEW_INSTANCE_JUMBO /*vBBBB, class@AAAAAAAA*/)
+    {
+        ClassObject* clazz;
+        Object* newObj;
+
+        EXPORT_PC();
+
+        ref = FETCH(1) | (u4)FETCH(2) << 16;
+        vdst = FETCH(3);
+        ILOGV("|new-instance/jumbo v%d,class@0x%08x", vdst, ref);
+        clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+        if (clazz == NULL) {
+            clazz = dvmResolveClass(curMethod->clazz, ref, false);
+            if (clazz == NULL)
+                GOTO_exceptionThrown();
+        }
+
+        if (!dvmIsClassInitialized(clazz) && !dvmInitClass(clazz))
+            GOTO_exceptionThrown();
+
+        /*
+         * The JIT needs dvmDexGetResolvedClass() to return non-null.
+         * Since we use the portable interpreter to build the trace, this extra
+         * check is not needed for mterp.
+         */
+        if (!dvmDexGetResolvedClass(methodClassDex, ref)) {
+            /* Class initialization is still ongoing - end the trace */
+            END_JIT_TSELECT();
+        }
+
+        /*
+         * Verifier now tests for interface/abstract class.
+         */
+        //if (dvmIsInterfaceClass(clazz) || dvmIsAbstractClass(clazz)) {
+        //    dvmThrowExceptionWithClassMessage(gDvm.exInstantiationError,
+        //        clazz->descriptor);
+        //    GOTO_exceptionThrown();
+        //}
+        newObj = dvmAllocObject(clazz, ALLOC_DONT_TRACK);
+        if (newObj == NULL)
+            GOTO_exceptionThrown();
+        SET_REGISTER(vdst, (u4) newObj);
+    }
+    FINISH(4);
+OP_END
+
+/* File: c/OP_NEW_ARRAY_JUMBO.c */
+HANDLE_OPCODE(OP_NEW_ARRAY_JUMBO /*vBBBB, vCCCC, class@AAAAAAAA*/)
+    {
+        ClassObject* arrayClass;
+        ArrayObject* newArray;
+        s4 length;
+
+        EXPORT_PC();
+
+        ref = FETCH(1) | (u4)FETCH(2) << 16;
+        vdst = FETCH(3);
+        vsrc1 = FETCH(4);       /* length reg */
+        ILOGV("|new-array/jumbo v%d,v%d,class@0x%08x  (%d elements)",
+            vdst, vsrc1, ref, (s4) GET_REGISTER(vsrc1));
+        length = (s4) GET_REGISTER(vsrc1);
+        if (length < 0) {
+            dvmThrowNegativeArraySizeException(length);
+            GOTO_exceptionThrown();
+        }
+        arrayClass = dvmDexGetResolvedClass(methodClassDex, ref);
+        if (arrayClass == NULL) {
+            arrayClass = dvmResolveClass(curMethod->clazz, ref, false);
+            if (arrayClass == NULL)
+                GOTO_exceptionThrown();
+        }
+        /* verifier guarantees this is an array class */
+        assert(dvmIsArrayClass(arrayClass));
+        assert(dvmIsClassInitialized(arrayClass));
+
+        newArray = dvmAllocArrayByClass(arrayClass, length, ALLOC_DONT_TRACK);
+        if (newArray == NULL)
+            GOTO_exceptionThrown();
+        SET_REGISTER(vdst, (u4) newArray);
+    }
+    FINISH(5);
+OP_END
+
+/* File: c/OP_FILLED_NEW_ARRAY_JUMBO.c */
+HANDLE_OPCODE(OP_FILLED_NEW_ARRAY_JUMBO /*{vCCCC..v(CCCC+BBBB-1)}, class@AAAAAAAA*/)
+    GOTO_invoke(filledNewArray, true, true);
+OP_END
+
+/* File: c/OP_IGET_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_JUMBO,          "", Int, )
+OP_END
+
+/* File: c/OP_IGET_WIDE_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_WIDE_JUMBO,     "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_IGET_OBJECT_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_OBJECT_JUMBO,   "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_IGET_BOOLEAN_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_BOOLEAN_JUMBO,  "", Int, )
+OP_END
+
+/* File: c/OP_IGET_BYTE_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_BYTE_JUMBO,     "", Int, )
+OP_END
+
+/* File: c/OP_IGET_CHAR_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_CHAR_JUMBO,     "", Int, )
+OP_END
+
+/* File: c/OP_IGET_SHORT_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_SHORT_JUMBO,    "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_JUMBO,          "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_WIDE_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_WIDE_JUMBO,     "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_IPUT_OBJECT_JUMBO.c */
+/*
+ * The VM spec says we should verify that the reference being stored into
+ * the field is assignment compatible.  In practice, many popular VMs don't
+ * do this because it slows down a very common operation.  It's not so bad
+ * for us, since "dexopt" quickens it whenever possible, but it's still an
+ * issue.
+ *
+ * To make this spec-compliant, we'd need to add a ClassObject pointer to
+ * the Field struct, resolve the field's type descriptor at link or class
+ * init time, and then verify the type here.
+ */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_OBJECT_JUMBO,   "-object", Object, _AS_OBJECT)
+OP_END
+
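The comment above OP_IPUT_OBJECT_JUMBO notes that a spec-compliant store would verify that the stored reference is assignment compatible with the field's declared type. A rough sketch of what such a check could look like, written against hypothetical stand-in types rather than the real Field/ClassObject structures (interfaces are ignored, and none of these names exist in the Dalvik sources):

    /*
     * Illustrative sketch: walk the superclass chain to test assignability.
     * HypoClass and isAssignableTo are stand-ins invented for this example.
     */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct HypoClass {
        const struct HypoClass* super;   /* single-inheritance chain only */
    };

    static bool isAssignableTo(const struct HypoClass* cls,
                               const struct HypoClass* target)
    {
        for (; cls != NULL; cls = cls->super) {
            if (cls == target)
                return true;
        }
        return false;
    }

    int main(void)
    {
        struct HypoClass object = { NULL };
        struct HypoClass string = { &object };
        printf("String assignable to Object: %d\n",
               isAssignableTo(&string, &object));   /* 1 */
        printf("Object assignable to String: %d\n",
               isAssignableTo(&object, &string));   /* 0 */
        return 0;
    }
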
+/* File: c/OP_IPUT_BOOLEAN_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_BOOLEAN_JUMBO,  "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_BYTE_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_BYTE_JUMBO,     "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_CHAR_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_CHAR_JUMBO,     "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_SHORT_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_SHORT_JUMBO,    "", Int, )
+OP_END
+
+/* File: c/OP_SGET_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_JUMBO,          "", Int, )
+OP_END
+
+/* File: c/OP_SGET_WIDE_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_WIDE_JUMBO,     "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_SGET_OBJECT_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_OBJECT_JUMBO,   "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_SGET_BOOLEAN_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_BOOLEAN_JUMBO,  "", Int, )
+OP_END
+
+/* File: c/OP_SGET_BYTE_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_BYTE_JUMBO,     "", Int, )
+OP_END
+
+/* File: c/OP_SGET_CHAR_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_CHAR_JUMBO,     "", Int, )
+OP_END
+
+/* File: c/OP_SGET_SHORT_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_SHORT_JUMBO,    "", Int, )
+OP_END
+
+/* File: c/OP_SPUT_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_JUMBO,          "", Int, )
+OP_END
+
+/* File: c/OP_SPUT_WIDE_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_WIDE_JUMBO,     "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_SPUT_OBJECT_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_OBJECT_JUMBO,   "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_SPUT_BOOLEAN_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_BOOLEAN_JUMBO,          "", Int, )
+OP_END
+
+/* File: c/OP_SPUT_BYTE_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_BYTE_JUMBO,     "", Int, )
+OP_END
+
+/* File: c/OP_SPUT_CHAR_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_CHAR_JUMBO,     "", Int, )
+OP_END
+
+/* File: c/OP_SPUT_SHORT_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_SHORT_JUMBO,    "", Int, )
+OP_END
+
+/* File: c/OP_INVOKE_VIRTUAL_JUMBO.c */
+HANDLE_OPCODE(OP_INVOKE_VIRTUAL_JUMBO /*{vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA*/)
+    GOTO_invoke(invokeVirtual, true, true);
+OP_END
+
+/* File: c/OP_INVOKE_SUPER_JUMBO.c */
+HANDLE_OPCODE(OP_INVOKE_SUPER_JUMBO /*{vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA*/)
+    GOTO_invoke(invokeSuper, true, true);
+OP_END
+
+/* File: c/OP_INVOKE_DIRECT_JUMBO.c */
+HANDLE_OPCODE(OP_INVOKE_DIRECT_JUMBO /*{vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA*/)
+    GOTO_invoke(invokeDirect, true, true);
+OP_END
+
+/* File: c/OP_INVOKE_STATIC_JUMBO.c */
+HANDLE_OPCODE(OP_INVOKE_STATIC_JUMBO /*{vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA*/)
+    GOTO_invoke(invokeStatic, true, true);
+OP_END
+
+/* File: c/OP_INVOKE_INTERFACE_JUMBO.c */
+HANDLE_OPCODE(OP_INVOKE_INTERFACE_JUMBO /*{vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA*/)
+    GOTO_invoke(invokeInterface, true, true);
+OP_END
+
+/* File: c/OP_UNUSED_27FF.c */
+HANDLE_OPCODE(OP_UNUSED_27FF)
+OP_END
+
+/* File: c/OP_UNUSED_28FF.c */
+HANDLE_OPCODE(OP_UNUSED_28FF)
+OP_END
+
+/* File: c/OP_UNUSED_29FF.c */
+HANDLE_OPCODE(OP_UNUSED_29FF)
+OP_END
+
+/* File: c/OP_UNUSED_2AFF.c */
+HANDLE_OPCODE(OP_UNUSED_2AFF)
+OP_END
+
+/* File: c/OP_UNUSED_2BFF.c */
+HANDLE_OPCODE(OP_UNUSED_2BFF)
+OP_END
+
+/* File: c/OP_UNUSED_2CFF.c */
+HANDLE_OPCODE(OP_UNUSED_2CFF)
+OP_END
+
+/* File: c/OP_UNUSED_2DFF.c */
+HANDLE_OPCODE(OP_UNUSED_2DFF)
+OP_END
+
+/* File: c/OP_UNUSED_2EFF.c */
+HANDLE_OPCODE(OP_UNUSED_2EFF)
+OP_END
+
+/* File: c/OP_UNUSED_2FFF.c */
+HANDLE_OPCODE(OP_UNUSED_2FFF)
+OP_END
+
+/* File: c/OP_UNUSED_30FF.c */
+HANDLE_OPCODE(OP_UNUSED_30FF)
+OP_END
+
+/* File: c/OP_UNUSED_31FF.c */
+HANDLE_OPCODE(OP_UNUSED_31FF)
+OP_END
+
+/* File: c/OP_UNUSED_32FF.c */
+HANDLE_OPCODE(OP_UNUSED_32FF)
+OP_END
+
+/* File: c/OP_UNUSED_33FF.c */
+HANDLE_OPCODE(OP_UNUSED_33FF)
+OP_END
+
+/* File: c/OP_UNUSED_34FF.c */
+HANDLE_OPCODE(OP_UNUSED_34FF)
+OP_END
+
+/* File: c/OP_UNUSED_35FF.c */
+HANDLE_OPCODE(OP_UNUSED_35FF)
+OP_END
+
+/* File: c/OP_UNUSED_36FF.c */
+HANDLE_OPCODE(OP_UNUSED_36FF)
+OP_END
+
+/* File: c/OP_UNUSED_37FF.c */
+HANDLE_OPCODE(OP_UNUSED_37FF)
+OP_END
+
+/* File: c/OP_UNUSED_38FF.c */
+HANDLE_OPCODE(OP_UNUSED_38FF)
+OP_END
+
+/* File: c/OP_UNUSED_39FF.c */
+HANDLE_OPCODE(OP_UNUSED_39FF)
+OP_END
+
+/* File: c/OP_UNUSED_3AFF.c */
+HANDLE_OPCODE(OP_UNUSED_3AFF)
+OP_END
+
+/* File: c/OP_UNUSED_3BFF.c */
+HANDLE_OPCODE(OP_UNUSED_3BFF)
+OP_END
+
+/* File: c/OP_UNUSED_3CFF.c */
+HANDLE_OPCODE(OP_UNUSED_3CFF)
+OP_END
+
+/* File: c/OP_UNUSED_3DFF.c */
+HANDLE_OPCODE(OP_UNUSED_3DFF)
+OP_END
+
+/* File: c/OP_UNUSED_3EFF.c */
+HANDLE_OPCODE(OP_UNUSED_3EFF)
+OP_END
+
+/* File: c/OP_UNUSED_3FFF.c */
+HANDLE_OPCODE(OP_UNUSED_3FFF)
+OP_END
+
+/* File: c/OP_UNUSED_40FF.c */
+HANDLE_OPCODE(OP_UNUSED_40FF)
+OP_END
+
+/* File: c/OP_UNUSED_41FF.c */
+HANDLE_OPCODE(OP_UNUSED_41FF)
+OP_END
+
+/* File: c/OP_UNUSED_42FF.c */
+HANDLE_OPCODE(OP_UNUSED_42FF)
+OP_END
+
+/* File: c/OP_UNUSED_43FF.c */
+HANDLE_OPCODE(OP_UNUSED_43FF)
+OP_END
+
+/* File: c/OP_UNUSED_44FF.c */
+HANDLE_OPCODE(OP_UNUSED_44FF)
+OP_END
+
+/* File: c/OP_UNUSED_45FF.c */
+HANDLE_OPCODE(OP_UNUSED_45FF)
+OP_END
+
+/* File: c/OP_UNUSED_46FF.c */
+HANDLE_OPCODE(OP_UNUSED_46FF)
+OP_END
+
+/* File: c/OP_UNUSED_47FF.c */
+HANDLE_OPCODE(OP_UNUSED_47FF)
+OP_END
+
+/* File: c/OP_UNUSED_48FF.c */
+HANDLE_OPCODE(OP_UNUSED_48FF)
+OP_END
+
+/* File: c/OP_UNUSED_49FF.c */
+HANDLE_OPCODE(OP_UNUSED_49FF)
+OP_END
+
+/* File: c/OP_UNUSED_4AFF.c */
+HANDLE_OPCODE(OP_UNUSED_4AFF)
+OP_END
+
+/* File: c/OP_UNUSED_4BFF.c */
+HANDLE_OPCODE(OP_UNUSED_4BFF)
+OP_END
+
+/* File: c/OP_UNUSED_4CFF.c */
+HANDLE_OPCODE(OP_UNUSED_4CFF)
+OP_END
+
+/* File: c/OP_UNUSED_4DFF.c */
+HANDLE_OPCODE(OP_UNUSED_4DFF)
+OP_END
+
+/* File: c/OP_UNUSED_4EFF.c */
+HANDLE_OPCODE(OP_UNUSED_4EFF)
+OP_END
+
+/* File: c/OP_UNUSED_4FFF.c */
+HANDLE_OPCODE(OP_UNUSED_4FFF)
+OP_END
+
+/* File: c/OP_UNUSED_50FF.c */
+HANDLE_OPCODE(OP_UNUSED_50FF)
+OP_END
+
+/* File: c/OP_UNUSED_51FF.c */
+HANDLE_OPCODE(OP_UNUSED_51FF)
+OP_END
+
+/* File: c/OP_UNUSED_52FF.c */
+HANDLE_OPCODE(OP_UNUSED_52FF)
+OP_END
+
+/* File: c/OP_UNUSED_53FF.c */
+HANDLE_OPCODE(OP_UNUSED_53FF)
+OP_END
+
+/* File: c/OP_UNUSED_54FF.c */
+HANDLE_OPCODE(OP_UNUSED_54FF)
+OP_END
+
+/* File: c/OP_UNUSED_55FF.c */
+HANDLE_OPCODE(OP_UNUSED_55FF)
+OP_END
+
+/* File: c/OP_UNUSED_56FF.c */
+HANDLE_OPCODE(OP_UNUSED_56FF)
+OP_END
+
+/* File: c/OP_UNUSED_57FF.c */
+HANDLE_OPCODE(OP_UNUSED_57FF)
+OP_END
+
+/* File: c/OP_UNUSED_58FF.c */
+HANDLE_OPCODE(OP_UNUSED_58FF)
+OP_END
+
+/* File: c/OP_UNUSED_59FF.c */
+HANDLE_OPCODE(OP_UNUSED_59FF)
+OP_END
+
+/* File: c/OP_UNUSED_5AFF.c */
+HANDLE_OPCODE(OP_UNUSED_5AFF)
+OP_END
+
+/* File: c/OP_UNUSED_5BFF.c */
+HANDLE_OPCODE(OP_UNUSED_5BFF)
+OP_END
+
+/* File: c/OP_UNUSED_5CFF.c */
+HANDLE_OPCODE(OP_UNUSED_5CFF)
+OP_END
+
+/* File: c/OP_UNUSED_5DFF.c */
+HANDLE_OPCODE(OP_UNUSED_5DFF)
+OP_END
+
+/* File: c/OP_UNUSED_5EFF.c */
+HANDLE_OPCODE(OP_UNUSED_5EFF)
+OP_END
+
+/* File: c/OP_UNUSED_5FFF.c */
+HANDLE_OPCODE(OP_UNUSED_5FFF)
+OP_END
+
+/* File: c/OP_UNUSED_60FF.c */
+HANDLE_OPCODE(OP_UNUSED_60FF)
+OP_END
+
+/* File: c/OP_UNUSED_61FF.c */
+HANDLE_OPCODE(OP_UNUSED_61FF)
+OP_END
+
+/* File: c/OP_UNUSED_62FF.c */
+HANDLE_OPCODE(OP_UNUSED_62FF)
+OP_END
+
+/* File: c/OP_UNUSED_63FF.c */
+HANDLE_OPCODE(OP_UNUSED_63FF)
+OP_END
+
+/* File: c/OP_UNUSED_64FF.c */
+HANDLE_OPCODE(OP_UNUSED_64FF)
+OP_END
+
+/* File: c/OP_UNUSED_65FF.c */
+HANDLE_OPCODE(OP_UNUSED_65FF)
+OP_END
+
+/* File: c/OP_UNUSED_66FF.c */
+HANDLE_OPCODE(OP_UNUSED_66FF)
+OP_END
+
+/* File: c/OP_UNUSED_67FF.c */
+HANDLE_OPCODE(OP_UNUSED_67FF)
+OP_END
+
+/* File: c/OP_UNUSED_68FF.c */
+HANDLE_OPCODE(OP_UNUSED_68FF)
+OP_END
+
+/* File: c/OP_UNUSED_69FF.c */
+HANDLE_OPCODE(OP_UNUSED_69FF)
+OP_END
+
+/* File: c/OP_UNUSED_6AFF.c */
+HANDLE_OPCODE(OP_UNUSED_6AFF)
+OP_END
+
+/* File: c/OP_UNUSED_6BFF.c */
+HANDLE_OPCODE(OP_UNUSED_6BFF)
+OP_END
+
+/* File: c/OP_UNUSED_6CFF.c */
+HANDLE_OPCODE(OP_UNUSED_6CFF)
+OP_END
+
+/* File: c/OP_UNUSED_6DFF.c */
+HANDLE_OPCODE(OP_UNUSED_6DFF)
+OP_END
+
+/* File: c/OP_UNUSED_6EFF.c */
+HANDLE_OPCODE(OP_UNUSED_6EFF)
+OP_END
+
+/* File: c/OP_UNUSED_6FFF.c */
+HANDLE_OPCODE(OP_UNUSED_6FFF)
+OP_END
+
+/* File: c/OP_UNUSED_70FF.c */
+HANDLE_OPCODE(OP_UNUSED_70FF)
+OP_END
+
+/* File: c/OP_UNUSED_71FF.c */
+HANDLE_OPCODE(OP_UNUSED_71FF)
+OP_END
+
+/* File: c/OP_UNUSED_72FF.c */
+HANDLE_OPCODE(OP_UNUSED_72FF)
+OP_END
+
+/* File: c/OP_UNUSED_73FF.c */
+HANDLE_OPCODE(OP_UNUSED_73FF)
+OP_END
+
+/* File: c/OP_UNUSED_74FF.c */
+HANDLE_OPCODE(OP_UNUSED_74FF)
+OP_END
+
+/* File: c/OP_UNUSED_75FF.c */
+HANDLE_OPCODE(OP_UNUSED_75FF)
+OP_END
+
+/* File: c/OP_UNUSED_76FF.c */
+HANDLE_OPCODE(OP_UNUSED_76FF)
+OP_END
+
+/* File: c/OP_UNUSED_77FF.c */
+HANDLE_OPCODE(OP_UNUSED_77FF)
+OP_END
+
+/* File: c/OP_UNUSED_78FF.c */
+HANDLE_OPCODE(OP_UNUSED_78FF)
+OP_END
+
+/* File: c/OP_UNUSED_79FF.c */
+HANDLE_OPCODE(OP_UNUSED_79FF)
+OP_END
+
+/* File: c/OP_UNUSED_7AFF.c */
+HANDLE_OPCODE(OP_UNUSED_7AFF)
+OP_END
+
+/* File: c/OP_UNUSED_7BFF.c */
+HANDLE_OPCODE(OP_UNUSED_7BFF)
+OP_END
+
+/* File: c/OP_UNUSED_7CFF.c */
+HANDLE_OPCODE(OP_UNUSED_7CFF)
+OP_END
+
+/* File: c/OP_UNUSED_7DFF.c */
+HANDLE_OPCODE(OP_UNUSED_7DFF)
+OP_END
+
+/* File: c/OP_UNUSED_7EFF.c */
+HANDLE_OPCODE(OP_UNUSED_7EFF)
+OP_END
+
+/* File: c/OP_UNUSED_7FFF.c */
+HANDLE_OPCODE(OP_UNUSED_7FFF)
+OP_END
+
+/* File: c/OP_UNUSED_80FF.c */
+HANDLE_OPCODE(OP_UNUSED_80FF)
+OP_END
+
+/* File: c/OP_UNUSED_81FF.c */
+HANDLE_OPCODE(OP_UNUSED_81FF)
+OP_END
+
+/* File: c/OP_UNUSED_82FF.c */
+HANDLE_OPCODE(OP_UNUSED_82FF)
+OP_END
+
+/* File: c/OP_UNUSED_83FF.c */
+HANDLE_OPCODE(OP_UNUSED_83FF)
+OP_END
+
+/* File: c/OP_UNUSED_84FF.c */
+HANDLE_OPCODE(OP_UNUSED_84FF)
+OP_END
+
+/* File: c/OP_UNUSED_85FF.c */
+HANDLE_OPCODE(OP_UNUSED_85FF)
+OP_END
+
+/* File: c/OP_UNUSED_86FF.c */
+HANDLE_OPCODE(OP_UNUSED_86FF)
+OP_END
+
+/* File: c/OP_UNUSED_87FF.c */
+HANDLE_OPCODE(OP_UNUSED_87FF)
+OP_END
+
+/* File: c/OP_UNUSED_88FF.c */
+HANDLE_OPCODE(OP_UNUSED_88FF)
+OP_END
+
+/* File: c/OP_UNUSED_89FF.c */
+HANDLE_OPCODE(OP_UNUSED_89FF)
+OP_END
+
+/* File: c/OP_UNUSED_8AFF.c */
+HANDLE_OPCODE(OP_UNUSED_8AFF)
+OP_END
+
+/* File: c/OP_UNUSED_8BFF.c */
+HANDLE_OPCODE(OP_UNUSED_8BFF)
+OP_END
+
+/* File: c/OP_UNUSED_8CFF.c */
+HANDLE_OPCODE(OP_UNUSED_8CFF)
+OP_END
+
+/* File: c/OP_UNUSED_8DFF.c */
+HANDLE_OPCODE(OP_UNUSED_8DFF)
+OP_END
+
+/* File: c/OP_UNUSED_8EFF.c */
+HANDLE_OPCODE(OP_UNUSED_8EFF)
+OP_END
+
+/* File: c/OP_UNUSED_8FFF.c */
+HANDLE_OPCODE(OP_UNUSED_8FFF)
+OP_END
+
+/* File: c/OP_UNUSED_90FF.c */
+HANDLE_OPCODE(OP_UNUSED_90FF)
+OP_END
+
+/* File: c/OP_UNUSED_91FF.c */
+HANDLE_OPCODE(OP_UNUSED_91FF)
+OP_END
+
+/* File: c/OP_UNUSED_92FF.c */
+HANDLE_OPCODE(OP_UNUSED_92FF)
+OP_END
+
+/* File: c/OP_UNUSED_93FF.c */
+HANDLE_OPCODE(OP_UNUSED_93FF)
+OP_END
+
+/* File: c/OP_UNUSED_94FF.c */
+HANDLE_OPCODE(OP_UNUSED_94FF)
+OP_END
+
+/* File: c/OP_UNUSED_95FF.c */
+HANDLE_OPCODE(OP_UNUSED_95FF)
+OP_END
+
+/* File: c/OP_UNUSED_96FF.c */
+HANDLE_OPCODE(OP_UNUSED_96FF)
+OP_END
+
+/* File: c/OP_UNUSED_97FF.c */
+HANDLE_OPCODE(OP_UNUSED_97FF)
+OP_END
+
+/* File: c/OP_UNUSED_98FF.c */
+HANDLE_OPCODE(OP_UNUSED_98FF)
+OP_END
+
+/* File: c/OP_UNUSED_99FF.c */
+HANDLE_OPCODE(OP_UNUSED_99FF)
+OP_END
+
+/* File: c/OP_UNUSED_9AFF.c */
+HANDLE_OPCODE(OP_UNUSED_9AFF)
+OP_END
+
+/* File: c/OP_UNUSED_9BFF.c */
+HANDLE_OPCODE(OP_UNUSED_9BFF)
+OP_END
+
+/* File: c/OP_UNUSED_9CFF.c */
+HANDLE_OPCODE(OP_UNUSED_9CFF)
+OP_END
+
+/* File: c/OP_UNUSED_9DFF.c */
+HANDLE_OPCODE(OP_UNUSED_9DFF)
+OP_END
+
+/* File: c/OP_UNUSED_9EFF.c */
+HANDLE_OPCODE(OP_UNUSED_9EFF)
+OP_END
+
+/* File: c/OP_UNUSED_9FFF.c */
+HANDLE_OPCODE(OP_UNUSED_9FFF)
+OP_END
+
+/* File: c/OP_UNUSED_A0FF.c */
+HANDLE_OPCODE(OP_UNUSED_A0FF)
+OP_END
+
+/* File: c/OP_UNUSED_A1FF.c */
+HANDLE_OPCODE(OP_UNUSED_A1FF)
+OP_END
+
+/* File: c/OP_UNUSED_A2FF.c */
+HANDLE_OPCODE(OP_UNUSED_A2FF)
+OP_END
+
+/* File: c/OP_UNUSED_A3FF.c */
+HANDLE_OPCODE(OP_UNUSED_A3FF)
+OP_END
+
+/* File: c/OP_UNUSED_A4FF.c */
+HANDLE_OPCODE(OP_UNUSED_A4FF)
+OP_END
+
+/* File: c/OP_UNUSED_A5FF.c */
+HANDLE_OPCODE(OP_UNUSED_A5FF)
+OP_END
+
+/* File: c/OP_UNUSED_A6FF.c */
+HANDLE_OPCODE(OP_UNUSED_A6FF)
+OP_END
+
+/* File: c/OP_UNUSED_A7FF.c */
+HANDLE_OPCODE(OP_UNUSED_A7FF)
+OP_END
+
+/* File: c/OP_UNUSED_A8FF.c */
+HANDLE_OPCODE(OP_UNUSED_A8FF)
+OP_END
+
+/* File: c/OP_UNUSED_A9FF.c */
+HANDLE_OPCODE(OP_UNUSED_A9FF)
+OP_END
+
+/* File: c/OP_UNUSED_AAFF.c */
+HANDLE_OPCODE(OP_UNUSED_AAFF)
+OP_END
+
+/* File: c/OP_UNUSED_ABFF.c */
+HANDLE_OPCODE(OP_UNUSED_ABFF)
+OP_END
+
+/* File: c/OP_UNUSED_ACFF.c */
+HANDLE_OPCODE(OP_UNUSED_ACFF)
+OP_END
+
+/* File: c/OP_UNUSED_ADFF.c */
+HANDLE_OPCODE(OP_UNUSED_ADFF)
+OP_END
+
+/* File: c/OP_UNUSED_AEFF.c */
+HANDLE_OPCODE(OP_UNUSED_AEFF)
+OP_END
+
+/* File: c/OP_UNUSED_AFFF.c */
+HANDLE_OPCODE(OP_UNUSED_AFFF)
+OP_END
+
+/* File: c/OP_UNUSED_B0FF.c */
+HANDLE_OPCODE(OP_UNUSED_B0FF)
+OP_END
+
+/* File: c/OP_UNUSED_B1FF.c */
+HANDLE_OPCODE(OP_UNUSED_B1FF)
+OP_END
+
+/* File: c/OP_UNUSED_B2FF.c */
+HANDLE_OPCODE(OP_UNUSED_B2FF)
+OP_END
+
+/* File: c/OP_UNUSED_B3FF.c */
+HANDLE_OPCODE(OP_UNUSED_B3FF)
+OP_END
+
+/* File: c/OP_UNUSED_B4FF.c */
+HANDLE_OPCODE(OP_UNUSED_B4FF)
+OP_END
+
+/* File: c/OP_UNUSED_B5FF.c */
+HANDLE_OPCODE(OP_UNUSED_B5FF)
+OP_END
+
+/* File: c/OP_UNUSED_B6FF.c */
+HANDLE_OPCODE(OP_UNUSED_B6FF)
+OP_END
+
+/* File: c/OP_UNUSED_B7FF.c */
+HANDLE_OPCODE(OP_UNUSED_B7FF)
+OP_END
+
+/* File: c/OP_UNUSED_B8FF.c */
+HANDLE_OPCODE(OP_UNUSED_B8FF)
+OP_END
+
+/* File: c/OP_UNUSED_B9FF.c */
+HANDLE_OPCODE(OP_UNUSED_B9FF)
+OP_END
+
+/* File: c/OP_UNUSED_BAFF.c */
+HANDLE_OPCODE(OP_UNUSED_BAFF)
+OP_END
+
+/* File: c/OP_UNUSED_BBFF.c */
+HANDLE_OPCODE(OP_UNUSED_BBFF)
+OP_END
+
+/* File: c/OP_UNUSED_BCFF.c */
+HANDLE_OPCODE(OP_UNUSED_BCFF)
+OP_END
+
+/* File: c/OP_UNUSED_BDFF.c */
+HANDLE_OPCODE(OP_UNUSED_BDFF)
+OP_END
+
+/* File: c/OP_UNUSED_BEFF.c */
+HANDLE_OPCODE(OP_UNUSED_BEFF)
+OP_END
+
+/* File: c/OP_UNUSED_BFFF.c */
+HANDLE_OPCODE(OP_UNUSED_BFFF)
+OP_END
+
+/* File: c/OP_UNUSED_C0FF.c */
+HANDLE_OPCODE(OP_UNUSED_C0FF)
+OP_END
+
+/* File: c/OP_UNUSED_C1FF.c */
+HANDLE_OPCODE(OP_UNUSED_C1FF)
+OP_END
+
+/* File: c/OP_UNUSED_C2FF.c */
+HANDLE_OPCODE(OP_UNUSED_C2FF)
+OP_END
+
+/* File: c/OP_UNUSED_C3FF.c */
+HANDLE_OPCODE(OP_UNUSED_C3FF)
+OP_END
+
+/* File: c/OP_UNUSED_C4FF.c */
+HANDLE_OPCODE(OP_UNUSED_C4FF)
+OP_END
+
+/* File: c/OP_UNUSED_C5FF.c */
+HANDLE_OPCODE(OP_UNUSED_C5FF)
+OP_END
+
+/* File: c/OP_UNUSED_C6FF.c */
+HANDLE_OPCODE(OP_UNUSED_C6FF)
+OP_END
+
+/* File: c/OP_UNUSED_C7FF.c */
+HANDLE_OPCODE(OP_UNUSED_C7FF)
+OP_END
+
+/* File: c/OP_UNUSED_C8FF.c */
+HANDLE_OPCODE(OP_UNUSED_C8FF)
+OP_END
+
+/* File: c/OP_UNUSED_C9FF.c */
+HANDLE_OPCODE(OP_UNUSED_C9FF)
+OP_END
+
+/* File: c/OP_UNUSED_CAFF.c */
+HANDLE_OPCODE(OP_UNUSED_CAFF)
+OP_END
+
+/* File: c/OP_UNUSED_CBFF.c */
+HANDLE_OPCODE(OP_UNUSED_CBFF)
+OP_END
+
+/* File: c/OP_UNUSED_CCFF.c */
+HANDLE_OPCODE(OP_UNUSED_CCFF)
+OP_END
+
+/* File: c/OP_UNUSED_CDFF.c */
+HANDLE_OPCODE(OP_UNUSED_CDFF)
+OP_END
+
+/* File: c/OP_UNUSED_CEFF.c */
+HANDLE_OPCODE(OP_UNUSED_CEFF)
+OP_END
+
+/* File: c/OP_UNUSED_CFFF.c */
+HANDLE_OPCODE(OP_UNUSED_CFFF)
+OP_END
+
+/* File: c/OP_UNUSED_D0FF.c */
+HANDLE_OPCODE(OP_UNUSED_D0FF)
+OP_END
+
+/* File: c/OP_UNUSED_D1FF.c */
+HANDLE_OPCODE(OP_UNUSED_D1FF)
+OP_END
+
+/* File: c/OP_UNUSED_D2FF.c */
+HANDLE_OPCODE(OP_UNUSED_D2FF)
+OP_END
+
+/* File: c/OP_UNUSED_D3FF.c */
+HANDLE_OPCODE(OP_UNUSED_D3FF)
+OP_END
+
+/* File: c/OP_UNUSED_D4FF.c */
+HANDLE_OPCODE(OP_UNUSED_D4FF)
+OP_END
+
+/* File: c/OP_UNUSED_D5FF.c */
+HANDLE_OPCODE(OP_UNUSED_D5FF)
+OP_END
+
+/* File: c/OP_UNUSED_D6FF.c */
+HANDLE_OPCODE(OP_UNUSED_D6FF)
+OP_END
+
+/* File: c/OP_UNUSED_D7FF.c */
+HANDLE_OPCODE(OP_UNUSED_D7FF)
+OP_END
+
+/* File: c/OP_UNUSED_D8FF.c */
+HANDLE_OPCODE(OP_UNUSED_D8FF)
+OP_END
+
+/* File: c/OP_UNUSED_D9FF.c */
+HANDLE_OPCODE(OP_UNUSED_D9FF)
+OP_END
+
+/* File: c/OP_UNUSED_DAFF.c */
+HANDLE_OPCODE(OP_UNUSED_DAFF)
+OP_END
+
+/* File: c/OP_UNUSED_DBFF.c */
+HANDLE_OPCODE(OP_UNUSED_DBFF)
+OP_END
+
+/* File: c/OP_UNUSED_DCFF.c */
+HANDLE_OPCODE(OP_UNUSED_DCFF)
+OP_END
+
+/* File: c/OP_UNUSED_DDFF.c */
+HANDLE_OPCODE(OP_UNUSED_DDFF)
+OP_END
+
+/* File: c/OP_UNUSED_DEFF.c */
+HANDLE_OPCODE(OP_UNUSED_DEFF)
+OP_END
+
+/* File: c/OP_UNUSED_DFFF.c */
+HANDLE_OPCODE(OP_UNUSED_DFFF)
+OP_END
+
+/* File: c/OP_UNUSED_E0FF.c */
+HANDLE_OPCODE(OP_UNUSED_E0FF)
+OP_END
+
+/* File: c/OP_UNUSED_E1FF.c */
+HANDLE_OPCODE(OP_UNUSED_E1FF)
+OP_END
+
+/* File: c/OP_UNUSED_E2FF.c */
+HANDLE_OPCODE(OP_UNUSED_E2FF)
+OP_END
+
+/* File: c/OP_UNUSED_E3FF.c */
+HANDLE_OPCODE(OP_UNUSED_E3FF)
+OP_END
+
+/* File: c/OP_UNUSED_E4FF.c */
+HANDLE_OPCODE(OP_UNUSED_E4FF)
+OP_END
+
+/* File: c/OP_UNUSED_E5FF.c */
+HANDLE_OPCODE(OP_UNUSED_E5FF)
+OP_END
+
+/* File: c/OP_UNUSED_E6FF.c */
+HANDLE_OPCODE(OP_UNUSED_E6FF)
+OP_END
+
+/* File: c/OP_UNUSED_E7FF.c */
+HANDLE_OPCODE(OP_UNUSED_E7FF)
+OP_END
+
+/* File: c/OP_UNUSED_E8FF.c */
+HANDLE_OPCODE(OP_UNUSED_E8FF)
+OP_END
+
+/* File: c/OP_UNUSED_E9FF.c */
+HANDLE_OPCODE(OP_UNUSED_E9FF)
+OP_END
+
+/* File: c/OP_UNUSED_EAFF.c */
+HANDLE_OPCODE(OP_UNUSED_EAFF)
+OP_END
+
+/* File: c/OP_UNUSED_EBFF.c */
+HANDLE_OPCODE(OP_UNUSED_EBFF)
+OP_END
+
+/* File: c/OP_UNUSED_ECFF.c */
+HANDLE_OPCODE(OP_UNUSED_ECFF)
+OP_END
+
+/* File: c/OP_UNUSED_EDFF.c */
+HANDLE_OPCODE(OP_UNUSED_EDFF)
+OP_END
+
+/* File: c/OP_UNUSED_EEFF.c */
+HANDLE_OPCODE(OP_UNUSED_EEFF)
+OP_END
+
+/* File: c/OP_UNUSED_EFFF.c */
+HANDLE_OPCODE(OP_UNUSED_EFFF)
+OP_END
+
+/* File: c/OP_UNUSED_F0FF.c */
+HANDLE_OPCODE(OP_UNUSED_F0FF)
+OP_END
+
+/* File: c/OP_UNUSED_F1FF.c */
+HANDLE_OPCODE(OP_UNUSED_F1FF)
+    /*
      * In portable interp, most unused opcodes will fall through to here.
      */
-    LOGE("unknown opcode 0x%02x\n", INST_INST(inst));
+    LOGE("unknown opcode 0x%04x\n", inst);
     dvmAbort();
     FINISH(1);
 OP_END
 
+/* File: c/OP_INVOKE_OBJECT_INIT_JUMBO.c */
+HANDLE_OPCODE(OP_INVOKE_OBJECT_INIT_JUMBO /*{vCCCC..vNNNN}, meth@AAAAAAAA*/)
+    {
+        Object* obj;
+
+        vsrc1 = FETCH(4);               /* reg number of "this" pointer */
+        obj = GET_REGISTER_AS_OBJECT(vsrc1);
+
+        if (!checkForNullExportPC(obj, fp, pc))
+            GOTO_exceptionThrown();
+
+        /*
+         * The object should be marked "finalizable" when Object.<init>
+         * completes normally.  We're going to assume it does complete
+         * (by virtue of being nothing but a return-void) and set it now.
+         */
+        if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE)) {
+            dvmSetFinalizable(obj);
+        }
+
+#if INTERP_TYPE == INTERP_DBG
+        if (DEBUGGER_ACTIVE) {
+            /* behave like OP_INVOKE_DIRECT_RANGE */
+            GOTO_invoke(invokeDirect, true, true);
+        }
+#endif
+        FINISH(5);
+    }
+OP_END
+
+/* File: c/OP_IGET_VOLATILE_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_VOLATILE_JUMBO, "-volatile/jumbo", IntVolatile, )
+OP_END
+
+/* File: c/OP_IGET_WIDE_VOLATILE_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE)
+OP_END
+
+/* File: c/OP_IGET_OBJECT_VOLATILE_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_IPUT_VOLATILE_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_VOLATILE_JUMBO, "-volatile/jumbo", IntVolatile, )
+OP_END
+
+/* File: c/OP_IPUT_WIDE_VOLATILE_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE)
+OP_END
+
+/* File: c/OP_IPUT_OBJECT_VOLATILE_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_SGET_VOLATILE_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_VOLATILE_JUMBO, "-volatile/jumbo", IntVolatile, )
+OP_END
+
+/* File: c/OP_SGET_WIDE_VOLATILE_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE)
+OP_END
+
+/* File: c/OP_SGET_OBJECT_VOLATILE_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_SPUT_VOLATILE_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_VOLATILE_JUMBO, "-volatile", IntVolatile, )
+OP_END
+
+/* File: c/OP_SPUT_WIDE_VOLATILE_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE)
+OP_END
+
+/* File: c/OP_SPUT_OBJECT_VOLATILE_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_THROW_VERIFICATION_ERROR_JUMBO.c */
+HANDLE_OPCODE(OP_THROW_VERIFICATION_ERROR_JUMBO)
+    EXPORT_PC();
+    vsrc1 = FETCH(3);
+    ref = FETCH(1) | (u4)FETCH(2) << 16;      /* class/field/method ref */
+    dvmThrowVerificationError(curMethod, vsrc1, ref);
+    GOTO_exceptionThrown();
+OP_END
+
 /* File: c/gotoTargets.c */
 /*
  * C footer.  This has some common code shared by the various targets.
@@ -3413,7 +4752,7 @@
  * next instruction.  Here, these are subroutines that return to the caller.
  */
 
-GOTO_TARGET(filledNewArray, bool methodCallRange)
+GOTO_TARGET(filledNewArray, bool methodCallRange, bool jumboFormat)
     {
         ClassObject* arrayClass;
         ArrayObject* newArray;
@@ -3424,19 +4763,28 @@
 
         EXPORT_PC();
 
-        ref = FETCH(1);             /* class ref */
-        vdst = FETCH(2);            /* first 4 regs -or- range base */
-
-        if (methodCallRange) {
-            vsrc1 = INST_AA(inst);  /* #of elements */
-            arg5 = -1;              /* silence compiler warning */
-            ILOGV("|filled-new-array-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* class ref */
+            vsrc1 = FETCH(3);                     /* #of elements */
+            vdst = FETCH(4);                      /* range base */
+            arg5 = -1;                            /* silence compiler warning */
+            ILOGV("|filled-new-array/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
         } else {
-            arg5 = INST_A(inst);
-            vsrc1 = INST_B(inst);   /* #of elements */
-            ILOGV("|filled-new-array args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1, ref, vdst, arg5);
+            ref = FETCH(1);             /* class ref */
+            vdst = FETCH(2);            /* first 4 regs -or- range base */
+
+            if (methodCallRange) {
+                vsrc1 = INST_AA(inst);  /* #of elements */
+                arg5 = -1;              /* silence compiler warning */
+                ILOGV("|filled-new-array-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+            } else {
+                arg5 = INST_A(inst);
+                vsrc1 = INST_B(inst);   /* #of elements */
+                ILOGV("|filled-new-array args=%d @0x%04x {regs=0x%04x %x}",
+                   vsrc1, ref, vdst, arg5);
+            }
         }
 
         /*
@@ -3450,7 +4798,7 @@
         }
         /*
         if (!dvmIsArrayClass(arrayClass)) {
-            dvmThrowException("Ljava/lang/RuntimeError;",
+            dvmThrowRuntimeException(
                 "filled-new-array needs array class");
             GOTO_exceptionThrown();
         }
@@ -3466,13 +4814,12 @@
         typeCh = arrayClass->descriptor[1];
         if (typeCh == 'D' || typeCh == 'J') {
             /* category 2 primitives not allowed */
-            dvmThrowException("Ljava/lang/RuntimeError;",
-                "bad filled array req");
+            dvmThrowRuntimeException("bad filled array req");
             GOTO_exceptionThrown();
         } else if (typeCh != 'L' && typeCh != '[' && typeCh != 'I') {
             /* TODO: requires multiple "fill in" loops with different widths */
             LOGE("non-int primitives not implemented\n");
-            dvmThrowException("Ljava/lang/InternalError;",
+            dvmThrowInternalError(
                 "filled-new-array not implemented for anything but 'int'");
             GOTO_exceptionThrown();
         }
@@ -3505,35 +4852,49 @@
 
         retval.l = newArray;
     }
-    FINISH(3);
+    if (jumboFormat) {
+        FINISH(5);
+    } else {
+        FINISH(3);
+    }
 GOTO_TARGET_END
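
In the invoke targets that follow, the jumbo decode path does ADJUST_PC(2) before falling into the shared call machinery. The reason is instruction width: a 16-bit-format invoke occupies 3 code units while a jumbo invoke occupies 5, so pre-advancing the pc by 2 lets the common return path always step by 3. A small arithmetic sketch (the array and names are illustrative only):

    /*
     * Illustrative sketch: why ADJUST_PC(2) plus a shared FINISH(3) lands
     * the pc on the right next instruction for both invoke widths.
     */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint16_t u2;

    int main(void)
    {
        u2 code[16] = { 0 };                 /* pretend method code */
        const u2* pc = code;

        const u2* jumboPc    = pc + 2;       /* ADJUST_PC(2) at decode time */
        const u2* afterJumbo = jumboPc + 3;  /* shared FINISH(3) on return */
        const u2* afterStd   = pc + 3;       /* 16-bit-format invoke width */

        printf("jumbo invoke advances %td units\n", afterJumbo - pc);  /* 5 */
        printf("std invoke advances   %td units\n", afterStd - pc);    /* 3 */
        return 0;
    }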
 
 
-GOTO_TARGET(invokeVirtual, bool methodCallRange)
+GOTO_TARGET(invokeVirtual, bool methodCallRange, bool jumboFormat)
     {
         Method* baseMethod;
         Object* thisPtr;
 
         EXPORT_PC();
 
-        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-        ref = FETCH(1);             /* method ref */
-        vdst = FETCH(2);            /* 4 regs -or- first reg */
-
-        /*
-         * The object against which we are executing a method is always
-         * in the first argument.
-         */
-        if (methodCallRange) {
-            assert(vsrc1 > 0);
-            ILOGV("|invoke-virtual-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+            vsrc1 = FETCH(3);                     /* count */
+            vdst = FETCH(4);                      /* first reg */
+            ADJUST_PC(2);     /* advance pc partially to make returns easier */
+            ILOGV("|invoke-virtual/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
             thisPtr = (Object*) GET_REGISTER(vdst);
         } else {
-            assert((vsrc1>>4) > 0);
-            ILOGV("|invoke-virtual args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
-            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+            ref = FETCH(1);             /* method ref */
+            vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            if (methodCallRange) {
+                assert(vsrc1 > 0);
+                ILOGV("|invoke-virtual-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+                thisPtr = (Object*) GET_REGISTER(vdst);
+            } else {
+                assert((vsrc1>>4) > 0);
+                ILOGV("|invoke-virtual args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+                thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+            }
         }
 
         if (!checkForNull(thisPtr))
@@ -3574,8 +4935,7 @@
              * Works fine unless Sub stops providing an implementation of
              * the method.
              */
-            dvmThrowException("Ljava/lang/AbstractMethodError;",
-                "abstract method not implemented");
+            dvmThrowAbstractMethodError("abstract method not implemented");
             GOTO_exceptionThrown();
         }
 #else
@@ -3605,26 +4965,37 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeSuper, bool methodCallRange)
+GOTO_TARGET(invokeSuper, bool methodCallRange, bool jumboFormat)
     {
         Method* baseMethod;
         u2 thisReg;
 
         EXPORT_PC();
 
-        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-        ref = FETCH(1);             /* method ref */
-        vdst = FETCH(2);            /* 4 regs -or- first reg */
-
-        if (methodCallRange) {
-            ILOGV("|invoke-super-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+            vsrc1 = FETCH(3);                     /* count */
+            vdst = FETCH(4);                      /* first reg */
+            ADJUST_PC(2);     /* advance pc partially to make returns easier */
+            ILOGV("|invoke-super/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
             thisReg = vdst;
         } else {
-            ILOGV("|invoke-super args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
-            thisReg = vdst & 0x0f;
+            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+            ref = FETCH(1);             /* method ref */
+            vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+            if (methodCallRange) {
+                ILOGV("|invoke-super-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+                thisReg = vdst;
+            } else {
+                ILOGV("|invoke-super args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+                thisReg = vdst & 0x0f;
+            }
         }
+
         /* impossible in well-formed code, but we must check nevertheless */
         if (!checkForNull((Object*) GET_REGISTER(thisReg)))
             GOTO_exceptionThrown();
@@ -3659,15 +5030,13 @@
              * Method does not exist in the superclass.  Could happen if
              * superclass gets updated.
              */
-            dvmThrowException("Ljava/lang/NoSuchMethodError;",
-                baseMethod->name);
+            dvmThrowNoSuchMethodError(baseMethod->name);
             GOTO_exceptionThrown();
         }
         methodToCall = curMethod->clazz->super->vtable[baseMethod->methodIndex];
 #if 0
         if (dvmIsAbstractMethod(methodToCall)) {
-            dvmThrowException("Ljava/lang/AbstractMethodError;",
-                "abstract method not implemented");
+            dvmThrowAbstractMethodError("abstract method not implemented");
             GOTO_exceptionThrown();
         }
 #else
@@ -3683,32 +5052,43 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeInterface, bool methodCallRange)
+GOTO_TARGET(invokeInterface, bool methodCallRange, bool jumboFormat)
     {
         Object* thisPtr;
         ClassObject* thisClass;
 
         EXPORT_PC();
 
-        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-        ref = FETCH(1);             /* method ref */
-        vdst = FETCH(2);            /* 4 regs -or- first reg */
-
-        /*
-         * The object against which we are executing a method is always
-         * in the first argument.
-         */
-        if (methodCallRange) {
-            assert(vsrc1 > 0);
-            ILOGV("|invoke-interface-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+            vsrc1 = FETCH(3);                     /* count */
+            vdst = FETCH(4);                      /* first reg */
+            ADJUST_PC(2);     /* advance pc partially to make returns easier */
+            ILOGV("|invoke-interface/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
             thisPtr = (Object*) GET_REGISTER(vdst);
         } else {
-            assert((vsrc1>>4) > 0);
-            ILOGV("|invoke-interface args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
-            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+            ref = FETCH(1);             /* method ref */
+            vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            if (methodCallRange) {
+                assert(vsrc1 > 0);
+                ILOGV("|invoke-interface-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+                thisPtr = (Object*) GET_REGISTER(vdst);
+            } else {
+                assert((vsrc1>>4) > 0);
+                ILOGV("|invoke-interface args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+                thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+            }
         }
+
         if (!checkForNull(thisPtr))
             GOTO_exceptionThrown();
 
@@ -3733,25 +5113,36 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeDirect, bool methodCallRange)
+GOTO_TARGET(invokeDirect, bool methodCallRange, bool jumboFormat)
     {
         u2 thisReg;
 
-        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-        ref = FETCH(1);             /* method ref */
-        vdst = FETCH(2);            /* 4 regs -or- first reg */
-
         EXPORT_PC();
 
-        if (methodCallRange) {
-            ILOGV("|invoke-direct-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+            vsrc1 = FETCH(3);                     /* count */
+            vdst = FETCH(4);                      /* first reg */
+            ADJUST_PC(2);     /* advance pc partially to make returns easier */
+            ILOGV("|invoke-direct/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
             thisReg = vdst;
         } else {
-            ILOGV("|invoke-direct args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
-            thisReg = vdst & 0x0f;
+            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+            ref = FETCH(1);             /* method ref */
+            vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+            if (methodCallRange) {
+                ILOGV("|invoke-direct-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+                thisReg = vdst;
+            } else {
+                ILOGV("|invoke-direct args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+                thisReg = vdst & 0x0f;
+            }
         }
+
         if (!checkForNull((Object*) GET_REGISTER(thisReg)))
             GOTO_exceptionThrown();
 
@@ -3768,19 +5159,28 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeStatic, bool methodCallRange)
-    vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-    ref = FETCH(1);             /* method ref */
-    vdst = FETCH(2);            /* 4 regs -or- first reg */
-
+GOTO_TARGET(invokeStatic, bool methodCallRange, bool jumboFormat)
     EXPORT_PC();
 
-    if (methodCallRange)
-        ILOGV("|invoke-static-range args=%d @0x%04x {regs=v%d-v%d}",
+    if (jumboFormat) {
+        ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+        vsrc1 = FETCH(3);                     /* count */
+        vdst = FETCH(4);                      /* first reg */
+        ADJUST_PC(2);     /* advance pc partially to make returns easier */
+        ILOGV("|invoke-static/jumbo args=%d @0x%08x {regs=v%d-v%d}",
             vsrc1, ref, vdst, vdst+vsrc1-1);
-    else
-        ILOGV("|invoke-static args=%d @0x%04x {regs=0x%04x %x}",
-            vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+    } else {
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* method ref */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        if (methodCallRange)
+            ILOGV("|invoke-static-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+        else
+            ILOGV("|invoke-static args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+    }
 
     methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
     if (methodToCall == NULL) {
@@ -3797,13 +5197,13 @@
          */
         if (dvmDexGetResolvedMethod(methodClassDex, ref) == NULL) {
             /* Class initialization is still ongoing */
-            ABORT_JIT_TSELECT();
+            END_JIT_TSELECT();
         }
     }
     GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
 GOTO_TARGET_END
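/*
 * Illustrative sketch added for clarity (not part of the patch): the five
 * 16-bit code units of a jumbo invoke, as fetched by the handlers above.
 * Unit 0 carries 0xFF in its low byte (so OP_DISPATCH_FF is reached first)
 * and the extended opcode in its high byte; the 32-bit method index is split
 * across units 1 and 2, low half first.  The helper name is hypothetical and
 * uint16_t/uint32_t stand in for the VM's u2/u4 typedefs.
 */
#include <stdint.h>

static uint32_t sketchJumboMethodRef(const uint16_t* insns)
{
    /* unit 0: 0xNNFF  (NN = extended opcode, FF = OP_DISPATCH_FF)   */
    /* unit 1: method index, low 16 bits                             */
    /* unit 2: method index, high 16 bits                            */
    /* unit 3: BBBB argument word count                              */
    /* unit 4: CCCC first argument register                          */
    return (uint32_t) insns[1] | ((uint32_t) insns[2] << 16);
}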
 
-GOTO_TARGET(invokeVirtualQuick, bool methodCallRange)
+GOTO_TARGET(invokeVirtualQuick, bool methodCallRange, bool jumboFormat)
     {
         Object* thisPtr;
 
@@ -3840,13 +5240,12 @@
          * Combine the object we found with the vtable offset in the
          * method.
          */
-        assert(ref < thisPtr->clazz->vtableCount);
+        assert(ref < (unsigned int) thisPtr->clazz->vtableCount);
         methodToCall = thisPtr->clazz->vtable[ref];
 
 #if 0
         if (dvmIsAbstractMethod(methodToCall)) {
-            dvmThrowException("Ljava/lang/AbstractMethodError;",
-                "abstract method not implemented");
+            dvmThrowAbstractMethodError("abstract method not implemented");
             GOTO_exceptionThrown();
         }
 #else
@@ -3862,7 +5261,7 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeSuperQuick, bool methodCallRange)
+GOTO_TARGET(invokeSuperQuick, bool methodCallRange, bool jumboFormat)
     {
         u2 thisReg;
 
@@ -3887,11 +5286,11 @@
 
 #if 0   /* impossible in optimized + verified code */
         if (ref >= curMethod->clazz->super->vtableCount) {
-            dvmThrowException("Ljava/lang/NoSuchMethodError;", NULL);
+            dvmThrowNoSuchMethodError(NULL);
             GOTO_exceptionThrown();
         }
 #else
-        assert(ref < curMethod->clazz->super->vtableCount);
+        assert(ref < (unsigned int) curMethod->clazz->super->vtableCount);
 #endif
 
         /*
@@ -3907,8 +5306,7 @@
 
 #if 0
         if (dvmIsAbstractMethod(methodToCall)) {
-            dvmThrowException("Ljava/lang/AbstractMethodError;",
-                "abstract method not implemented");
+            dvmThrowAbstractMethodError("abstract method not implemented");
             GOTO_exceptionThrown();
         }
 #else
@@ -3956,7 +5354,7 @@
 #endif
 
         /* back up to previous frame and see if we hit a break */
-        fp = saveArea->prevFrame;
+        fp = (u4*)saveArea->prevFrame;
         assert(fp != NULL);
         if (dvmIsBreakFrame(fp)) {
             /* bail without popping the method frame from stack */
@@ -4010,8 +5408,8 @@
         PERIODIC_CHECKS(kInterpEntryThrow, 0);
 
 #if defined(WITH_JIT)
-        // Something threw during trace selection - abort the current trace
-        ABORT_JIT_TSELECT();
+        // Something threw during trace selection - end the current trace
+        END_JIT_TSELECT();
 #endif
         /*
          * We save off the exception and clear the exception status.  While
@@ -4043,7 +5441,7 @@
          * here, and have the JNI exception code do the reporting to the
          * debugger.
          */
-        if (gDvm.debuggerActive) {
+        if (DEBUGGER_ACTIVE) {
             void* catchFrame;
             catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
                         exception, true, &catchFrame);
@@ -4068,7 +5466,7 @@
          * the "catch" blocks.
          */
         catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
-                    exception, false, (void*)&fp);
+                    exception, false, (void**)(void*)&fp);
 
         /*
          * Restore the stack bounds after an overflow.  This isn't going to
@@ -4301,7 +5699,7 @@
             curMethod = methodToCall;
             methodClassDex = curMethod->clazz->pDvmDex;
             pc = methodToCall->insns;
-            fp = self->curFrame = newFp;
+            self->curFrame = fp = newFp;
 #ifdef EASY_GDB
             debugSaveArea = SAVEAREA_FROM_FP(newFp);
 #endif
@@ -4314,18 +5712,14 @@
             FINISH(0);                              // jump to method start
         } else {
             /* set this up for JNI locals, even if not a JNI native */
-#ifdef USE_INDIRECT_REF
             newSaveArea->xtra.localRefCookie = self->jniLocalRefTable.segmentState.all;
-#else
-            newSaveArea->xtra.localRefCookie = self->jniLocalRefTable.nextEntry;
-#endif
 
             self->curFrame = newFp;
 
             DUMP_REGS(methodToCall, newFp, true);   // show input args
 
 #if (INTERP_TYPE == INTERP_DBG)
-            if (gDvm.debuggerActive) {
+            if (DEBUGGER_ACTIVE) {
                 dvmDbgPostLocationEvent(methodToCall, -1,
                     dvmGetThisPtr(curMethod, fp), DBG_METHOD_ENTRY);
             }
@@ -4352,7 +5746,7 @@
             (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
 
 #if (INTERP_TYPE == INTERP_DBG)
-            if (gDvm.debuggerActive) {
+            if (DEBUGGER_ACTIVE) {
                 dvmDbgPostLocationEvent(methodToCall, -1,
                     dvmGetThisPtr(curMethod, fp), DBG_METHOD_EXIT);
             }
@@ -4407,7 +5801,7 @@
 bail:
     ILOGD("|-- Leaving interpreter loop");      // note "curMethod" may be NULL
 
-    interpState->retval = retval;
+    self->retval = retval;
     return false;
 
 bail_switch:
@@ -4419,18 +5813,18 @@
      * TODO: figure out if preserving this makes any sense.
      */
 #if INTERP_TYPE == INTERP_DBG
-    interpState->debugIsMethodEntry = debugIsMethodEntry;
+    self->debugIsMethodEntry = debugIsMethodEntry;
 #else
-    interpState->debugIsMethodEntry = false;
+    self->debugIsMethodEntry = false;
 #endif
 
     /* export state changes */
-    interpState->method = curMethod;
-    interpState->pc = pc;
-    interpState->fp = fp;
+    self->interpSave.method = curMethod;
+    self->interpSave.pc = pc;
+    self->interpSave.fp = fp;
     /* debugTrackedRefStart doesn't change */
-    interpState->retval = retval;   /* need for _entryPoint=ret */
-    interpState->nextMode =
+    self->retval = retval;   /* need for _entryPoint=ret */
+    self->nextMode =
         (INTERP_TYPE == INTERP_STD) ? INTERP_DBG : INTERP_STD;
     LOGVV(" meth='%s.%s' pc=0x%x fp=%p\n",
         curMethod->clazz->descriptor, curMethod->name,
diff --git a/vm/mterp/out/InterpC-portstd.c b/vm/mterp/out/InterpC-portstd.c
index 9abe3c4..b95f847 100644
--- a/vm/mterp/out/InterpC-portstd.c
+++ b/vm/mterp/out/InterpC-portstd.c
@@ -58,24 +58,31 @@
 #endif
 
 /*
- * ARM EABI requires 64-bit alignment for access to 64-bit data types.  We
- * can't just use pointers to copy 64-bit values out of our interpreted
- * register set, because gcc will generate ldrd/strd.
+ * Some architectures require 64-bit alignment for access to 64-bit data
+ * types.  We can't just use pointers to copy 64-bit values out of our
+ * interpreted register set, because gcc may assume the pointer target is
+ * aligned and generate invalid code.
  *
- * The __UNION version copies data in and out of a union.  The __MEMCPY
- * version uses a memcpy() call to do the transfer; gcc is smart enough to
- * not actually call memcpy().  The __UNION version is very bad on ARM;
- * it only uses one more instruction than __MEMCPY, but for some reason
- * gcc thinks it needs separate storage for every instance of the union.
- * On top of that, it feels the need to zero them out at the start of the
- * method.  Net result is we zero out ~700 bytes of stack space at the top
- * of the interpreter using ARM STM instructions.
+ * There are two common approaches:
+ *  (1) Use a union that defines a 32-bit pair and a 64-bit value.
+ *  (2) Call memcpy().
+ *
+ * Depending upon what compiler you're using and what options are specified,
+ * one may be faster than the other.  For example, the compiler might
+ * convert a memcpy() of 8 bytes into a series of instructions and omit
+ * the call.  The union version could cause some strange side-effects,
+ * e.g. for a while ARM gcc thought it needed separate storage for each
+ * inlined instance, and generated instructions to zero out ~700 bytes of
+ * stack space at the top of the interpreter.
+ *
+ * The default is to use memcpy().  The current gcc for ARM seems to do
+ * better with the union.
  */
 #if defined(__ARM_EABI__)
-//# define NO_UNALIGN_64__UNION
-# define NO_UNALIGN_64__MEMCPY
+# define NO_UNALIGN_64__UNION
 #endif
 
+
 //#define LOG_INSTR                   /* verbose debugging */
 /* set and adjust ANDROID_LOG_TAGS='*:i jdwp:i dalvikvm:i dalvikvmi:i' */
 
@@ -171,12 +178,10 @@
     conv.parts[0] = ptr[0];
     conv.parts[1] = ptr[1];
     return conv.ll;
-#elif defined(NO_UNALIGN_64__MEMCPY)
+#else
     s8 val;
     memcpy(&val, &ptr[idx], 8);
     return val;
-#else
-    return *((s8*) &ptr[idx]);
 #endif
 }
 
@@ -190,10 +195,8 @@
     conv.ll = val;
     ptr[0] = conv.parts[0];
     ptr[1] = conv.parts[1];
-#elif defined(NO_UNALIGN_64__MEMCPY)
-    memcpy(&ptr[idx], &val, 8);
 #else
-    *((s8*) &ptr[idx]) = val;
+    memcpy(&ptr[idx], &val, 8);
 #endif
 }
 
@@ -207,12 +210,10 @@
     conv.parts[0] = ptr[0];
     conv.parts[1] = ptr[1];
     return conv.d;
-#elif defined(NO_UNALIGN_64__MEMCPY)
+#else
     double dval;
     memcpy(&dval, &ptr[idx], 8);
     return dval;
-#else
-    return *((double*) &ptr[idx]);
 #endif
 }
 
@@ -226,10 +227,8 @@
     conv.d = dval;
     ptr[0] = conv.parts[0];
     ptr[1] = conv.parts[1];
-#elif defined(NO_UNALIGN_64__MEMCPY)
-    memcpy(&ptr[idx], &dval, 8);
 #else
-    *((double*) &ptr[idx]) = dval;
+    memcpy(&ptr[idx], &dval, 8);
 #endif
 }
 
@@ -318,10 +317,10 @@
 
 /*
  * The current PC must be available to Throwable constructors, e.g.
- * those created by dvmThrowException(), so that the exception stack
- * trace can be generated correctly.  If we don't do this, the offset
- * within the current method won't be shown correctly.  See the notes
- * in Exception.c.
+ * those created by the various exception throw routines, so that the
+ * exception stack trace can be generated correctly.  If we don't do this,
+ * the offset within the current method won't be shown correctly.  See the
+ * notes in Exception.c.
  *
  * This is also used to determine the address for precise GC.
  *
@@ -360,7 +359,7 @@
 static inline bool checkForNull(Object* obj)
 {
     if (obj == NULL) {
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         return false;
     }
 #ifdef WITH_EXTRA_OBJECT_VALIDATION
@@ -392,7 +391,7 @@
 {
     if (obj == NULL) {
         EXPORT_PC();
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         return false;
     }
 #ifdef WITH_EXTRA_OBJECT_VALIDATION
@@ -419,7 +418,7 @@
 
 #define CHECK_JIT_BOOL() (false)
 #define CHECK_JIT_VOID()
-#define ABORT_JIT_TSELECT() ((void)0)
+#define END_JIT_TSELECT() ((void)0)
 
 /* File: portable/stubdefs.c */
 /*
@@ -460,10 +459,14 @@
 # define FINISH_BKPT(_opcode) {                                             \
         goto *handlerTable[_opcode];                                        \
     }
+# define DISPATCH_EXTENDED(_opcode) {                                       \
+        goto *handlerTable[0x100 + _opcode];                                \
+    }
 #else
 # define HANDLE_OPCODE(_op) case _op:
 # define FINISH(_offset)    { ADJUST_PC(_offset); break; }
 # define FINISH_BKPT(opcode) { > not implemented < }
+# define DISPATCH_EXTENDED(opcode) goto case (0x100 + opcode);
 #endif
 
 #define OP_END
@@ -485,9 +488,10 @@
 
 #define GOTO_returnFromMethod() goto returnFromMethod;
 
-#define GOTO_invoke(_target, _methodCallRange)                              \
+#define GOTO_invoke(_target, _methodCallRange, _jumboFormat)                \
     do {                                                                    \
         methodCallRange = _methodCallRange;                                 \
+        jumboFormat = _jumboFormat;                                         \
         goto _target;                                                       \
     } while(false)
 
@@ -510,10 +514,10 @@
         }                                                                   \
         if (NEED_INTERP_SWITCH(INTERP_TYPE)) {                              \
             ADJUST_PC(_pcadj);                                              \
-            interpState->entryPoint = _entryPoint;                          \
+            self->entryPoint = _entryPoint;                          \
             LOGVV("threadid=%d: switch to %s ep=%d adj=%d\n",               \
                 self->threadId,                                             \
-                (interpState->nextMode == INTERP_STD) ? "STD" : "DBG",      \
+                (self->nextMode == INTERP_STD) ? "STD" : "DBG",      \
                 (_entryPoint), (_pcadj));                                   \
             GOTO_bail_switch();                                             \
         }                                                                   \
@@ -521,14 +525,14 @@
 
 /* File: c/opcommon.c */
 /* forward declarations of goto targets */
-GOTO_TARGET_DECL(filledNewArray, bool methodCallRange);
-GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange);
-GOTO_TARGET_DECL(invokeSuper, bool methodCallRange);
-GOTO_TARGET_DECL(invokeInterface, bool methodCallRange);
-GOTO_TARGET_DECL(invokeDirect, bool methodCallRange);
-GOTO_TARGET_DECL(invokeStatic, bool methodCallRange);
-GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange);
-GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange);
+GOTO_TARGET_DECL(filledNewArray, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeSuper, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeInterface, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeDirect, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeStatic, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange, bool jumboFormat);
 GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall,
     u2 count, u2 regs);
 GOTO_TARGET_DECL(returnFromMethod);
@@ -671,8 +675,7 @@
             secondVal = GET_REGISTER(vsrc2);                                \
             if (secondVal == 0) {                                           \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && secondVal == -1) {            \
@@ -718,9 +721,8 @@
             firstVal = GET_REGISTER(vsrc1);                                 \
             if ((s2) vsrc2 == 0) {                                          \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
-                GOTO_exceptionThrown();                                      \
+                dvmThrowArithmeticException("divide by zero");              \
+                GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && ((s2) vsrc2) == -1) {         \
                 /* won't generate /lit16 instr for this; check anyway */    \
@@ -753,8 +755,7 @@
             firstVal = GET_REGISTER(vsrc1);                                 \
             if ((s1) vsrc2 == 0) {                                          \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && ((s1) vsrc2) == -1) {         \
@@ -799,8 +800,7 @@
             secondVal = GET_REGISTER(vsrc1);                                \
             if (secondVal == 0) {                                           \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && secondVal == -1) {            \
@@ -842,8 +842,7 @@
             secondVal = GET_REGISTER_WIDE(vsrc2);                           \
             if (secondVal == 0LL) {                                         \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u8)firstVal == 0x8000000000000000ULL &&                    \
@@ -889,8 +888,7 @@
             secondVal = GET_REGISTER_WIDE(vsrc1);                           \
             if (secondVal == 0LL) {                                         \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u8)firstVal == 0x8000000000000000ULL &&                    \
@@ -980,7 +978,8 @@
         if (!checkForNull((Object*) arrayObj))                              \
             GOTO_exceptionThrown();                                         \
         if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
-            dvmThrowAIOOBE(GET_REGISTER(vsrc2), arrayObj->length);          \
+            dvmThrowArrayIndexOutOfBoundsException(                         \
+                arrayObj->length, GET_REGISTER(vsrc2));                     \
             GOTO_exceptionThrown();                                         \
         }                                                                   \
         SET_REGISTER##_regsize(vdst,                                        \
@@ -1004,7 +1003,8 @@
         if (!checkForNull((Object*) arrayObj))                              \
             GOTO_exceptionThrown();                                         \
         if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
-            dvmThrowAIOOBE(GET_REGISTER(vsrc2), arrayObj->length);          \
+            dvmThrowArrayIndexOutOfBoundsException(                         \
+                arrayObj->length, GET_REGISTER(vsrc2));                     \
             GOTO_exceptionThrown();                                         \
         }                                                                   \
         ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\
@@ -1057,6 +1057,34 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_IGET_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, vCCCC, class@AAAAAAAA*/)                 \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        vsrc1 = FETCH(4);                      /* object ptr */             \
+        ILOGV("|iget%s/jumbo v%d,v%d,field@0x%08x",                         \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO_exceptionThrown();                                         \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(curMethod->clazz, ref);            \
+            if (ifield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst,                                        \
+            dvmGetField##_ftype(obj, ifield->byteOffset));                  \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+        UPDATE_FIELD_GET(&ifield->field);                                   \
+    }                                                                       \
+    FINISH(5);
+
 #define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
     HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
     {                                                                       \
@@ -1102,6 +1130,34 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_IPUT_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, vCCCC, class@AAAAAAAA*/)                 \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        vsrc1 = FETCH(4);                      /* object ptr */             \
+        ILOGV("|iput%s/jumbo v%d,v%d,field@0x%08x",                         \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO_exceptionThrown();                                         \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(curMethod->clazz, ref);            \
+            if (ifield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+        }                                                                   \
+        dvmSetField##_ftype(obj, ifield->byteOffset,                        \
+            GET_REGISTER##_regsize(vdst));                                  \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+        UPDATE_FIELD_PUT(&ifield->field);                                   \
+    }                                                                       \
+    FINISH(5);
+
 #define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
     HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
     {                                                                       \
@@ -1139,7 +1195,7 @@
             if (sfield == NULL)                                             \
                 GOTO_exceptionThrown();                                     \
             if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
-                ABORT_JIT_TSELECT();                                        \
+                END_JIT_TSELECT();                                          \
             }                                                               \
         }                                                                   \
         SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
@@ -1149,6 +1205,30 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_SGET_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, class@AAAAAAAA*/)                        \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        ILOGV("|sget%s/jumbo v%d,sfield@0x%08x", (_opname), vdst, ref);     \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(curMethod->clazz, ref);          \
+            if (sfield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+            if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
+                END_JIT_TSELECT();                                          \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
+        ILOGV("+ SGET '%s'=0x%08llx",                                       \
+            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+        UPDATE_FIELD_GET(&sfield->field);                                   \
+    }                                                                       \
+    FINISH(4);
+
 #define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize)                   \
     HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/)                              \
     {                                                                       \
@@ -1163,7 +1243,7 @@
             if (sfield == NULL)                                             \
                 GOTO_exceptionThrown();                                     \
             if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
-                ABORT_JIT_TSELECT();                                        \
+                END_JIT_TSELECT();                                          \
             }                                                               \
         }                                                                   \
         dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
@@ -1173,23 +1253,47 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_SPUT_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, class@AAAAAAAA*/)                        \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        ILOGV("|sput%s/jumbo v%d,sfield@0x%08x", (_opname), vdst, ref);     \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(curMethod->clazz, ref);          \
+            if (sfield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+            if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
+                END_JIT_TSELECT();                                          \
+            }                                                               \
+        }                                                                   \
+        dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
+        ILOGV("+ SPUT '%s'=0x%08llx",                                       \
+            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+        UPDATE_FIELD_PUT(&sfield->field);                                   \
+    }                                                                       \
+    FINISH(4);
+
 /* File: portable/entry.c */
 /*
  * Main interpreter loop.
  *
  * This was written with an ARM implementation in mind.
  */
-bool INTERP_FUNC_NAME(Thread* self, InterpState* interpState)
+bool INTERP_FUNC_NAME(Thread* self)
 {
 #if defined(EASY_GDB)
     StackSaveArea* debugSaveArea = SAVEAREA_FROM_FP(self->curFrame);
 #endif
 #if INTERP_TYPE == INTERP_DBG
     bool debugIsMethodEntry = false;
-    debugIsMethodEntry = interpState->debugIsMethodEntry;
+    debugIsMethodEntry = self->debugIsMethodEntry;
 #endif
 #if defined(WITH_TRACKREF_CHECKS)
-    int debugTrackedRefStart = interpState->debugTrackedRefStart;
+    int debugTrackedRefStart = self->interpSave.debugTrackedRefStart;
 #endif
     DvmDex* methodClassDex;     // curMethod->clazz->pDvmDex
     JValue retval;
@@ -1200,11 +1304,12 @@
     u4* fp;                     // frame pointer
     u2 inst;                    // current instruction
     /* instruction decoding */
-    u2 ref;                     // 16-bit quantity fetched directly
+    u4 ref;                     // 16 or 32-bit quantity fetched directly
     u2 vsrc1, vsrc2, vdst;      // usually used for register indexes
     /* method call setup */
     const Method* methodToCall;
     bool methodCallRange;
+    bool jumboFormat;
 
 
 #if defined(THREADED_INTERP)
@@ -1215,16 +1320,16 @@
 #if defined(WITH_JIT)
 #if 0
     LOGD("*DebugInterp - entrypoint is %d, tgt is 0x%x, %s\n",
-         interpState->entryPoint,
-         interpState->pc,
-         interpState->method->name);
+         self->entryPoint,
+         self->interpSave.pc,
+         self->interpSave.method->name);
 #endif
 #if INTERP_TYPE == INTERP_DBG
     const ClassObject* callsiteClass = NULL;
 
 #if defined(WITH_SELF_VERIFICATION)
-    if (interpState->jitState != kJitSelfVerification) {
-        interpState->self->shadowSpace->jitExitState = kSVSIdle;
+    if (self->jitState != kJitSelfVerification) {
+        self->shadowSpace->jitExitState = kSVSIdle;
     }
 #endif
 
@@ -1237,11 +1342,11 @@
           * dvmJitCheckTraceRequest will change the jitState to kJitDone
           * but stay in the dbg interpreter.
           */
-         (interpState->entryPoint == kInterpEntryInstr) &&
-         (interpState->jitState == kJitTSelectRequest ||
-          interpState->jitState == kJitTSelectRequestHot) &&
-         dvmJitCheckTraceRequest(self, interpState)) {
-        interpState->nextMode = INTERP_STD;
+         (self->entryPoint == kInterpEntryInstr) &&
+         (self->jitState == kJitTSelectRequest ||
+          self->jitState == kJitTSelectRequestHot) &&
+         dvmJitCheckTraceRequest(self)) {
+        self->nextMode = INTERP_STD;
         //LOGD("Invalid trace request, exiting\n");
         return true;
     }
@@ -1249,17 +1354,17 @@
 #endif /* WITH_JIT */
 
     /* copy state in */
-    curMethod = interpState->method;
-    pc = interpState->pc;
-    fp = interpState->fp;
-    retval = interpState->retval;   /* only need for kInterpEntryReturn? */
+    curMethod = self->interpSave.method;
+    pc = self->interpSave.pc;
+    fp = self->interpSave.fp;
+    retval = self->retval;   /* only need for kInterpEntryReturn? */
 
     methodClassDex = curMethod->clazz->pDvmDex;
 
     LOGVV("threadid=%d: entry(%s) %s.%s pc=0x%x fp=%p ep=%d\n",
-        self->threadId, (interpState->nextMode == INTERP_STD) ? "STD" : "DBG",
+        self->threadId, (self->nextMode == INTERP_STD) ? "STD" : "DBG",
         curMethod->clazz->descriptor, curMethod->name, pc - curMethod->insns,
-        fp, interpState->entryPoint);
+        fp, self->entryPoint);
 
     /*
      * DEBUG: scramble this to ensure we're not relying on it.
@@ -1270,11 +1375,11 @@
     if (debugIsMethodEntry) {
         ILOGD("|-- Now interpreting %s.%s", curMethod->clazz->descriptor,
                 curMethod->name);
-        DUMP_REGS(curMethod, interpState->fp, false);
+        DUMP_REGS(curMethod, self->interpSave.fp, false);
     }
 #endif
 
-    switch (interpState->entryPoint) {
+    switch (self->entryPoint) {
     case kInterpEntryInstr:
         /* just fall through to instruction loop or threaded kickstart */
         break;
@@ -1655,12 +1760,8 @@
         if (!checkForNullExportPC(obj, fp, pc))
             GOTO_exceptionThrown();
         ILOGV("+ locking %p %s\n", obj, obj->clazz->descriptor);
-        EXPORT_PC();    /* need for precise GC, also WITH_MONITOR_TRACKING */
+        EXPORT_PC();    /* need for precise GC */
         dvmLockObject(self, obj);
-#ifdef WITH_DEADLOCK_PREDICTION
-        if (dvmCheckException(self))
-            GOTO_exceptionThrown();
-#endif
     }
     FINISH(1);
 OP_END
@@ -1806,15 +1907,15 @@
          * check is not needed for mterp.
          */
         if (!dvmDexGetResolvedClass(methodClassDex, ref)) {
-            /* Class initialization is still ongoing - abandon the trace */
-            ABORT_JIT_TSELECT();
+            /* Class initialization is still ongoing - end the trace */
+            END_JIT_TSELECT();
         }
 
         /*
          * Verifier now tests for interface/abstract class.
          */
         //if (dvmIsInterfaceClass(clazz) || dvmIsAbstractClass(clazz)) {
-        //    dvmThrowExceptionWithClassMessage("Ljava/lang/InstantiationError;",
+        //    dvmThrowExceptionWithClassMessage(gDvm.exInstantiationError,
         //        clazz->descriptor);
         //    GOTO_exceptionThrown();
         //}
@@ -1842,7 +1943,7 @@
             vdst, vsrc1, ref, (s4) GET_REGISTER(vsrc1));
         length = (s4) GET_REGISTER(vsrc1);
         if (length < 0) {
-            dvmThrowException("Ljava/lang/NegativeArraySizeException;", NULL);
+            dvmThrowNegativeArraySizeException(length);
             GOTO_exceptionThrown();
         }
         arrayClass = dvmDexGetResolvedClass(methodClassDex, ref);
@@ -1865,12 +1966,12 @@
 
 /* File: c/OP_FILLED_NEW_ARRAY.c */
 HANDLE_OPCODE(OP_FILLED_NEW_ARRAY /*vB, {vD, vE, vF, vG, vA}, class@CCCC*/)
-    GOTO_invoke(filledNewArray, false);
+    GOTO_invoke(filledNewArray, false, false);
 OP_END
 
 /* File: c/OP_FILLED_NEW_ARRAY_RANGE.c */
 HANDLE_OPCODE(OP_FILLED_NEW_ARRAY_RANGE /*{vCCCC..v(CCCC+AA-1)}, class@BBBB*/)
-    GOTO_invoke(filledNewArray, true);
+    GOTO_invoke(filledNewArray, true, false);
 OP_END
 
 /* File: c/OP_FILL_ARRAY_DATA.c */
@@ -1890,8 +1991,7 @@
             arrayData >= curMethod->insns + dvmGetMethodInsnsSize(curMethod))
         {
             /* should have been caught in verifier */
-            dvmThrowException("Ljava/lang/InternalError;",
-                              "bad fill array data");
+            dvmThrowInternalError("bad fill array data");
             GOTO_exceptionThrown();
         }
 #endif
@@ -1992,7 +2092,7 @@
         {
             /* should have been caught in verifier */
             EXPORT_PC();
-            dvmThrowException("Ljava/lang/InternalError;", "bad packed switch");
+            dvmThrowInternalError("bad packed switch");
             GOTO_exceptionThrown();
         }
 #endif
@@ -2023,7 +2123,7 @@
         {
             /* should have been caught in verifier */
             EXPORT_PC();
-            dvmThrowException("Ljava/lang/InternalError;", "bad sparse switch");
+            dvmThrowInternalError("bad sparse switch");
             GOTO_exceptionThrown();
         }
 #endif
@@ -2181,7 +2281,8 @@
         if (!checkForNull((Object*) arrayObj))
             GOTO_exceptionThrown();
         if (GET_REGISTER(vsrc2) >= arrayObj->length) {
-            dvmThrowAIOOBE(GET_REGISTER(vsrc2), arrayObj->length);
+            dvmThrowArrayIndexOutOfBoundsException(
+                arrayObj->length, GET_REGISTER(vsrc2));
             GOTO_exceptionThrown();
         }
         obj = (Object*) GET_REGISTER(vdst);
@@ -2345,27 +2446,27 @@
 
 /* File: c/OP_INVOKE_VIRTUAL.c */
 HANDLE_OPCODE(OP_INVOKE_VIRTUAL /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-    GOTO_invoke(invokeVirtual, false);
+    GOTO_invoke(invokeVirtual, false, false);
 OP_END
 
 /* File: c/OP_INVOKE_SUPER.c */
 HANDLE_OPCODE(OP_INVOKE_SUPER /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-    GOTO_invoke(invokeSuper, false);
+    GOTO_invoke(invokeSuper, false, false);
 OP_END
 
 /* File: c/OP_INVOKE_DIRECT.c */
 HANDLE_OPCODE(OP_INVOKE_DIRECT /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-    GOTO_invoke(invokeDirect, false);
+    GOTO_invoke(invokeDirect, false, false);
 OP_END
 
 /* File: c/OP_INVOKE_STATIC.c */
 HANDLE_OPCODE(OP_INVOKE_STATIC /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-    GOTO_invoke(invokeStatic, false);
+    GOTO_invoke(invokeStatic, false, false);
 OP_END
 
 /* File: c/OP_INVOKE_INTERFACE.c */
 HANDLE_OPCODE(OP_INVOKE_INTERFACE /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-    GOTO_invoke(invokeInterface, false);
+    GOTO_invoke(invokeInterface, false, false);
 OP_END
 
 /* File: c/OP_UNUSED_73.c */
@@ -2374,27 +2475,27 @@
 
 /* File: c/OP_INVOKE_VIRTUAL_RANGE.c */
 HANDLE_OPCODE(OP_INVOKE_VIRTUAL_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
-    GOTO_invoke(invokeVirtual, true);
+    GOTO_invoke(invokeVirtual, true, false);
 OP_END
 
 /* File: c/OP_INVOKE_SUPER_RANGE.c */
 HANDLE_OPCODE(OP_INVOKE_SUPER_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
-    GOTO_invoke(invokeSuper, true);
+    GOTO_invoke(invokeSuper, true, false);
 OP_END
 
 /* File: c/OP_INVOKE_DIRECT_RANGE.c */
 HANDLE_OPCODE(OP_INVOKE_DIRECT_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
-    GOTO_invoke(invokeDirect, true);
+    GOTO_invoke(invokeDirect, true, false);
 OP_END
 
 /* File: c/OP_INVOKE_STATIC_RANGE.c */
 HANDLE_OPCODE(OP_INVOKE_STATIC_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
-    GOTO_invoke(invokeStatic, true);
+    GOTO_invoke(invokeStatic, true, false);
 OP_END
 
 /* File: c/OP_INVOKE_INTERFACE_RANGE.c */
 HANDLE_OPCODE(OP_INVOKE_INTERFACE_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
-    GOTO_invoke(invokeInterface, true);
+    GOTO_invoke(invokeInterface, true, false);
 OP_END
 
 /* File: c/OP_UNUSED_79.c */
@@ -3059,21 +3160,34 @@
     FINISH(3);
 OP_END
 
-/* File: c/OP_INVOKE_DIRECT_EMPTY.c */
-HANDLE_OPCODE(OP_INVOKE_DIRECT_EMPTY /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-#if INTERP_TYPE != INTERP_DBG
-    //LOGI("Ignoring empty\n");
-    FINISH(3);
-#else
-    if (!gDvm.debuggerActive) {
-        //LOGI("Skipping empty\n");
-        FINISH(3);      // don't want it to show up in profiler output
-    } else {
-        //LOGI("Running empty\n");
-        /* fall through to OP_INVOKE_DIRECT */
-        GOTO_invoke(invokeDirect, false);
-    }
+/* File: c/OP_INVOKE_OBJECT_INIT_RANGE.c */
+HANDLE_OPCODE(OP_INVOKE_OBJECT_INIT_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+    {
+        Object* obj;
+
+        vsrc1 = FETCH(2);               /* reg number of "this" pointer */
+        obj = GET_REGISTER_AS_OBJECT(vsrc1);
+
+        if (!checkForNullExportPC(obj, fp, pc))
+            GOTO_exceptionThrown();
+
+        /*
+         * The object should be marked "finalizable" when Object.<init>
+         * completes normally.  We're going to assume it does complete
+         * (by virtue of being nothing but a return-void) and set it now.
+         */
+        if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE)) {
+            dvmSetFinalizable(obj);
+        }
+
+#if INTERP_TYPE == INTERP_DBG
+        if (DEBUGGER_ACTIVE) {
+            /* behave like OP_INVOKE_DIRECT_RANGE */
+            GOTO_invoke(invokeDirect, true, false);
+        }
 #endif
+        FINISH(3);
+    }
 OP_END
 
 /* File: c/OP_RETURN_VOID_BARRIER.c */
@@ -3112,22 +3226,22 @@
 
 /* File: c/OP_INVOKE_VIRTUAL_QUICK.c */
 HANDLE_OPCODE(OP_INVOKE_VIRTUAL_QUICK /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-    GOTO_invoke(invokeVirtualQuick, false);
+    GOTO_invoke(invokeVirtualQuick, false, false);
 OP_END
 
 /* File: c/OP_INVOKE_VIRTUAL_QUICK_RANGE.c */
 HANDLE_OPCODE(OP_INVOKE_VIRTUAL_QUICK_RANGE/*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
-    GOTO_invoke(invokeVirtualQuick, true);
+    GOTO_invoke(invokeVirtualQuick, true, false);
 OP_END
 
 /* File: c/OP_INVOKE_SUPER_QUICK.c */
 HANDLE_OPCODE(OP_INVOKE_SUPER_QUICK /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
-    GOTO_invoke(invokeSuperQuick, false);
+    GOTO_invoke(invokeSuperQuick, false, false);
 OP_END
 
 /* File: c/OP_INVOKE_SUPER_QUICK_RANGE.c */
 HANDLE_OPCODE(OP_INVOKE_SUPER_QUICK_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
-    GOTO_invoke(invokeSuperQuick, true);
+    GOTO_invoke(invokeSuperQuick, true, false);
 OP_END
 
 /* File: c/OP_IPUT_OBJECT_VOLATILE.c */
@@ -3145,13 +3259,1238 @@
 /* File: c/OP_DISPATCH_FF.c */
 HANDLE_OPCODE(OP_DISPATCH_FF)
     /*
+     * Indicates extended opcode.  Use next 8 bits to choose where to branch.
+     */
+    DISPATCH_EXTENDED(INST_AA(inst));
+OP_END
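/*
 * Illustrative note added for clarity (not part of the patch): with the
 * threaded interpreter, the DISPATCH_EXTENDED() macro defined earlier simply
 * indexes the handler table at 0x100 plus the high byte of the opcode unit:
 *
 *     goto *handlerTable[0x100 + INST_AA(inst)];
 *
 * so an extended instruction whose first code unit is 0x01FF lands on the
 * handler registered at slot 0x101.
 */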
+
+/* File: c/OP_CONST_CLASS_JUMBO.c */
+HANDLE_OPCODE(OP_CONST_CLASS_JUMBO /*vBBBB, class@AAAAAAAA*/)
+    {
+        ClassObject* clazz;
+
+        ref = FETCH(1) | (u4)FETCH(2) << 16;
+        vdst = FETCH(3);
+        ILOGV("|const-class/jumbo v%d class@0x%08x", vdst, ref);
+        clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+        if (clazz == NULL) {
+            EXPORT_PC();
+            clazz = dvmResolveClass(curMethod->clazz, ref, true);
+            if (clazz == NULL)
+                GOTO_exceptionThrown();
+        }
+        SET_REGISTER(vdst, (u4) clazz);
+    }
+    FINISH(4);
+OP_END
+
+/* File: c/OP_CHECK_CAST_JUMBO.c */
+HANDLE_OPCODE(OP_CHECK_CAST_JUMBO /*vBBBB, class@AAAAAAAA*/)
+    {
+        ClassObject* clazz;
+        Object* obj;
+
+        EXPORT_PC();
+
+        ref = FETCH(1) | (u4)FETCH(2) << 16;     /* class to check against */
+        vsrc1 = FETCH(3);
+        ILOGV("|check-cast/jumbo v%d,class@0x%08x", vsrc1, ref);
+
+        obj = (Object*)GET_REGISTER(vsrc1);
+        if (obj != NULL) {
+#if defined(WITH_EXTRA_OBJECT_VALIDATION)
+            if (!checkForNull(obj))
+                GOTO_exceptionThrown();
+#endif
+            clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+            if (clazz == NULL) {
+                clazz = dvmResolveClass(curMethod->clazz, ref, false);
+                if (clazz == NULL)
+                    GOTO_exceptionThrown();
+            }
+            if (!dvmInstanceof(obj->clazz, clazz)) {
+                dvmThrowClassCastException(obj->clazz, clazz);
+                GOTO_exceptionThrown();
+            }
+        }
+    }
+    FINISH(4);
+OP_END
+
+/* File: c/OP_INSTANCE_OF_JUMBO.c */
+HANDLE_OPCODE(OP_INSTANCE_OF_JUMBO /*vBBBB, vCCCC, class@AAAAAAAA*/)
+    {
+        ClassObject* clazz;
+        Object* obj;
+
+        ref = FETCH(1) | (u4)FETCH(2) << 16;     /* class to check against */
+        vdst = FETCH(3);
+        vsrc1 = FETCH(4);   /* object to check */
+        ILOGV("|instance-of/jumbo v%d,v%d,class@0x%08x", vdst, vsrc1, ref);
+
+        obj = (Object*)GET_REGISTER(vsrc1);
+        if (obj == NULL) {
+            SET_REGISTER(vdst, 0);
+        } else {
+#if defined(WITH_EXTRA_OBJECT_VALIDATION)
+            if (!checkForNullExportPC(obj, fp, pc))
+                GOTO_exceptionThrown();
+#endif
+            clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+            if (clazz == NULL) {
+                EXPORT_PC();
+                clazz = dvmResolveClass(curMethod->clazz, ref, true);
+                if (clazz == NULL)
+                    GOTO_exceptionThrown();
+            }
+            SET_REGISTER(vdst, dvmInstanceof(obj->clazz, clazz));
+        }
+    }
+    FINISH(5);
+OP_END
+
+/* File: c/OP_NEW_INSTANCE_JUMBO.c */
+HANDLE_OPCODE(OP_NEW_INSTANCE_JUMBO /*vBBBB, class@AAAAAAAA*/)
+    {
+        ClassObject* clazz;
+        Object* newObj;
+
+        EXPORT_PC();
+
+        ref = FETCH(1) | (u4)FETCH(2) << 16;
+        vdst = FETCH(3);
+        ILOGV("|new-instance/jumbo v%d,class@0x%08x", vdst, ref);
+        clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+        if (clazz == NULL) {
+            clazz = dvmResolveClass(curMethod->clazz, ref, false);
+            if (clazz == NULL)
+                GOTO_exceptionThrown();
+        }
+
+        if (!dvmIsClassInitialized(clazz) && !dvmInitClass(clazz))
+            GOTO_exceptionThrown();
+
+        /*
+         * The JIT needs dvmDexGetResolvedClass() to return non-null.
+         * Since we use the portable interpreter to build the trace, this extra
+         * check is not needed for mterp.
+         */
+        if (!dvmDexGetResolvedClass(methodClassDex, ref)) {
+            /* Class initialization is still ongoing - end the trace */
+            END_JIT_TSELECT();
+        }
+
+        /*
+         * Verifier now tests for interface/abstract class.
+         */
+        //if (dvmIsInterfaceClass(clazz) || dvmIsAbstractClass(clazz)) {
+        //    dvmThrowExceptionWithClassMessage(gDvm.exInstantiationError,
+        //        clazz->descriptor);
+        //    GOTO_exceptionThrown();
+        //}
+        newObj = dvmAllocObject(clazz, ALLOC_DONT_TRACK);
+        if (newObj == NULL)
+            GOTO_exceptionThrown();
+        SET_REGISTER(vdst, (u4) newObj);
+    }
+    FINISH(4);
+OP_END
+
+/* File: c/OP_NEW_ARRAY_JUMBO.c */
+HANDLE_OPCODE(OP_NEW_ARRAY_JUMBO /*vBBBB, vCCCC, class@AAAAAAAA*/)
+    {
+        ClassObject* arrayClass;
+        ArrayObject* newArray;
+        s4 length;
+
+        EXPORT_PC();
+
+        ref = FETCH(1) | (u4)FETCH(2) << 16;
+        vdst = FETCH(3);
+        vsrc1 = FETCH(4);       /* length reg */
+        ILOGV("|new-array/jumbo v%d,v%d,class@0x%08x  (%d elements)",
+            vdst, vsrc1, ref, (s4) GET_REGISTER(vsrc1));
+        length = (s4) GET_REGISTER(vsrc1);
+        if (length < 0) {
+            dvmThrowNegativeArraySizeException(length);
+            GOTO_exceptionThrown();
+        }
+        arrayClass = dvmDexGetResolvedClass(methodClassDex, ref);
+        if (arrayClass == NULL) {
+            arrayClass = dvmResolveClass(curMethod->clazz, ref, false);
+            if (arrayClass == NULL)
+                GOTO_exceptionThrown();
+        }
+        /* verifier guarantees this is an array class */
+        assert(dvmIsArrayClass(arrayClass));
+        assert(dvmIsClassInitialized(arrayClass));
+
+        newArray = dvmAllocArrayByClass(arrayClass, length, ALLOC_DONT_TRACK);
+        if (newArray == NULL)
+            GOTO_exceptionThrown();
+        SET_REGISTER(vdst, (u4) newArray);
+    }
+    FINISH(5);
+OP_END
+
+/* File: c/OP_FILLED_NEW_ARRAY_JUMBO.c */
+HANDLE_OPCODE(OP_FILLED_NEW_ARRAY_JUMBO /*{vCCCC..v(CCCC+BBBB-1)}, class@AAAAAAAA*/)
+    GOTO_invoke(filledNewArray, true, true);
+OP_END
+
+/* File: c/OP_IGET_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_JUMBO,          "", Int, )
+OP_END
+
+/* File: c/OP_IGET_WIDE_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_WIDE_JUMBO,     "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_IGET_OBJECT_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_OBJECT_JUMBO,   "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_IGET_BOOLEAN_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_BOOLEAN_JUMBO,  "", Int, )
+OP_END
+
+/* File: c/OP_IGET_BYTE_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_BYTE_JUMBO,     "", Int, )
+OP_END
+
+/* File: c/OP_IGET_CHAR_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_CHAR_JUMBO,     "", Int, )
+OP_END
+
+/* File: c/OP_IGET_SHORT_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_SHORT_JUMBO,    "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_JUMBO,          "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_WIDE_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_WIDE_JUMBO,     "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_IPUT_OBJECT_JUMBO.c */
+/*
+ * The VM spec says we should verify that the reference being stored into
+ * the field is assignment compatible.  In practice, many popular VMs don't
+ * do this because it slows down a very common operation.  It's not so bad
+ * for us, since "dexopt" quickens it whenever possible, but it's still an
+ * issue.
+ *
+ * To make this spec-compliant, we'd need to add a ClassObject pointer to
+ * the Field struct, resolve the field's type descriptor at link or class
+ * init time, and then verify the type here.
+ */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_OBJECT_JUMBO,   "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_IPUT_BOOLEAN_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_BOOLEAN_JUMBO,  "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_BYTE_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_BYTE_JUMBO,     "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_CHAR_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_CHAR_JUMBO,     "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_SHORT_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_SHORT_JUMBO,    "", Int, )
+OP_END
+
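HANDLE_IGET_X_JUMBO and HANDLE_IPUT_X_JUMBO above are macro instantiations whose bodies live elsewhere in this generated file. Every instance decodes the same 5-unit operand shape; the struct below is a hedged sketch of that layout (field names are illustrative, not taken from the Dalvik headers):

    #include <stdint.h>

    /* Hedged sketch of the jumbo instance-field encoding handled above:
     * one opcode unit, a 32-bit field index split across two units, then
     * the value register and the object ("this") register. */
    struct JumboFieldInsn {
        uint16_t opcodeUnit;   /* 0x..FF opcode word                           */
        uint32_t fieldIdx;     /* FETCH(1) | FETCH(2) << 16: field@AAAAAAAA    */
        uint16_t vBBBB;        /* FETCH(3): value (dst for iget, src for iput) */
        uint16_t vCCCC;        /* FETCH(4): object reference register          */
    };
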
+/* File: c/OP_SGET_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_JUMBO,          "", Int, )
+OP_END
+
+/* File: c/OP_SGET_WIDE_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_WIDE_JUMBO,     "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_SGET_OBJECT_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_OBJECT_JUMBO,   "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_SGET_BOOLEAN_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_BOOLEAN_JUMBO,  "", Int, )
+OP_END
+
+/* File: c/OP_SGET_BYTE_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_BYTE_JUMBO,     "", Int, )
+OP_END
+
+/* File: c/OP_SGET_CHAR_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_CHAR_JUMBO,     "", Int, )
+OP_END
+
+/* File: c/OP_SGET_SHORT_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_SHORT_JUMBO,    "", Int, )
+OP_END
+
+/* File: c/OP_SPUT_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_JUMBO,          "", Int, )
+OP_END
+
+/* File: c/OP_SPUT_WIDE_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_WIDE_JUMBO,     "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_SPUT_OBJECT_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_OBJECT_JUMBO,   "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_SPUT_BOOLEAN_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_BOOLEAN_JUMBO,  "", Int, )
+OP_END
+
+/* File: c/OP_SPUT_BYTE_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_BYTE_JUMBO,     "", Int, )
+OP_END
+
+/* File: c/OP_SPUT_CHAR_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_CHAR_JUMBO,     "", Int, )
+OP_END
+
+/* File: c/OP_SPUT_SHORT_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_SHORT_JUMBO,    "", Int, )
+OP_END
+
+/* File: c/OP_INVOKE_VIRTUAL_JUMBO.c */
+HANDLE_OPCODE(OP_INVOKE_VIRTUAL_JUMBO /*{vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA*/)
+    GOTO_invoke(invokeVirtual, true, true);
+OP_END
+
+/* File: c/OP_INVOKE_SUPER_JUMBO.c */
+HANDLE_OPCODE(OP_INVOKE_SUPER_JUMBO /*{vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA*/)
+    GOTO_invoke(invokeSuper, true, true);
+OP_END
+
+/* File: c/OP_INVOKE_DIRECT_JUMBO.c */
+HANDLE_OPCODE(OP_INVOKE_DIRECT_JUMBO /*{vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA*/)
+    GOTO_invoke(invokeDirect, true, true);
+OP_END
+
+/* File: c/OP_INVOKE_STATIC_JUMBO.c */
+HANDLE_OPCODE(OP_INVOKE_STATIC_JUMBO /*{vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA*/)
+    GOTO_invoke(invokeStatic, true, true);
+OP_END
+
+/* File: c/OP_INVOKE_INTERFACE_JUMBO.c */
+HANDLE_OPCODE(OP_INVOKE_INTERFACE_JUMBO /*{vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA*/)
+    GOTO_invoke(invokeInterface, true, true);
+OP_END
+
+/* File: c/OP_UNUSED_27FF.c */
+HANDLE_OPCODE(OP_UNUSED_27FF)
+OP_END
+
+/* File: c/OP_UNUSED_28FF.c */
+HANDLE_OPCODE(OP_UNUSED_28FF)
+OP_END
+
+/* File: c/OP_UNUSED_29FF.c */
+HANDLE_OPCODE(OP_UNUSED_29FF)
+OP_END
+
+/* File: c/OP_UNUSED_2AFF.c */
+HANDLE_OPCODE(OP_UNUSED_2AFF)
+OP_END
+
+/* File: c/OP_UNUSED_2BFF.c */
+HANDLE_OPCODE(OP_UNUSED_2BFF)
+OP_END
+
+/* File: c/OP_UNUSED_2CFF.c */
+HANDLE_OPCODE(OP_UNUSED_2CFF)
+OP_END
+
+/* File: c/OP_UNUSED_2DFF.c */
+HANDLE_OPCODE(OP_UNUSED_2DFF)
+OP_END
+
+/* File: c/OP_UNUSED_2EFF.c */
+HANDLE_OPCODE(OP_UNUSED_2EFF)
+OP_END
+
+/* File: c/OP_UNUSED_2FFF.c */
+HANDLE_OPCODE(OP_UNUSED_2FFF)
+OP_END
+
+/* File: c/OP_UNUSED_30FF.c */
+HANDLE_OPCODE(OP_UNUSED_30FF)
+OP_END
+
+/* File: c/OP_UNUSED_31FF.c */
+HANDLE_OPCODE(OP_UNUSED_31FF)
+OP_END
+
+/* File: c/OP_UNUSED_32FF.c */
+HANDLE_OPCODE(OP_UNUSED_32FF)
+OP_END
+
+/* File: c/OP_UNUSED_33FF.c */
+HANDLE_OPCODE(OP_UNUSED_33FF)
+OP_END
+
+/* File: c/OP_UNUSED_34FF.c */
+HANDLE_OPCODE(OP_UNUSED_34FF)
+OP_END
+
+/* File: c/OP_UNUSED_35FF.c */
+HANDLE_OPCODE(OP_UNUSED_35FF)
+OP_END
+
+/* File: c/OP_UNUSED_36FF.c */
+HANDLE_OPCODE(OP_UNUSED_36FF)
+OP_END
+
+/* File: c/OP_UNUSED_37FF.c */
+HANDLE_OPCODE(OP_UNUSED_37FF)
+OP_END
+
+/* File: c/OP_UNUSED_38FF.c */
+HANDLE_OPCODE(OP_UNUSED_38FF)
+OP_END
+
+/* File: c/OP_UNUSED_39FF.c */
+HANDLE_OPCODE(OP_UNUSED_39FF)
+OP_END
+
+/* File: c/OP_UNUSED_3AFF.c */
+HANDLE_OPCODE(OP_UNUSED_3AFF)
+OP_END
+
+/* File: c/OP_UNUSED_3BFF.c */
+HANDLE_OPCODE(OP_UNUSED_3BFF)
+OP_END
+
+/* File: c/OP_UNUSED_3CFF.c */
+HANDLE_OPCODE(OP_UNUSED_3CFF)
+OP_END
+
+/* File: c/OP_UNUSED_3DFF.c */
+HANDLE_OPCODE(OP_UNUSED_3DFF)
+OP_END
+
+/* File: c/OP_UNUSED_3EFF.c */
+HANDLE_OPCODE(OP_UNUSED_3EFF)
+OP_END
+
+/* File: c/OP_UNUSED_3FFF.c */
+HANDLE_OPCODE(OP_UNUSED_3FFF)
+OP_END
+
+/* File: c/OP_UNUSED_40FF.c */
+HANDLE_OPCODE(OP_UNUSED_40FF)
+OP_END
+
+/* File: c/OP_UNUSED_41FF.c */
+HANDLE_OPCODE(OP_UNUSED_41FF)
+OP_END
+
+/* File: c/OP_UNUSED_42FF.c */
+HANDLE_OPCODE(OP_UNUSED_42FF)
+OP_END
+
+/* File: c/OP_UNUSED_43FF.c */
+HANDLE_OPCODE(OP_UNUSED_43FF)
+OP_END
+
+/* File: c/OP_UNUSED_44FF.c */
+HANDLE_OPCODE(OP_UNUSED_44FF)
+OP_END
+
+/* File: c/OP_UNUSED_45FF.c */
+HANDLE_OPCODE(OP_UNUSED_45FF)
+OP_END
+
+/* File: c/OP_UNUSED_46FF.c */
+HANDLE_OPCODE(OP_UNUSED_46FF)
+OP_END
+
+/* File: c/OP_UNUSED_47FF.c */
+HANDLE_OPCODE(OP_UNUSED_47FF)
+OP_END
+
+/* File: c/OP_UNUSED_48FF.c */
+HANDLE_OPCODE(OP_UNUSED_48FF)
+OP_END
+
+/* File: c/OP_UNUSED_49FF.c */
+HANDLE_OPCODE(OP_UNUSED_49FF)
+OP_END
+
+/* File: c/OP_UNUSED_4AFF.c */
+HANDLE_OPCODE(OP_UNUSED_4AFF)
+OP_END
+
+/* File: c/OP_UNUSED_4BFF.c */
+HANDLE_OPCODE(OP_UNUSED_4BFF)
+OP_END
+
+/* File: c/OP_UNUSED_4CFF.c */
+HANDLE_OPCODE(OP_UNUSED_4CFF)
+OP_END
+
+/* File: c/OP_UNUSED_4DFF.c */
+HANDLE_OPCODE(OP_UNUSED_4DFF)
+OP_END
+
+/* File: c/OP_UNUSED_4EFF.c */
+HANDLE_OPCODE(OP_UNUSED_4EFF)
+OP_END
+
+/* File: c/OP_UNUSED_4FFF.c */
+HANDLE_OPCODE(OP_UNUSED_4FFF)
+OP_END
+
+/* File: c/OP_UNUSED_50FF.c */
+HANDLE_OPCODE(OP_UNUSED_50FF)
+OP_END
+
+/* File: c/OP_UNUSED_51FF.c */
+HANDLE_OPCODE(OP_UNUSED_51FF)
+OP_END
+
+/* File: c/OP_UNUSED_52FF.c */
+HANDLE_OPCODE(OP_UNUSED_52FF)
+OP_END
+
+/* File: c/OP_UNUSED_53FF.c */
+HANDLE_OPCODE(OP_UNUSED_53FF)
+OP_END
+
+/* File: c/OP_UNUSED_54FF.c */
+HANDLE_OPCODE(OP_UNUSED_54FF)
+OP_END
+
+/* File: c/OP_UNUSED_55FF.c */
+HANDLE_OPCODE(OP_UNUSED_55FF)
+OP_END
+
+/* File: c/OP_UNUSED_56FF.c */
+HANDLE_OPCODE(OP_UNUSED_56FF)
+OP_END
+
+/* File: c/OP_UNUSED_57FF.c */
+HANDLE_OPCODE(OP_UNUSED_57FF)
+OP_END
+
+/* File: c/OP_UNUSED_58FF.c */
+HANDLE_OPCODE(OP_UNUSED_58FF)
+OP_END
+
+/* File: c/OP_UNUSED_59FF.c */
+HANDLE_OPCODE(OP_UNUSED_59FF)
+OP_END
+
+/* File: c/OP_UNUSED_5AFF.c */
+HANDLE_OPCODE(OP_UNUSED_5AFF)
+OP_END
+
+/* File: c/OP_UNUSED_5BFF.c */
+HANDLE_OPCODE(OP_UNUSED_5BFF)
+OP_END
+
+/* File: c/OP_UNUSED_5CFF.c */
+HANDLE_OPCODE(OP_UNUSED_5CFF)
+OP_END
+
+/* File: c/OP_UNUSED_5DFF.c */
+HANDLE_OPCODE(OP_UNUSED_5DFF)
+OP_END
+
+/* File: c/OP_UNUSED_5EFF.c */
+HANDLE_OPCODE(OP_UNUSED_5EFF)
+OP_END
+
+/* File: c/OP_UNUSED_5FFF.c */
+HANDLE_OPCODE(OP_UNUSED_5FFF)
+OP_END
+
+/* File: c/OP_UNUSED_60FF.c */
+HANDLE_OPCODE(OP_UNUSED_60FF)
+OP_END
+
+/* File: c/OP_UNUSED_61FF.c */
+HANDLE_OPCODE(OP_UNUSED_61FF)
+OP_END
+
+/* File: c/OP_UNUSED_62FF.c */
+HANDLE_OPCODE(OP_UNUSED_62FF)
+OP_END
+
+/* File: c/OP_UNUSED_63FF.c */
+HANDLE_OPCODE(OP_UNUSED_63FF)
+OP_END
+
+/* File: c/OP_UNUSED_64FF.c */
+HANDLE_OPCODE(OP_UNUSED_64FF)
+OP_END
+
+/* File: c/OP_UNUSED_65FF.c */
+HANDLE_OPCODE(OP_UNUSED_65FF)
+OP_END
+
+/* File: c/OP_UNUSED_66FF.c */
+HANDLE_OPCODE(OP_UNUSED_66FF)
+OP_END
+
+/* File: c/OP_UNUSED_67FF.c */
+HANDLE_OPCODE(OP_UNUSED_67FF)
+OP_END
+
+/* File: c/OP_UNUSED_68FF.c */
+HANDLE_OPCODE(OP_UNUSED_68FF)
+OP_END
+
+/* File: c/OP_UNUSED_69FF.c */
+HANDLE_OPCODE(OP_UNUSED_69FF)
+OP_END
+
+/* File: c/OP_UNUSED_6AFF.c */
+HANDLE_OPCODE(OP_UNUSED_6AFF)
+OP_END
+
+/* File: c/OP_UNUSED_6BFF.c */
+HANDLE_OPCODE(OP_UNUSED_6BFF)
+OP_END
+
+/* File: c/OP_UNUSED_6CFF.c */
+HANDLE_OPCODE(OP_UNUSED_6CFF)
+OP_END
+
+/* File: c/OP_UNUSED_6DFF.c */
+HANDLE_OPCODE(OP_UNUSED_6DFF)
+OP_END
+
+/* File: c/OP_UNUSED_6EFF.c */
+HANDLE_OPCODE(OP_UNUSED_6EFF)
+OP_END
+
+/* File: c/OP_UNUSED_6FFF.c */
+HANDLE_OPCODE(OP_UNUSED_6FFF)
+OP_END
+
+/* File: c/OP_UNUSED_70FF.c */
+HANDLE_OPCODE(OP_UNUSED_70FF)
+OP_END
+
+/* File: c/OP_UNUSED_71FF.c */
+HANDLE_OPCODE(OP_UNUSED_71FF)
+OP_END
+
+/* File: c/OP_UNUSED_72FF.c */
+HANDLE_OPCODE(OP_UNUSED_72FF)
+OP_END
+
+/* File: c/OP_UNUSED_73FF.c */
+HANDLE_OPCODE(OP_UNUSED_73FF)
+OP_END
+
+/* File: c/OP_UNUSED_74FF.c */
+HANDLE_OPCODE(OP_UNUSED_74FF)
+OP_END
+
+/* File: c/OP_UNUSED_75FF.c */
+HANDLE_OPCODE(OP_UNUSED_75FF)
+OP_END
+
+/* File: c/OP_UNUSED_76FF.c */
+HANDLE_OPCODE(OP_UNUSED_76FF)
+OP_END
+
+/* File: c/OP_UNUSED_77FF.c */
+HANDLE_OPCODE(OP_UNUSED_77FF)
+OP_END
+
+/* File: c/OP_UNUSED_78FF.c */
+HANDLE_OPCODE(OP_UNUSED_78FF)
+OP_END
+
+/* File: c/OP_UNUSED_79FF.c */
+HANDLE_OPCODE(OP_UNUSED_79FF)
+OP_END
+
+/* File: c/OP_UNUSED_7AFF.c */
+HANDLE_OPCODE(OP_UNUSED_7AFF)
+OP_END
+
+/* File: c/OP_UNUSED_7BFF.c */
+HANDLE_OPCODE(OP_UNUSED_7BFF)
+OP_END
+
+/* File: c/OP_UNUSED_7CFF.c */
+HANDLE_OPCODE(OP_UNUSED_7CFF)
+OP_END
+
+/* File: c/OP_UNUSED_7DFF.c */
+HANDLE_OPCODE(OP_UNUSED_7DFF)
+OP_END
+
+/* File: c/OP_UNUSED_7EFF.c */
+HANDLE_OPCODE(OP_UNUSED_7EFF)
+OP_END
+
+/* File: c/OP_UNUSED_7FFF.c */
+HANDLE_OPCODE(OP_UNUSED_7FFF)
+OP_END
+
+/* File: c/OP_UNUSED_80FF.c */
+HANDLE_OPCODE(OP_UNUSED_80FF)
+OP_END
+
+/* File: c/OP_UNUSED_81FF.c */
+HANDLE_OPCODE(OP_UNUSED_81FF)
+OP_END
+
+/* File: c/OP_UNUSED_82FF.c */
+HANDLE_OPCODE(OP_UNUSED_82FF)
+OP_END
+
+/* File: c/OP_UNUSED_83FF.c */
+HANDLE_OPCODE(OP_UNUSED_83FF)
+OP_END
+
+/* File: c/OP_UNUSED_84FF.c */
+HANDLE_OPCODE(OP_UNUSED_84FF)
+OP_END
+
+/* File: c/OP_UNUSED_85FF.c */
+HANDLE_OPCODE(OP_UNUSED_85FF)
+OP_END
+
+/* File: c/OP_UNUSED_86FF.c */
+HANDLE_OPCODE(OP_UNUSED_86FF)
+OP_END
+
+/* File: c/OP_UNUSED_87FF.c */
+HANDLE_OPCODE(OP_UNUSED_87FF)
+OP_END
+
+/* File: c/OP_UNUSED_88FF.c */
+HANDLE_OPCODE(OP_UNUSED_88FF)
+OP_END
+
+/* File: c/OP_UNUSED_89FF.c */
+HANDLE_OPCODE(OP_UNUSED_89FF)
+OP_END
+
+/* File: c/OP_UNUSED_8AFF.c */
+HANDLE_OPCODE(OP_UNUSED_8AFF)
+OP_END
+
+/* File: c/OP_UNUSED_8BFF.c */
+HANDLE_OPCODE(OP_UNUSED_8BFF)
+OP_END
+
+/* File: c/OP_UNUSED_8CFF.c */
+HANDLE_OPCODE(OP_UNUSED_8CFF)
+OP_END
+
+/* File: c/OP_UNUSED_8DFF.c */
+HANDLE_OPCODE(OP_UNUSED_8DFF)
+OP_END
+
+/* File: c/OP_UNUSED_8EFF.c */
+HANDLE_OPCODE(OP_UNUSED_8EFF)
+OP_END
+
+/* File: c/OP_UNUSED_8FFF.c */
+HANDLE_OPCODE(OP_UNUSED_8FFF)
+OP_END
+
+/* File: c/OP_UNUSED_90FF.c */
+HANDLE_OPCODE(OP_UNUSED_90FF)
+OP_END
+
+/* File: c/OP_UNUSED_91FF.c */
+HANDLE_OPCODE(OP_UNUSED_91FF)
+OP_END
+
+/* File: c/OP_UNUSED_92FF.c */
+HANDLE_OPCODE(OP_UNUSED_92FF)
+OP_END
+
+/* File: c/OP_UNUSED_93FF.c */
+HANDLE_OPCODE(OP_UNUSED_93FF)
+OP_END
+
+/* File: c/OP_UNUSED_94FF.c */
+HANDLE_OPCODE(OP_UNUSED_94FF)
+OP_END
+
+/* File: c/OP_UNUSED_95FF.c */
+HANDLE_OPCODE(OP_UNUSED_95FF)
+OP_END
+
+/* File: c/OP_UNUSED_96FF.c */
+HANDLE_OPCODE(OP_UNUSED_96FF)
+OP_END
+
+/* File: c/OP_UNUSED_97FF.c */
+HANDLE_OPCODE(OP_UNUSED_97FF)
+OP_END
+
+/* File: c/OP_UNUSED_98FF.c */
+HANDLE_OPCODE(OP_UNUSED_98FF)
+OP_END
+
+/* File: c/OP_UNUSED_99FF.c */
+HANDLE_OPCODE(OP_UNUSED_99FF)
+OP_END
+
+/* File: c/OP_UNUSED_9AFF.c */
+HANDLE_OPCODE(OP_UNUSED_9AFF)
+OP_END
+
+/* File: c/OP_UNUSED_9BFF.c */
+HANDLE_OPCODE(OP_UNUSED_9BFF)
+OP_END
+
+/* File: c/OP_UNUSED_9CFF.c */
+HANDLE_OPCODE(OP_UNUSED_9CFF)
+OP_END
+
+/* File: c/OP_UNUSED_9DFF.c */
+HANDLE_OPCODE(OP_UNUSED_9DFF)
+OP_END
+
+/* File: c/OP_UNUSED_9EFF.c */
+HANDLE_OPCODE(OP_UNUSED_9EFF)
+OP_END
+
+/* File: c/OP_UNUSED_9FFF.c */
+HANDLE_OPCODE(OP_UNUSED_9FFF)
+OP_END
+
+/* File: c/OP_UNUSED_A0FF.c */
+HANDLE_OPCODE(OP_UNUSED_A0FF)
+OP_END
+
+/* File: c/OP_UNUSED_A1FF.c */
+HANDLE_OPCODE(OP_UNUSED_A1FF)
+OP_END
+
+/* File: c/OP_UNUSED_A2FF.c */
+HANDLE_OPCODE(OP_UNUSED_A2FF)
+OP_END
+
+/* File: c/OP_UNUSED_A3FF.c */
+HANDLE_OPCODE(OP_UNUSED_A3FF)
+OP_END
+
+/* File: c/OP_UNUSED_A4FF.c */
+HANDLE_OPCODE(OP_UNUSED_A4FF)
+OP_END
+
+/* File: c/OP_UNUSED_A5FF.c */
+HANDLE_OPCODE(OP_UNUSED_A5FF)
+OP_END
+
+/* File: c/OP_UNUSED_A6FF.c */
+HANDLE_OPCODE(OP_UNUSED_A6FF)
+OP_END
+
+/* File: c/OP_UNUSED_A7FF.c */
+HANDLE_OPCODE(OP_UNUSED_A7FF)
+OP_END
+
+/* File: c/OP_UNUSED_A8FF.c */
+HANDLE_OPCODE(OP_UNUSED_A8FF)
+OP_END
+
+/* File: c/OP_UNUSED_A9FF.c */
+HANDLE_OPCODE(OP_UNUSED_A9FF)
+OP_END
+
+/* File: c/OP_UNUSED_AAFF.c */
+HANDLE_OPCODE(OP_UNUSED_AAFF)
+OP_END
+
+/* File: c/OP_UNUSED_ABFF.c */
+HANDLE_OPCODE(OP_UNUSED_ABFF)
+OP_END
+
+/* File: c/OP_UNUSED_ACFF.c */
+HANDLE_OPCODE(OP_UNUSED_ACFF)
+OP_END
+
+/* File: c/OP_UNUSED_ADFF.c */
+HANDLE_OPCODE(OP_UNUSED_ADFF)
+OP_END
+
+/* File: c/OP_UNUSED_AEFF.c */
+HANDLE_OPCODE(OP_UNUSED_AEFF)
+OP_END
+
+/* File: c/OP_UNUSED_AFFF.c */
+HANDLE_OPCODE(OP_UNUSED_AFFF)
+OP_END
+
+/* File: c/OP_UNUSED_B0FF.c */
+HANDLE_OPCODE(OP_UNUSED_B0FF)
+OP_END
+
+/* File: c/OP_UNUSED_B1FF.c */
+HANDLE_OPCODE(OP_UNUSED_B1FF)
+OP_END
+
+/* File: c/OP_UNUSED_B2FF.c */
+HANDLE_OPCODE(OP_UNUSED_B2FF)
+OP_END
+
+/* File: c/OP_UNUSED_B3FF.c */
+HANDLE_OPCODE(OP_UNUSED_B3FF)
+OP_END
+
+/* File: c/OP_UNUSED_B4FF.c */
+HANDLE_OPCODE(OP_UNUSED_B4FF)
+OP_END
+
+/* File: c/OP_UNUSED_B5FF.c */
+HANDLE_OPCODE(OP_UNUSED_B5FF)
+OP_END
+
+/* File: c/OP_UNUSED_B6FF.c */
+HANDLE_OPCODE(OP_UNUSED_B6FF)
+OP_END
+
+/* File: c/OP_UNUSED_B7FF.c */
+HANDLE_OPCODE(OP_UNUSED_B7FF)
+OP_END
+
+/* File: c/OP_UNUSED_B8FF.c */
+HANDLE_OPCODE(OP_UNUSED_B8FF)
+OP_END
+
+/* File: c/OP_UNUSED_B9FF.c */
+HANDLE_OPCODE(OP_UNUSED_B9FF)
+OP_END
+
+/* File: c/OP_UNUSED_BAFF.c */
+HANDLE_OPCODE(OP_UNUSED_BAFF)
+OP_END
+
+/* File: c/OP_UNUSED_BBFF.c */
+HANDLE_OPCODE(OP_UNUSED_BBFF)
+OP_END
+
+/* File: c/OP_UNUSED_BCFF.c */
+HANDLE_OPCODE(OP_UNUSED_BCFF)
+OP_END
+
+/* File: c/OP_UNUSED_BDFF.c */
+HANDLE_OPCODE(OP_UNUSED_BDFF)
+OP_END
+
+/* File: c/OP_UNUSED_BEFF.c */
+HANDLE_OPCODE(OP_UNUSED_BEFF)
+OP_END
+
+/* File: c/OP_UNUSED_BFFF.c */
+HANDLE_OPCODE(OP_UNUSED_BFFF)
+OP_END
+
+/* File: c/OP_UNUSED_C0FF.c */
+HANDLE_OPCODE(OP_UNUSED_C0FF)
+OP_END
+
+/* File: c/OP_UNUSED_C1FF.c */
+HANDLE_OPCODE(OP_UNUSED_C1FF)
+OP_END
+
+/* File: c/OP_UNUSED_C2FF.c */
+HANDLE_OPCODE(OP_UNUSED_C2FF)
+OP_END
+
+/* File: c/OP_UNUSED_C3FF.c */
+HANDLE_OPCODE(OP_UNUSED_C3FF)
+OP_END
+
+/* File: c/OP_UNUSED_C4FF.c */
+HANDLE_OPCODE(OP_UNUSED_C4FF)
+OP_END
+
+/* File: c/OP_UNUSED_C5FF.c */
+HANDLE_OPCODE(OP_UNUSED_C5FF)
+OP_END
+
+/* File: c/OP_UNUSED_C6FF.c */
+HANDLE_OPCODE(OP_UNUSED_C6FF)
+OP_END
+
+/* File: c/OP_UNUSED_C7FF.c */
+HANDLE_OPCODE(OP_UNUSED_C7FF)
+OP_END
+
+/* File: c/OP_UNUSED_C8FF.c */
+HANDLE_OPCODE(OP_UNUSED_C8FF)
+OP_END
+
+/* File: c/OP_UNUSED_C9FF.c */
+HANDLE_OPCODE(OP_UNUSED_C9FF)
+OP_END
+
+/* File: c/OP_UNUSED_CAFF.c */
+HANDLE_OPCODE(OP_UNUSED_CAFF)
+OP_END
+
+/* File: c/OP_UNUSED_CBFF.c */
+HANDLE_OPCODE(OP_UNUSED_CBFF)
+OP_END
+
+/* File: c/OP_UNUSED_CCFF.c */
+HANDLE_OPCODE(OP_UNUSED_CCFF)
+OP_END
+
+/* File: c/OP_UNUSED_CDFF.c */
+HANDLE_OPCODE(OP_UNUSED_CDFF)
+OP_END
+
+/* File: c/OP_UNUSED_CEFF.c */
+HANDLE_OPCODE(OP_UNUSED_CEFF)
+OP_END
+
+/* File: c/OP_UNUSED_CFFF.c */
+HANDLE_OPCODE(OP_UNUSED_CFFF)
+OP_END
+
+/* File: c/OP_UNUSED_D0FF.c */
+HANDLE_OPCODE(OP_UNUSED_D0FF)
+OP_END
+
+/* File: c/OP_UNUSED_D1FF.c */
+HANDLE_OPCODE(OP_UNUSED_D1FF)
+OP_END
+
+/* File: c/OP_UNUSED_D2FF.c */
+HANDLE_OPCODE(OP_UNUSED_D2FF)
+OP_END
+
+/* File: c/OP_UNUSED_D3FF.c */
+HANDLE_OPCODE(OP_UNUSED_D3FF)
+OP_END
+
+/* File: c/OP_UNUSED_D4FF.c */
+HANDLE_OPCODE(OP_UNUSED_D4FF)
+OP_END
+
+/* File: c/OP_UNUSED_D5FF.c */
+HANDLE_OPCODE(OP_UNUSED_D5FF)
+OP_END
+
+/* File: c/OP_UNUSED_D6FF.c */
+HANDLE_OPCODE(OP_UNUSED_D6FF)
+OP_END
+
+/* File: c/OP_UNUSED_D7FF.c */
+HANDLE_OPCODE(OP_UNUSED_D7FF)
+OP_END
+
+/* File: c/OP_UNUSED_D8FF.c */
+HANDLE_OPCODE(OP_UNUSED_D8FF)
+OP_END
+
+/* File: c/OP_UNUSED_D9FF.c */
+HANDLE_OPCODE(OP_UNUSED_D9FF)
+OP_END
+
+/* File: c/OP_UNUSED_DAFF.c */
+HANDLE_OPCODE(OP_UNUSED_DAFF)
+OP_END
+
+/* File: c/OP_UNUSED_DBFF.c */
+HANDLE_OPCODE(OP_UNUSED_DBFF)
+OP_END
+
+/* File: c/OP_UNUSED_DCFF.c */
+HANDLE_OPCODE(OP_UNUSED_DCFF)
+OP_END
+
+/* File: c/OP_UNUSED_DDFF.c */
+HANDLE_OPCODE(OP_UNUSED_DDFF)
+OP_END
+
+/* File: c/OP_UNUSED_DEFF.c */
+HANDLE_OPCODE(OP_UNUSED_DEFF)
+OP_END
+
+/* File: c/OP_UNUSED_DFFF.c */
+HANDLE_OPCODE(OP_UNUSED_DFFF)
+OP_END
+
+/* File: c/OP_UNUSED_E0FF.c */
+HANDLE_OPCODE(OP_UNUSED_E0FF)
+OP_END
+
+/* File: c/OP_UNUSED_E1FF.c */
+HANDLE_OPCODE(OP_UNUSED_E1FF)
+OP_END
+
+/* File: c/OP_UNUSED_E2FF.c */
+HANDLE_OPCODE(OP_UNUSED_E2FF)
+OP_END
+
+/* File: c/OP_UNUSED_E3FF.c */
+HANDLE_OPCODE(OP_UNUSED_E3FF)
+OP_END
+
+/* File: c/OP_UNUSED_E4FF.c */
+HANDLE_OPCODE(OP_UNUSED_E4FF)
+OP_END
+
+/* File: c/OP_UNUSED_E5FF.c */
+HANDLE_OPCODE(OP_UNUSED_E5FF)
+OP_END
+
+/* File: c/OP_UNUSED_E6FF.c */
+HANDLE_OPCODE(OP_UNUSED_E6FF)
+OP_END
+
+/* File: c/OP_UNUSED_E7FF.c */
+HANDLE_OPCODE(OP_UNUSED_E7FF)
+OP_END
+
+/* File: c/OP_UNUSED_E8FF.c */
+HANDLE_OPCODE(OP_UNUSED_E8FF)
+OP_END
+
+/* File: c/OP_UNUSED_E9FF.c */
+HANDLE_OPCODE(OP_UNUSED_E9FF)
+OP_END
+
+/* File: c/OP_UNUSED_EAFF.c */
+HANDLE_OPCODE(OP_UNUSED_EAFF)
+OP_END
+
+/* File: c/OP_UNUSED_EBFF.c */
+HANDLE_OPCODE(OP_UNUSED_EBFF)
+OP_END
+
+/* File: c/OP_UNUSED_ECFF.c */
+HANDLE_OPCODE(OP_UNUSED_ECFF)
+OP_END
+
+/* File: c/OP_UNUSED_EDFF.c */
+HANDLE_OPCODE(OP_UNUSED_EDFF)
+OP_END
+
+/* File: c/OP_UNUSED_EEFF.c */
+HANDLE_OPCODE(OP_UNUSED_EEFF)
+OP_END
+
+/* File: c/OP_UNUSED_EFFF.c */
+HANDLE_OPCODE(OP_UNUSED_EFFF)
+OP_END
+
+/* File: c/OP_UNUSED_F0FF.c */
+HANDLE_OPCODE(OP_UNUSED_F0FF)
+OP_END
+
+/* File: c/OP_UNUSED_F1FF.c */
+HANDLE_OPCODE(OP_UNUSED_F1FF)
+    /*
      * In portable interp, most unused opcodes will fall through to here.
      */
-    LOGE("unknown opcode 0x%02x\n", INST_INST(inst));
+    LOGE("unknown opcode 0x%04x\n", inst);
     dvmAbort();
     FINISH(1);
 OP_END
 
+/* File: c/OP_INVOKE_OBJECT_INIT_JUMBO.c */
+HANDLE_OPCODE(OP_INVOKE_OBJECT_INIT_JUMBO /*{vCCCC..vNNNN}, meth@AAAAAAAA*/)
+    {
+        Object* obj;
+
+        vsrc1 = FETCH(4);               /* reg number of "this" pointer */
+        obj = GET_REGISTER_AS_OBJECT(vsrc1);
+
+        if (!checkForNullExportPC(obj, fp, pc))
+            GOTO_exceptionThrown();
+
+        /*
+         * The object should be marked "finalizable" when Object.<init>
+         * completes normally.  We're going to assume it does complete
+         * (by virtue of being nothing but a return-void) and set it now.
+         */
+        if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE)) {
+            dvmSetFinalizable(obj);
+        }
+
+#if INTERP_TYPE == INTERP_DBG
+        if (DEBUGGER_ACTIVE) {
+            /* behave like OP_INVOKE_DIRECT_RANGE */
+            GOTO_invoke(invokeDirect, true, true);
+        }
+#endif
+        FINISH(5);
+    }
+OP_END
+
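Object.<init> is an empty method, so the handler above skips the actual invoke on the fast path and only reproduces its one observable side effect: marking instances of finalizable classes. A hedged sketch of that shortcut, reusing the names visible in the handler and assuming the usual Dalvik headers (Object, class flags, bool) are in scope:

    /* Sketch only: the fast path taken by OP_INVOKE_OBJECT_INIT_JUMBO when
     * no debugger is attached.  Returns false when the caller should raise
     * a NullPointerException instead. */
    static bool fastPathObjectInit(Object* obj)
    {
        if (obj == NULL)
            return false;                        /* caller raises NPE */
        if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE))
            dvmSetFinalizable(obj);              /* <init>'s only side effect */
        return true;
    }
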
+/* File: c/OP_IGET_VOLATILE_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_VOLATILE_JUMBO, "-volatile/jumbo", IntVolatile, )
+OP_END
+
+/* File: c/OP_IGET_WIDE_VOLATILE_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE)
+OP_END
+
+/* File: c/OP_IGET_OBJECT_VOLATILE_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_IPUT_VOLATILE_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_VOLATILE_JUMBO, "-volatile/jumbo", IntVolatile, )
+OP_END
+
+/* File: c/OP_IPUT_WIDE_VOLATILE_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE)
+OP_END
+
+/* File: c/OP_IPUT_OBJECT_VOLATILE_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_SGET_VOLATILE_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_VOLATILE_JUMBO, "-volatile/jumbo", IntVolatile, )
+OP_END
+
+/* File: c/OP_SGET_WIDE_VOLATILE_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE)
+OP_END
+
+/* File: c/OP_SGET_OBJECT_VOLATILE_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_SPUT_VOLATILE_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_VOLATILE_JUMBO, "-volatile/jumbo", IntVolatile, )
+OP_END
+
+/* File: c/OP_SPUT_WIDE_VOLATILE_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE)
+OP_END
+
+/* File: c/OP_SPUT_OBJECT_VOLATILE_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_THROW_VERIFICATION_ERROR_JUMBO.c */
+HANDLE_OPCODE(OP_THROW_VERIFICATION_ERROR_JUMBO)
+    EXPORT_PC();
+    vsrc1 = FETCH(3);
+    ref = FETCH(1) | (u4)FETCH(2) << 16;      /* class/field/method ref */
+    dvmThrowVerificationError(curMethod, vsrc1, ref);
+    GOTO_exceptionThrown();
+OP_END
+
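The invoke-kind/jumbo opcodes above all funnel into the shared goto targets below, which pull their operands out of the same 5-unit encoding: a 32-bit method index in units 1 and 2, the argument count in unit 3, the first register of the range in unit 4, plus an ADJUST_PC(2) that advances the pc partway so the shared return path can treat the rest like a non-jumbo invoke. A hedged sketch of that decode (hypothetical helper and struct names):

    #include <stdint.h>

    struct JumboInvokeOperands {
        uint32_t methodIdx;   /* meth@AAAAAAAA: units 1 and 2  */
        uint16_t argCount;    /* BBBB: number of argument regs */
        uint16_t firstReg;    /* CCCC: base of register range  */
    };

    /* Sketch only: mirrors the jumboFormat branches in the invoke targets
     * below; "pc" points at the opcode unit. */
    static struct JumboInvokeOperands decodeJumboInvoke(const uint16_t* pc)
    {
        struct JumboInvokeOperands ops;
        ops.methodIdx = (uint32_t)pc[1] | ((uint32_t)pc[2] << 16);
        ops.argCount  = pc[3];
        ops.firstReg  = pc[4];
        return ops;
    }
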
 /* File: c/gotoTargets.c */
 /*
  * C footer.  This has some common code shared by the various targets.
@@ -3163,7 +4502,7 @@
  * next instruction.  Here, these are subroutines that return to the caller.
  */
 
-GOTO_TARGET(filledNewArray, bool methodCallRange)
+GOTO_TARGET(filledNewArray, bool methodCallRange, bool jumboFormat)
     {
         ClassObject* arrayClass;
         ArrayObject* newArray;
@@ -3174,19 +4513,28 @@
 
         EXPORT_PC();
 
-        ref = FETCH(1);             /* class ref */
-        vdst = FETCH(2);            /* first 4 regs -or- range base */
-
-        if (methodCallRange) {
-            vsrc1 = INST_AA(inst);  /* #of elements */
-            arg5 = -1;              /* silence compiler warning */
-            ILOGV("|filled-new-array-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* class ref */
+            vsrc1 = FETCH(3);                     /* #of elements */
+            vdst = FETCH(4);                      /* range base */
+            arg5 = -1;                            /* silence compiler warning */
+            ILOGV("|filled-new-array/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
         } else {
-            arg5 = INST_A(inst);
-            vsrc1 = INST_B(inst);   /* #of elements */
-            ILOGV("|filled-new-array args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1, ref, vdst, arg5);
+            ref = FETCH(1);             /* class ref */
+            vdst = FETCH(2);            /* first 4 regs -or- range base */
+
+            if (methodCallRange) {
+                vsrc1 = INST_AA(inst);  /* #of elements */
+                arg5 = -1;              /* silence compiler warning */
+                ILOGV("|filled-new-array-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+            } else {
+                arg5 = INST_A(inst);
+                vsrc1 = INST_B(inst);   /* #of elements */
+                ILOGV("|filled-new-array args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1, ref, vdst, arg5);
+            }
         }
 
         /*
@@ -3200,7 +4548,7 @@
         }
         /*
         if (!dvmIsArrayClass(arrayClass)) {
-            dvmThrowException("Ljava/lang/RuntimeError;",
+            dvmThrowRuntimeException(
                 "filled-new-array needs array class");
             GOTO_exceptionThrown();
         }
@@ -3216,13 +4564,12 @@
         typeCh = arrayClass->descriptor[1];
         if (typeCh == 'D' || typeCh == 'J') {
             /* category 2 primitives not allowed */
-            dvmThrowException("Ljava/lang/RuntimeError;",
-                "bad filled array req");
+            dvmThrowRuntimeException("bad filled array req");
             GOTO_exceptionThrown();
         } else if (typeCh != 'L' && typeCh != '[' && typeCh != 'I') {
             /* TODO: requires multiple "fill in" loops with different widths */
             LOGE("non-int primitives not implemented\n");
-            dvmThrowException("Ljava/lang/InternalError;",
+            dvmThrowInternalError(
                 "filled-new-array not implemented for anything but 'int'");
             GOTO_exceptionThrown();
         }
@@ -3255,35 +4602,49 @@
 
         retval.l = newArray;
     }
-    FINISH(3);
+    if (jumboFormat) {
+        FINISH(5);
+    } else {
+        FINISH(3);
+    }
 GOTO_TARGET_END
 
 
-GOTO_TARGET(invokeVirtual, bool methodCallRange)
+GOTO_TARGET(invokeVirtual, bool methodCallRange, bool jumboFormat)
     {
         Method* baseMethod;
         Object* thisPtr;
 
         EXPORT_PC();
 
-        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-        ref = FETCH(1);             /* method ref */
-        vdst = FETCH(2);            /* 4 regs -or- first reg */
-
-        /*
-         * The object against which we are executing a method is always
-         * in the first argument.
-         */
-        if (methodCallRange) {
-            assert(vsrc1 > 0);
-            ILOGV("|invoke-virtual-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+            vsrc1 = FETCH(3);                     /* count */
+            vdst = FETCH(4);                      /* first reg */
+            ADJUST_PC(2);     /* advance pc partially to make returns easier */
+            ILOGV("|invoke-virtual/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
             thisPtr = (Object*) GET_REGISTER(vdst);
         } else {
-            assert((vsrc1>>4) > 0);
-            ILOGV("|invoke-virtual args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
-            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+            ref = FETCH(1);             /* method ref */
+            vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            if (methodCallRange) {
+                assert(vsrc1 > 0);
+                ILOGV("|invoke-virtual-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+                thisPtr = (Object*) GET_REGISTER(vdst);
+            } else {
+                assert((vsrc1>>4) > 0);
+                ILOGV("|invoke-virtual args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+                thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+            }
         }
 
         if (!checkForNull(thisPtr))
@@ -3324,8 +4685,7 @@
              * Works fine unless Sub stops providing an implementation of
              * the method.
              */
-            dvmThrowException("Ljava/lang/AbstractMethodError;",
-                "abstract method not implemented");
+            dvmThrowAbstractMethodError("abstract method not implemented");
             GOTO_exceptionThrown();
         }
 #else
@@ -3355,26 +4715,37 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeSuper, bool methodCallRange)
+GOTO_TARGET(invokeSuper, bool methodCallRange, bool jumboFormat)
     {
         Method* baseMethod;
         u2 thisReg;
 
         EXPORT_PC();
 
-        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-        ref = FETCH(1);             /* method ref */
-        vdst = FETCH(2);            /* 4 regs -or- first reg */
-
-        if (methodCallRange) {
-            ILOGV("|invoke-super-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+            vsrc1 = FETCH(3);                     /* count */
+            vdst = FETCH(4);                      /* first reg */
+            ADJUST_PC(2);     /* advance pc partially to make returns easier */
+            ILOGV("|invoke-super/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
             thisReg = vdst;
         } else {
-            ILOGV("|invoke-super args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
-            thisReg = vdst & 0x0f;
+            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+            ref = FETCH(1);             /* method ref */
+            vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+            if (methodCallRange) {
+                ILOGV("|invoke-super-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+                thisReg = vdst;
+            } else {
+                ILOGV("|invoke-super args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+                thisReg = vdst & 0x0f;
+            }
         }
+
         /* impossible in well-formed code, but we must check nevertheless */
         if (!checkForNull((Object*) GET_REGISTER(thisReg)))
             GOTO_exceptionThrown();
@@ -3409,15 +4780,13 @@
              * Method does not exist in the superclass.  Could happen if
              * superclass gets updated.
              */
-            dvmThrowException("Ljava/lang/NoSuchMethodError;",
-                baseMethod->name);
+            dvmThrowNoSuchMethodError(baseMethod->name);
             GOTO_exceptionThrown();
         }
         methodToCall = curMethod->clazz->super->vtable[baseMethod->methodIndex];
 #if 0
         if (dvmIsAbstractMethod(methodToCall)) {
-            dvmThrowException("Ljava/lang/AbstractMethodError;",
-                "abstract method not implemented");
+            dvmThrowAbstractMethodError("abstract method not implemented");
             GOTO_exceptionThrown();
         }
 #else
@@ -3433,32 +4802,43 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeInterface, bool methodCallRange)
+GOTO_TARGET(invokeInterface, bool methodCallRange, bool jumboFormat)
     {
         Object* thisPtr;
         ClassObject* thisClass;
 
         EXPORT_PC();
 
-        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-        ref = FETCH(1);             /* method ref */
-        vdst = FETCH(2);            /* 4 regs -or- first reg */
-
-        /*
-         * The object against which we are executing a method is always
-         * in the first argument.
-         */
-        if (methodCallRange) {
-            assert(vsrc1 > 0);
-            ILOGV("|invoke-interface-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+            vsrc1 = FETCH(3);                     /* count */
+            vdst = FETCH(4);                      /* first reg */
+            ADJUST_PC(2);     /* advance pc partially to make returns easier */
+            ILOGV("|invoke-interface/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
             thisPtr = (Object*) GET_REGISTER(vdst);
         } else {
-            assert((vsrc1>>4) > 0);
-            ILOGV("|invoke-interface args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
-            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+            ref = FETCH(1);             /* method ref */
+            vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            if (methodCallRange) {
+                assert(vsrc1 > 0);
+                ILOGV("|invoke-interface-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+                thisPtr = (Object*) GET_REGISTER(vdst);
+            } else {
+                assert((vsrc1>>4) > 0);
+                ILOGV("|invoke-interface args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+                thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+            }
         }
+
         if (!checkForNull(thisPtr))
             GOTO_exceptionThrown();
 
@@ -3483,25 +4863,36 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeDirect, bool methodCallRange)
+GOTO_TARGET(invokeDirect, bool methodCallRange, bool jumboFormat)
     {
         u2 thisReg;
 
-        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-        ref = FETCH(1);             /* method ref */
-        vdst = FETCH(2);            /* 4 regs -or- first reg */
-
         EXPORT_PC();
 
-        if (methodCallRange) {
-            ILOGV("|invoke-direct-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+            vsrc1 = FETCH(3);                     /* count */
+            vdst = FETCH(4);                      /* first reg */
+            ADJUST_PC(2);     /* advance pc partially to make returns easier */
+            ILOGV("|invoke-direct/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
             thisReg = vdst;
         } else {
-            ILOGV("|invoke-direct args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
-            thisReg = vdst & 0x0f;
+            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+            ref = FETCH(1);             /* method ref */
+            vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+            if (methodCallRange) {
+                ILOGV("|invoke-direct-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+                thisReg = vdst;
+            } else {
+                ILOGV("|invoke-direct args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+                thisReg = vdst & 0x0f;
+            }
         }
+
         if (!checkForNull((Object*) GET_REGISTER(thisReg)))
             GOTO_exceptionThrown();
 
@@ -3518,19 +4909,28 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeStatic, bool methodCallRange)
-    vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-    ref = FETCH(1);             /* method ref */
-    vdst = FETCH(2);            /* 4 regs -or- first reg */
-
+GOTO_TARGET(invokeStatic, bool methodCallRange, bool jumboFormat)
     EXPORT_PC();
 
-    if (methodCallRange)
-        ILOGV("|invoke-static-range args=%d @0x%04x {regs=v%d-v%d}",
+    if (jumboFormat) {
+        ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+        vsrc1 = FETCH(3);                     /* count */
+        vdst = FETCH(4);                      /* first reg */
+        ADJUST_PC(2);     /* advance pc partially to make returns easier */
+        ILOGV("|invoke-static/jumbo args=%d @0x%08x {regs=v%d-v%d}",
             vsrc1, ref, vdst, vdst+vsrc1-1);
-    else
-        ILOGV("|invoke-static args=%d @0x%04x {regs=0x%04x %x}",
-            vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+    } else {
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* method ref */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        if (methodCallRange)
+            ILOGV("|invoke-static-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+        else
+            ILOGV("|invoke-static args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+    }
 
     methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
     if (methodToCall == NULL) {
@@ -3547,13 +4947,13 @@
          */
         if (dvmDexGetResolvedMethod(methodClassDex, ref) == NULL) {
             /* Class initialization is still ongoing */
-            ABORT_JIT_TSELECT();
+            END_JIT_TSELECT();
         }
     }
     GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeVirtualQuick, bool methodCallRange)
+GOTO_TARGET(invokeVirtualQuick, bool methodCallRange, bool jumboFormat)
     {
         Object* thisPtr;
 
@@ -3590,13 +4990,12 @@
          * Combine the object we found with the vtable offset in the
          * method.
          */
-        assert(ref < thisPtr->clazz->vtableCount);
+        assert(ref < (unsigned int) thisPtr->clazz->vtableCount);
         methodToCall = thisPtr->clazz->vtable[ref];
 
 #if 0
         if (dvmIsAbstractMethod(methodToCall)) {
-            dvmThrowException("Ljava/lang/AbstractMethodError;",
-                "abstract method not implemented");
+            dvmThrowAbstractMethodError("abstract method not implemented");
             GOTO_exceptionThrown();
         }
 #else
@@ -3612,7 +5011,7 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeSuperQuick, bool methodCallRange)
+GOTO_TARGET(invokeSuperQuick, bool methodCallRange, bool jumboFormat)
     {
         u2 thisReg;
 
@@ -3637,11 +5036,11 @@
 
 #if 0   /* impossible in optimized + verified code */
         if (ref >= curMethod->clazz->super->vtableCount) {
-            dvmThrowException("Ljava/lang/NoSuchMethodError;", NULL);
+            dvmThrowNoSuchMethodError(NULL);
             GOTO_exceptionThrown();
         }
 #else
-        assert(ref < curMethod->clazz->super->vtableCount);
+        assert(ref < (unsigned int) curMethod->clazz->super->vtableCount);
 #endif
 
         /*
@@ -3657,8 +5056,7 @@
 
 #if 0
         if (dvmIsAbstractMethod(methodToCall)) {
-            dvmThrowException("Ljava/lang/AbstractMethodError;",
-                "abstract method not implemented");
+            dvmThrowAbstractMethodError("abstract method not implemented");
             GOTO_exceptionThrown();
         }
 #else
@@ -3706,7 +5104,7 @@
 #endif
 
         /* back up to previous frame and see if we hit a break */
-        fp = saveArea->prevFrame;
+        fp = (u4*)saveArea->prevFrame;
         assert(fp != NULL);
         if (dvmIsBreakFrame(fp)) {
             /* bail without popping the method frame from stack */
@@ -3760,8 +5158,8 @@
         PERIODIC_CHECKS(kInterpEntryThrow, 0);
 
 #if defined(WITH_JIT)
-        // Something threw during trace selection - abort the current trace
-        ABORT_JIT_TSELECT();
+        // Something threw during trace selection - end the current trace
+        END_JIT_TSELECT();
 #endif
         /*
          * We save off the exception and clear the exception status.  While
@@ -3793,7 +5191,7 @@
          * here, and have the JNI exception code do the reporting to the
          * debugger.
          */
-        if (gDvm.debuggerActive) {
+        if (DEBUGGER_ACTIVE) {
             void* catchFrame;
             catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
                         exception, true, &catchFrame);
@@ -3818,7 +5216,7 @@
          * the "catch" blocks.
          */
         catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
-                    exception, false, (void*)&fp);
+                    exception, false, (void**)(void*)&fp);
 
         /*
          * Restore the stack bounds after an overflow.  This isn't going to
@@ -4051,7 +5449,7 @@
             curMethod = methodToCall;
             methodClassDex = curMethod->clazz->pDvmDex;
             pc = methodToCall->insns;
-            fp = self->curFrame = newFp;
+            self->curFrame = fp = newFp;
 #ifdef EASY_GDB
             debugSaveArea = SAVEAREA_FROM_FP(newFp);
 #endif
@@ -4064,18 +5462,14 @@
             FINISH(0);                              // jump to method start
         } else {
             /* set this up for JNI locals, even if not a JNI native */
-#ifdef USE_INDIRECT_REF
             newSaveArea->xtra.localRefCookie = self->jniLocalRefTable.segmentState.all;
-#else
-            newSaveArea->xtra.localRefCookie = self->jniLocalRefTable.nextEntry;
-#endif
 
             self->curFrame = newFp;
 
             DUMP_REGS(methodToCall, newFp, true);   // show input args
 
 #if (INTERP_TYPE == INTERP_DBG)
-            if (gDvm.debuggerActive) {
+            if (DEBUGGER_ACTIVE) {
                 dvmDbgPostLocationEvent(methodToCall, -1,
                     dvmGetThisPtr(curMethod, fp), DBG_METHOD_ENTRY);
             }
@@ -4102,7 +5496,7 @@
             (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
 
 #if (INTERP_TYPE == INTERP_DBG)
-            if (gDvm.debuggerActive) {
+            if (DEBUGGER_ACTIVE) {
                 dvmDbgPostLocationEvent(methodToCall, -1,
                     dvmGetThisPtr(curMethod, fp), DBG_METHOD_EXIT);
             }
@@ -4157,7 +5551,7 @@
 bail:
     ILOGD("|-- Leaving interpreter loop");      // note "curMethod" may be NULL
 
-    interpState->retval = retval;
+    self->retval = retval;
     return false;
 
 bail_switch:
@@ -4169,18 +5563,18 @@
      * TODO: figure out if preserving this makes any sense.
      */
 #if INTERP_TYPE == INTERP_DBG
-    interpState->debugIsMethodEntry = debugIsMethodEntry;
+    self->debugIsMethodEntry = debugIsMethodEntry;
 #else
-    interpState->debugIsMethodEntry = false;
+    self->debugIsMethodEntry = false;
 #endif
 
     /* export state changes */
-    interpState->method = curMethod;
-    interpState->pc = pc;
-    interpState->fp = fp;
+    self->interpSave.method = curMethod;
+    self->interpSave.pc = pc;
+    self->interpSave.fp = fp;
     /* debugTrackedRefStart doesn't change */
-    interpState->retval = retval;   /* need for _entryPoint=ret */
-    interpState->nextMode =
+    self->retval = retval;   /* need for _entryPoint=ret */
+    self->nextMode =
         (INTERP_TYPE == INTERP_STD) ? INTERP_DBG : INTERP_STD;
     LOGVV(" meth='%s.%s' pc=0x%x fp=%p\n",
         curMethod->clazz->descriptor, curMethod->name,
diff --git a/vm/mterp/out/InterpC-x86-atom.c b/vm/mterp/out/InterpC-x86-atom.c
index 9946bdb..7d9c1af 100644
--- a/vm/mterp/out/InterpC-x86-atom.c
+++ b/vm/mterp/out/InterpC-x86-atom.c
@@ -58,24 +58,31 @@
 #endif
 
 /*
- * ARM EABI requires 64-bit alignment for access to 64-bit data types.  We
- * can't just use pointers to copy 64-bit values out of our interpreted
- * register set, because gcc will generate ldrd/strd.
+ * Some architectures require 64-bit alignment for access to 64-bit data
+ * types.  We can't just use pointers to copy 64-bit values out of our
+ * interpreted register set, because gcc may assume the pointer target is
+ * aligned and generate invalid code.
  *
- * The __UNION version copies data in and out of a union.  The __MEMCPY
- * version uses a memcpy() call to do the transfer; gcc is smart enough to
- * not actually call memcpy().  The __UNION version is very bad on ARM;
- * it only uses one more instruction than __MEMCPY, but for some reason
- * gcc thinks it needs separate storage for every instance of the union.
- * On top of that, it feels the need to zero them out at the start of the
- * method.  Net result is we zero out ~700 bytes of stack space at the top
- * of the interpreter using ARM STM instructions.
+ * There are two common approaches:
+ *  (1) Use a union that defines a 32-bit pair and a 64-bit value.
+ *  (2) Call memcpy().
+ *
+ * Depending upon what compiler you're using and what options are specified,
+ * one may be faster than the other.  For example, the compiler might
+ * convert a memcpy() of 8 bytes into a series of instructions and omit
+ * the call.  The union version could cause some strange side-effects,
+ * e.g. for a while ARM gcc thought it needed separate storage for each
+ * inlined instance, and generated instructions to zero out ~700 bytes of
+ * stack space at the top of the interpreter.
+ *
+ * The default is to use memcpy().  The current gcc for ARM seems to do
+ * better with the union.
  */
 #if defined(__ARM_EABI__)
-//# define NO_UNALIGN_64__UNION
-# define NO_UNALIGN_64__MEMCPY
+# define NO_UNALIGN_64__UNION
 #endif
 
+
 //#define LOG_INSTR                   /* verbose debugging */
 /* set and adjust ANDROID_LOG_TAGS='*:i jdwp:i dalvikvm:i dalvikvmi:i' */
 
@@ -171,12 +178,10 @@
     conv.parts[0] = ptr[0];
     conv.parts[1] = ptr[1];
     return conv.ll;
-#elif defined(NO_UNALIGN_64__MEMCPY)
+#else
     s8 val;
     memcpy(&val, &ptr[idx], 8);
     return val;
-#else
-    return *((s8*) &ptr[idx]);
 #endif
 }
 
@@ -190,10 +195,8 @@
     conv.ll = val;
     ptr[0] = conv.parts[0];
     ptr[1] = conv.parts[1];
-#elif defined(NO_UNALIGN_64__MEMCPY)
-    memcpy(&ptr[idx], &val, 8);
 #else
-    *((s8*) &ptr[idx]) = val;
+    memcpy(&ptr[idx], &val, 8);
 #endif
 }
 
@@ -207,12 +210,10 @@
     conv.parts[0] = ptr[0];
     conv.parts[1] = ptr[1];
     return conv.d;
-#elif defined(NO_UNALIGN_64__MEMCPY)
+#else
     double dval;
     memcpy(&dval, &ptr[idx], 8);
     return dval;
-#else
-    return *((double*) &ptr[idx]);
 #endif
 }
 
@@ -226,10 +227,8 @@
     conv.d = dval;
     ptr[0] = conv.parts[0];
     ptr[1] = conv.parts[1];
-#elif defined(NO_UNALIGN_64__MEMCPY)
-    memcpy(&ptr[idx], &dval, 8);
 #else
-    *((double*) &ptr[idx]) = dval;
+    memcpy(&ptr[idx], &dval, 8);
 #endif
 }
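
The union and memcpy() strategies contrasted in the header comment above can be written out side by side. A self-contained sketch, assuming the interpreted register file is an array of 32-bit words (Dalvik's u4) and using hypothetical helper names:

    #include <stdint.h>
    #include <string.h>

    /* (1) NO_UNALIGN_64__UNION style: copy through a 64/2x32 union. */
    static int64_t getLongViaUnion(const uint32_t* regs, int idx)
    {
        union { int64_t ll; uint32_t parts[2]; } conv;
        conv.parts[0] = regs[idx];
        conv.parts[1] = regs[idx + 1];
        return conv.ll;
    }

    /* (2) memcpy() style: an 8-byte copy the compiler can usually inline. */
    static int64_t getLongViaMemcpy(const uint32_t* regs, int idx)
    {
        int64_t val;
        memcpy(&val, &regs[idx], sizeof(val));
        return val;
    }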
 
@@ -318,10 +317,10 @@
 
 /*
  * The current PC must be available to Throwable constructors, e.g.
- * those created by dvmThrowException(), so that the exception stack
- * trace can be generated correctly.  If we don't do this, the offset
- * within the current method won't be shown correctly.  See the notes
- * in Exception.c.
+ * those created by the various exception throw routines, so that the
+ * exception stack trace can be generated correctly.  If we don't do this,
+ * the offset within the current method won't be shown correctly.  See the
+ * notes in Exception.c.
  *
  * This is also used to determine the address for precise GC.
  *
@@ -360,7 +359,7 @@
 static inline bool checkForNull(Object* obj)
 {
     if (obj == NULL) {
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         return false;
     }
 #ifdef WITH_EXTRA_OBJECT_VALIDATION
@@ -392,7 +391,7 @@
 {
     if (obj == NULL) {
         EXPORT_PC();
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         return false;
     }
 #ifdef WITH_EXTRA_OBJECT_VALIDATION
@@ -418,7 +417,7 @@
 # define CHECK_TRACKED_REFS() ((void)0)
 #define CHECK_JIT_BOOL() (false)
 #define CHECK_JIT_VOID()
-#define ABORT_JIT_TSELECT() ((void)0)
+#define END_JIT_TSELECT() ((void)0)
 
 /*
  * In the C mterp stubs, "goto" is a function call followed immediately
@@ -426,11 +425,11 @@
  */
 
 #define GOTO_TARGET_DECL(_target, ...)                                      \
-    void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__);
+    void dvmMterp_##_target(Thread* self, ## __VA_ARGS__);
 
 /* (void)xxx to quiet unused variable compiler warnings. */
 #define GOTO_TARGET(_target, ...)                                           \
-    void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__) {              \
+    void dvmMterp_##_target(Thread* self, ## __VA_ARGS__) {                 \
         u2 ref, vsrc1, vsrc2, vdst;                                         \
         u2 inst = FETCH(0);                                                 \
         const Method* methodToCall;                                         \
@@ -441,16 +440,15 @@
 #define GOTO_TARGET_END }
 
 /*
- * Redefine what used to be local variable accesses into MterpGlue struct
+ * Redefine what used to be local variable accesses into Thread struct
  * references.  (These are undefined down in "footer.c".)
  */
-#define retval                  glue->retval
-#define pc                      glue->pc
-#define fp                      glue->fp
-#define curMethod               glue->method
-#define methodClassDex          glue->methodClassDex
-#define self                    glue->self
-#define debugTrackedRefStart    glue->debugTrackedRefStart
+#define retval                  self->retval
+#define pc                      self->interpSave.pc
+#define fp                      self->interpSave.fp
+#define curMethod               self->interpSave.method
+#define methodClassDex          self->interpSave.methodClassDex
+#define debugTrackedRefStart    self->interpSave.debugTrackedRefStart
 
 /* ugh */
 #define STUB_HACK(x) x
@@ -458,12 +456,12 @@
 
 /*
  * Opcode handler framing macros.  Here, each opcode is a separate function
- * that takes a "glue" argument and returns void.  We can't declare
+ * that takes a "self" argument and returns void.  We can't declare
  * these "static" because they may be called from an assembly stub.
  * (void)xxx to quiet unused variable compiler warnings.
  */
 #define HANDLE_OPCODE(_op)                                                  \
-    void dvmMterp_##_op(MterpGlue* glue) {                                  \
+    void dvmMterp_##_op(Thread* self) {                                     \
         u2 ref, vsrc1, vsrc2, vdst;                                         \
         u2 inst = FETCH(0);                                                 \
         (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst;
@@ -490,25 +488,25 @@
 
 #define GOTO_exceptionThrown()                                              \
     do {                                                                    \
-        dvmMterp_exceptionThrown(glue);                                     \
+        dvmMterp_exceptionThrown(self);                                     \
         return;                                                             \
     } while(false)
 
 #define GOTO_returnFromMethod()                                             \
     do {                                                                    \
-        dvmMterp_returnFromMethod(glue);                                    \
+        dvmMterp_returnFromMethod(self);                                    \
         return;                                                             \
     } while(false)
 
-#define GOTO_invoke(_target, _methodCallRange)                              \
+#define GOTO_invoke(_target, _methodCallRange, _jumboFormat)                \
     do {                                                                    \
-        dvmMterp_##_target(glue, _methodCallRange);                         \
+        dvmMterp_##_target(self, _methodCallRange, _jumboFormat);           \
         return;                                                             \
     } while(false)
 
 #define GOTO_invokeMethod(_methodCallRange, _methodToCall, _vsrc1, _vdst)   \
     do {                                                                    \
-        dvmMterp_invokeMethod(glue, _methodCallRange, _methodToCall,        \
+        dvmMterp_invokeMethod(self, _methodCallRange, _methodToCall,        \
             _vsrc1, _vdst);                                                 \
         return;                                                             \
     } while(false)
@@ -518,9 +516,9 @@
  * if we need to switch to the other interpreter upon our return.
  */
 #define GOTO_bail()                                                         \
-    dvmMterpStdBail(glue, false);
+    dvmMterpStdBail(self, false);
 #define GOTO_bail_switch()                                                  \
-    dvmMterpStdBail(glue, true);
+    dvmMterpStdBail(self, true);
 
 /*
  * Periodically check for thread suspension.
@@ -535,7 +533,7 @@
         }                                                                   \
         if (NEED_INTERP_SWITCH(INTERP_TYPE)) {                              \
             ADJUST_PC(_pcadj);                                              \
-            glue->entryPoint = _entryPoint;                                 \
+            self->entryPoint = _entryPoint;                                 \
             LOGVV("threadid=%d: switch to STD ep=%d adj=%d\n",              \
                 self->threadId, (_entryPoint), (_pcadj));                   \
             GOTO_bail_switch();                                             \
@@ -544,14 +542,14 @@
 
 /* File: c/opcommon.c */
 /* forward declarations of goto targets */
-GOTO_TARGET_DECL(filledNewArray, bool methodCallRange);
-GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange);
-GOTO_TARGET_DECL(invokeSuper, bool methodCallRange);
-GOTO_TARGET_DECL(invokeInterface, bool methodCallRange);
-GOTO_TARGET_DECL(invokeDirect, bool methodCallRange);
-GOTO_TARGET_DECL(invokeStatic, bool methodCallRange);
-GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange);
-GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange);
+GOTO_TARGET_DECL(filledNewArray, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeSuper, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeInterface, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeDirect, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeStatic, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange, bool jumboFormat);
 GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall,
     u2 count, u2 regs);
 GOTO_TARGET_DECL(returnFromMethod);
@@ -694,8 +692,7 @@
             secondVal = GET_REGISTER(vsrc2);                                \
             if (secondVal == 0) {                                           \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && secondVal == -1) {            \
@@ -741,9 +738,8 @@
             firstVal = GET_REGISTER(vsrc1);                                 \
             if ((s2) vsrc2 == 0) {                                          \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
-                GOTO_exceptionThrown();                                      \
+                dvmThrowArithmeticException("divide by zero");              \
+                GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && ((s2) vsrc2) == -1) {         \
                 /* won't generate /lit16 instr for this; check anyway */    \
@@ -776,8 +772,7 @@
             firstVal = GET_REGISTER(vsrc1);                                 \
             if ((s1) vsrc2 == 0) {                                          \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && ((s1) vsrc2) == -1) {         \
@@ -822,8 +817,7 @@
             secondVal = GET_REGISTER(vsrc1);                                \
             if (secondVal == 0) {                                           \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && secondVal == -1) {            \
@@ -865,8 +859,7 @@
             secondVal = GET_REGISTER_WIDE(vsrc2);                           \
             if (secondVal == 0LL) {                                         \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u8)firstVal == 0x8000000000000000ULL &&                    \
@@ -912,8 +905,7 @@
             secondVal = GET_REGISTER_WIDE(vsrc1);                           \
             if (secondVal == 0LL) {                                         \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u8)firstVal == 0x8000000000000000ULL &&                    \
@@ -1003,7 +995,8 @@
         if (!checkForNull((Object*) arrayObj))                              \
             GOTO_exceptionThrown();                                         \
         if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
-            dvmThrowAIOOBE(GET_REGISTER(vsrc2), arrayObj->length);          \
+            dvmThrowArrayIndexOutOfBoundsException(                         \
+                arrayObj->length, GET_REGISTER(vsrc2));                     \
             GOTO_exceptionThrown();                                         \
         }                                                                   \
         SET_REGISTER##_regsize(vdst,                                        \
@@ -1027,7 +1020,8 @@
         if (!checkForNull((Object*) arrayObj))                              \
             GOTO_exceptionThrown();                                         \
         if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
-            dvmThrowAIOOBE(GET_REGISTER(vsrc2), arrayObj->length);          \
+            dvmThrowArrayIndexOutOfBoundsException(                         \
+                arrayObj->length, GET_REGISTER(vsrc2));                     \
             GOTO_exceptionThrown();                                         \
         }                                                                   \
         ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\
@@ -1080,6 +1074,34 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_IGET_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, vCCCC, class@AAAAAAAA*/)                 \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        vsrc1 = FETCH(4);                      /* object ptr */             \
+        ILOGV("|iget%s/jumbo v%d,v%d,field@0x%08x",                         \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO_exceptionThrown();                                         \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(curMethod->clazz, ref);            \
+            if (ifield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst,                                        \
+            dvmGetField##_ftype(obj, ifield->byteOffset));                  \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+        UPDATE_FIELD_GET(&ifield->field);                                   \
+    }                                                                       \
+    FINISH(5);
+
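As a rough standalone sketch of what these jumbo field-access macros decode (the u2/u4 typedefs and the 5-unit layout mirror the surrounding code; the function name is hypothetical):

typedef unsigned short u2;
typedef unsigned int   u4;

/* Operand layout of an iget/iput-style jumbo instruction: the 32-bit field
 * reference is split across code units 1 and 2 (low half first), and the
 * destination and object registers follow in units 3 and 4, giving 5 code
 * units in total -- hence FINISH(5) above.  The sget/sput jumbo forms drop
 * the object register and are 4 units (FINISH(4)). */
static void decodeJumboFieldOp(const u2* insns, u4* pFieldRef, u2* pVdst, u2* pVobj)
{
    *pFieldRef = insns[1] | ((u4) insns[2] << 16);
    *pVdst     = insns[3];
    *pVobj     = insns[4];
}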
 #define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
     HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
     {                                                                       \
@@ -1125,6 +1147,34 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_IPUT_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, vCCCC, class@AAAAAAAA*/)                 \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        vsrc1 = FETCH(4);                      /* object ptr */             \
+        ILOGV("|iput%s/jumbo v%d,v%d,field@0x%08x",                         \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO_exceptionThrown();                                         \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(curMethod->clazz, ref);            \
+            if (ifield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+        }                                                                   \
+        dvmSetField##_ftype(obj, ifield->byteOffset,                        \
+            GET_REGISTER##_regsize(vdst));                                  \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+        UPDATE_FIELD_PUT(&ifield->field);                                   \
+    }                                                                       \
+    FINISH(5);
+
 #define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
     HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
     {                                                                       \
@@ -1162,7 +1212,7 @@
             if (sfield == NULL)                                             \
                 GOTO_exceptionThrown();                                     \
             if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
-                ABORT_JIT_TSELECT();                                        \
+                END_JIT_TSELECT();                                          \
             }                                                               \
         }                                                                   \
         SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
@@ -1172,6 +1222,30 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_SGET_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, class@AAAAAAAA*/)                        \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        ILOGV("|sget%s/jumbo v%d,sfield@0x%08x", (_opname), vdst, ref);     \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(curMethod->clazz, ref);          \
+            if (sfield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+            if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
+                END_JIT_TSELECT();                                          \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
+        ILOGV("+ SGET '%s'=0x%08llx",                                       \
+            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+        UPDATE_FIELD_GET(&sfield->field);                                   \
+    }                                                                       \
+    FINISH(4);
+
 #define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize)                   \
     HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/)                              \
     {                                                                       \
@@ -1186,7 +1260,7 @@
             if (sfield == NULL)                                             \
                 GOTO_exceptionThrown();                                     \
             if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
-                ABORT_JIT_TSELECT();                                        \
+                END_JIT_TSELECT();                                          \
             }                                                               \
         }                                                                   \
         dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
@@ -1196,6 +1270,30 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_SPUT_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, class@AAAAAAAA*/)                        \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        ILOGV("|sput%s/jumbo v%d,sfield@0x%08x", (_opname), vdst, ref);     \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(curMethod->clazz, ref);          \
+            if (sfield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+            if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
+                END_JIT_TSELECT();                                          \
+            }                                                               \
+        }                                                                   \
+        dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
+        ILOGV("+ SPUT '%s'=0x%08llx",                                       \
+            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+        UPDATE_FIELD_PUT(&sfield->field);                                   \
+    }                                                                       \
+    FINISH(4);
+
 /* File: c/OP_IGET_VOLATILE.c */
 HANDLE_IGET_X(OP_IGET_VOLATILE,         "-volatile", IntVolatile, )
 OP_END
@@ -1308,6 +1406,36 @@
     FINISH(3);
 OP_END
 
+/* File: c/OP_INVOKE_OBJECT_INIT_RANGE.c */
+HANDLE_OPCODE(OP_INVOKE_OBJECT_INIT_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+    {
+        Object* obj;
+
+        vsrc1 = FETCH(2);               /* reg number of "this" pointer */
+        obj = GET_REGISTER_AS_OBJECT(vsrc1);
+
+        if (!checkForNullExportPC(obj, fp, pc))
+            GOTO_exceptionThrown();
+
+        /*
+         * The object should be marked "finalizable" when Object.<init>
+         * completes normally.  We're going to assume it does complete
+         * (by virtue of being nothing but a return-void) and set it now.
+         */
+        if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE)) {
+            dvmSetFinalizable(obj);
+        }
+
+#if INTERP_TYPE == INTERP_DBG
+        if (DEBUGGER_ACTIVE) {
+            /* behave like OP_INVOKE_DIRECT_RANGE */
+            GOTO_invoke(invokeDirect, true, false);
+        }
+#endif
+        FINISH(3);
+    }
+OP_END
+
 /* File: c/OP_RETURN_VOID_BARRIER.c */
 HANDLE_OPCODE(OP_RETURN_VOID_BARRIER /**/)
     ILOGV("|return-void");
@@ -1330,6 +1458,84 @@
 HANDLE_SPUT_X(OP_SPUT_OBJECT_VOLATILE,  "-object-volatile", ObjectVolatile, _AS_OBJECT)
 OP_END
 
+/* File: c/OP_INVOKE_OBJECT_INIT_JUMBO.c */
+HANDLE_OPCODE(OP_INVOKE_OBJECT_INIT_JUMBO /*{vCCCC..vNNNN}, meth@AAAAAAAA*/)
+    {
+        Object* obj;
+
+        vsrc1 = FETCH(4);               /* reg number of "this" pointer */
+        obj = GET_REGISTER_AS_OBJECT(vsrc1);
+
+        if (!checkForNullExportPC(obj, fp, pc))
+            GOTO_exceptionThrown();
+
+        /*
+         * The object should be marked "finalizable" when Object.<init>
+         * completes normally.  We're going to assume it does complete
+         * (by virtue of being nothing but a return-void) and set it now.
+         */
+        if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE)) {
+            dvmSetFinalizable(obj);
+        }
+
+#if INTERP_TYPE == INTERP_DBG
+        if (DEBUGGER_ACTIVE) {
+            /* behave like OP_INVOKE_DIRECT_RANGE */
+            GOTO_invoke(invokeDirect, true, true);
+        }
+#endif
+        FINISH(5);
+    }
+OP_END
+
+/* File: c/OP_IGET_VOLATILE_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_VOLATILE_JUMBO, "-volatile/jumbo", IntVolatile, )
+OP_END
+
+/* File: c/OP_IGET_WIDE_VOLATILE_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE)
+OP_END
+
+/* File: c/OP_IGET_OBJECT_VOLATILE_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_IPUT_VOLATILE_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_VOLATILE_JUMBO, "-volatile/jumbo", IntVolatile, )
+OP_END
+
+/* File: c/OP_IPUT_WIDE_VOLATILE_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE)
+OP_END
+
+/* File: c/OP_IPUT_OBJECT_VOLATILE_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_SGET_VOLATILE_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_VOLATILE_JUMBO, "-volatile/jumbo", IntVolatile, )
+OP_END
+
+/* File: c/OP_SGET_WIDE_VOLATILE_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE)
+OP_END
+
+/* File: c/OP_SGET_OBJECT_VOLATILE_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_SPUT_VOLATILE_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_VOLATILE_JUMBO, "-volatile/jumbo", IntVolatile, )
+OP_END
+
+/* File: c/OP_SPUT_WIDE_VOLATILE_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE)
+OP_END
+
+/* File: c/OP_SPUT_OBJECT_VOLATILE_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT)
+OP_END
+
 /* File: c/gotoTargets.c */
 /*
  * C footer.  This has some common code shared by the various targets.
@@ -1341,7 +1547,7 @@
  * next instruction.  Here, these are subroutines that return to the caller.
  */
 
-GOTO_TARGET(filledNewArray, bool methodCallRange)
+GOTO_TARGET(filledNewArray, bool methodCallRange, bool jumboFormat)
     {
         ClassObject* arrayClass;
         ArrayObject* newArray;
@@ -1352,19 +1558,28 @@
 
         EXPORT_PC();
 
-        ref = FETCH(1);             /* class ref */
-        vdst = FETCH(2);            /* first 4 regs -or- range base */
-
-        if (methodCallRange) {
-            vsrc1 = INST_AA(inst);  /* #of elements */
-            arg5 = -1;              /* silence compiler warning */
-            ILOGV("|filled-new-array-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* class ref */
+            vsrc1 = FETCH(3);                     /* #of elements */
+            vdst = FETCH(4);                      /* range base */
+            arg5 = -1;                            /* silence compiler warning */
+            ILOGV("|filled-new-array/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
         } else {
-            arg5 = INST_A(inst);
-            vsrc1 = INST_B(inst);   /* #of elements */
-            ILOGV("|filled-new-array args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1, ref, vdst, arg5);
+            ref = FETCH(1);             /* class ref */
+            vdst = FETCH(2);            /* first 4 regs -or- range base */
+
+            if (methodCallRange) {
+                vsrc1 = INST_AA(inst);  /* #of elements */
+                arg5 = -1;              /* silence compiler warning */
+                ILOGV("|filled-new-array-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+            } else {
+                arg5 = INST_A(inst);
+                vsrc1 = INST_B(inst);   /* #of elements */
+                ILOGV("|filled-new-array args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1, ref, vdst, arg5);
+            }
         }
 
         /*
@@ -1378,7 +1593,7 @@
         }
         /*
         if (!dvmIsArrayClass(arrayClass)) {
-            dvmThrowException("Ljava/lang/RuntimeError;",
+            dvmThrowRuntimeException(
                 "filled-new-array needs array class");
             GOTO_exceptionThrown();
         }
@@ -1394,13 +1609,12 @@
         typeCh = arrayClass->descriptor[1];
         if (typeCh == 'D' || typeCh == 'J') {
             /* category 2 primitives not allowed */
-            dvmThrowException("Ljava/lang/RuntimeError;",
-                "bad filled array req");
+            dvmThrowRuntimeException("bad filled array req");
             GOTO_exceptionThrown();
         } else if (typeCh != 'L' && typeCh != '[' && typeCh != 'I') {
             /* TODO: requires multiple "fill in" loops with different widths */
             LOGE("non-int primitives not implemented\n");
-            dvmThrowException("Ljava/lang/InternalError;",
+            dvmThrowInternalError(
                 "filled-new-array not implemented for anything but 'int'");
             GOTO_exceptionThrown();
         }
@@ -1433,35 +1647,49 @@
 
         retval.l = newArray;
     }
-    FINISH(3);
+    if (jumboFormat) {
+        FINISH(5);
+    } else {
+        FINISH(3);
+    }
 GOTO_TARGET_END
 
 
-GOTO_TARGET(invokeVirtual, bool methodCallRange)
+GOTO_TARGET(invokeVirtual, bool methodCallRange, bool jumboFormat)
     {
         Method* baseMethod;
         Object* thisPtr;
 
         EXPORT_PC();
 
-        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-        ref = FETCH(1);             /* method ref */
-        vdst = FETCH(2);            /* 4 regs -or- first reg */
-
-        /*
-         * The object against which we are executing a method is always
-         * in the first argument.
-         */
-        if (methodCallRange) {
-            assert(vsrc1 > 0);
-            ILOGV("|invoke-virtual-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+            vsrc1 = FETCH(3);                     /* count */
+            vdst = FETCH(4);                      /* first reg */
+            ADJUST_PC(2);     /* advance pc partially to make returns easier */
+            ILOGV("|invoke-virtual/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
             thisPtr = (Object*) GET_REGISTER(vdst);
         } else {
-            assert((vsrc1>>4) > 0);
-            ILOGV("|invoke-virtual args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
-            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+            ref = FETCH(1);             /* method ref */
+            vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            if (methodCallRange) {
+                assert(vsrc1 > 0);
+                ILOGV("|invoke-virtual-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+                thisPtr = (Object*) GET_REGISTER(vdst);
+            } else {
+                assert((vsrc1>>4) > 0);
+                ILOGV("|invoke-virtual args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+                thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+            }
         }
 
         if (!checkForNull(thisPtr))
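The ADJUST_PC(2) in the jumbo branch above is only half of the pc update; a sketch of the width bookkeeping, under the assumption (taken from the non-jumbo handlers) that the shared return path advances the caller's pc by 3 code units:

/* Rough width bookkeeping for the jumbo invoke forms; the "+3 on return"
 * figure is an assumption based on the existing 3-unit invoke instructions. */
enum {
    kInvokeUnits      = 3,  /* opcode/arg unit, 16-bit method ref, register list or base */
    kInvokeJumboUnits = 5   /* opcode unit, 32-bit method ref (2 units), count, first reg */
};
/* jumbo path: ADJUST_PC(2) here plus the usual +3 on return totals 5 units,
 * so execution resumes at the instruction following the jumbo invoke. */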
@@ -1502,8 +1730,7 @@
              * Works fine unless Sub stops providing an implementation of
              * the method.
              */
-            dvmThrowException("Ljava/lang/AbstractMethodError;",
-                "abstract method not implemented");
+            dvmThrowAbstractMethodError("abstract method not implemented");
             GOTO_exceptionThrown();
         }
 #else
@@ -1533,26 +1760,37 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeSuper, bool methodCallRange)
+GOTO_TARGET(invokeSuper, bool methodCallRange, bool jumboFormat)
     {
         Method* baseMethod;
         u2 thisReg;
 
         EXPORT_PC();
 
-        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-        ref = FETCH(1);             /* method ref */
-        vdst = FETCH(2);            /* 4 regs -or- first reg */
-
-        if (methodCallRange) {
-            ILOGV("|invoke-super-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+            vsrc1 = FETCH(3);                     /* count */
+            vdst = FETCH(4);                      /* first reg */
+            ADJUST_PC(2);     /* advance pc partially to make returns easier */
+            ILOGV("|invoke-super/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
             thisReg = vdst;
         } else {
-            ILOGV("|invoke-super args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
-            thisReg = vdst & 0x0f;
+            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+            ref = FETCH(1);             /* method ref */
+            vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+            if (methodCallRange) {
+                ILOGV("|invoke-super-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+                thisReg = vdst;
+            } else {
+                ILOGV("|invoke-super args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+                thisReg = vdst & 0x0f;
+            }
         }
+
         /* impossible in well-formed code, but we must check nevertheless */
         if (!checkForNull((Object*) GET_REGISTER(thisReg)))
             GOTO_exceptionThrown();
@@ -1587,15 +1825,13 @@
              * Method does not exist in the superclass.  Could happen if
              * superclass gets updated.
              */
-            dvmThrowException("Ljava/lang/NoSuchMethodError;",
-                baseMethod->name);
+            dvmThrowNoSuchMethodError(baseMethod->name);
             GOTO_exceptionThrown();
         }
         methodToCall = curMethod->clazz->super->vtable[baseMethod->methodIndex];
 #if 0
         if (dvmIsAbstractMethod(methodToCall)) {
-            dvmThrowException("Ljava/lang/AbstractMethodError;",
-                "abstract method not implemented");
+            dvmThrowAbstractMethodError("abstract method not implemented");
             GOTO_exceptionThrown();
         }
 #else
@@ -1611,32 +1847,43 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeInterface, bool methodCallRange)
+GOTO_TARGET(invokeInterface, bool methodCallRange, bool jumboFormat)
     {
         Object* thisPtr;
         ClassObject* thisClass;
 
         EXPORT_PC();
 
-        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-        ref = FETCH(1);             /* method ref */
-        vdst = FETCH(2);            /* 4 regs -or- first reg */
-
-        /*
-         * The object against which we are executing a method is always
-         * in the first argument.
-         */
-        if (methodCallRange) {
-            assert(vsrc1 > 0);
-            ILOGV("|invoke-interface-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+            vsrc1 = FETCH(3);                     /* count */
+            vdst = FETCH(4);                      /* first reg */
+            ADJUST_PC(2);     /* advance pc partially to make returns easier */
+            ILOGV("|invoke-interface/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
             thisPtr = (Object*) GET_REGISTER(vdst);
         } else {
-            assert((vsrc1>>4) > 0);
-            ILOGV("|invoke-interface args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
-            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+            ref = FETCH(1);             /* method ref */
+            vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            if (methodCallRange) {
+                assert(vsrc1 > 0);
+                ILOGV("|invoke-interface-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+                thisPtr = (Object*) GET_REGISTER(vdst);
+            } else {
+                assert((vsrc1>>4) > 0);
+                ILOGV("|invoke-interface args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+                thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+            }
         }
+
         if (!checkForNull(thisPtr))
             GOTO_exceptionThrown();
 
@@ -1661,25 +1908,36 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeDirect, bool methodCallRange)
+GOTO_TARGET(invokeDirect, bool methodCallRange, bool jumboFormat)
     {
         u2 thisReg;
 
-        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-        ref = FETCH(1);             /* method ref */
-        vdst = FETCH(2);            /* 4 regs -or- first reg */
-
         EXPORT_PC();
 
-        if (methodCallRange) {
-            ILOGV("|invoke-direct-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+            vsrc1 = FETCH(3);                     /* count */
+            vdst = FETCH(4);                      /* first reg */
+            ADJUST_PC(2);     /* advance pc partially to make returns easier */
+            ILOGV("|invoke-direct/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
             thisReg = vdst;
         } else {
-            ILOGV("|invoke-direct args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
-            thisReg = vdst & 0x0f;
+            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+            ref = FETCH(1);             /* method ref */
+            vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+            if (methodCallRange) {
+                ILOGV("|invoke-direct-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+                thisReg = vdst;
+            } else {
+                ILOGV("|invoke-direct args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+                thisReg = vdst & 0x0f;
+            }
         }
+
         if (!checkForNull((Object*) GET_REGISTER(thisReg)))
             GOTO_exceptionThrown();
 
@@ -1696,19 +1954,28 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeStatic, bool methodCallRange)
-    vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-    ref = FETCH(1);             /* method ref */
-    vdst = FETCH(2);            /* 4 regs -or- first reg */
-
+GOTO_TARGET(invokeStatic, bool methodCallRange, bool jumboFormat)
     EXPORT_PC();
 
-    if (methodCallRange)
-        ILOGV("|invoke-static-range args=%d @0x%04x {regs=v%d-v%d}",
+    if (jumboFormat) {
+        ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+        vsrc1 = FETCH(3);                     /* count */
+        vdst = FETCH(4);                      /* first reg */
+        ADJUST_PC(2);     /* advance pc partially to make returns easier */
+        ILOGV("|invoke-static/jumbo args=%d @0x%08x {regs=v%d-v%d}",
             vsrc1, ref, vdst, vdst+vsrc1-1);
-    else
-        ILOGV("|invoke-static args=%d @0x%04x {regs=0x%04x %x}",
-            vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+    } else {
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* method ref */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        if (methodCallRange)
+            ILOGV("|invoke-static-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+        else
+            ILOGV("|invoke-static args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+    }
 
     methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
     if (methodToCall == NULL) {
@@ -1725,13 +1992,13 @@
          */
         if (dvmDexGetResolvedMethod(methodClassDex, ref) == NULL) {
             /* Class initialization is still ongoing */
-            ABORT_JIT_TSELECT();
+            END_JIT_TSELECT();
         }
     }
     GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeVirtualQuick, bool methodCallRange)
+GOTO_TARGET(invokeVirtualQuick, bool methodCallRange, bool jumboFormat)
     {
         Object* thisPtr;
 
@@ -1768,13 +2035,12 @@
          * Combine the object we found with the vtable offset in the
          * method.
          */
-        assert(ref < thisPtr->clazz->vtableCount);
+        assert(ref < (unsigned int) thisPtr->clazz->vtableCount);
         methodToCall = thisPtr->clazz->vtable[ref];
 
 #if 0
         if (dvmIsAbstractMethod(methodToCall)) {
-            dvmThrowException("Ljava/lang/AbstractMethodError;",
-                "abstract method not implemented");
+            dvmThrowAbstractMethodError("abstract method not implemented");
             GOTO_exceptionThrown();
         }
 #else
@@ -1790,7 +2056,7 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeSuperQuick, bool methodCallRange)
+GOTO_TARGET(invokeSuperQuick, bool methodCallRange, bool jumboFormat)
     {
         u2 thisReg;
 
@@ -1815,11 +2081,11 @@
 
 #if 0   /* impossible in optimized + verified code */
         if (ref >= curMethod->clazz->super->vtableCount) {
-            dvmThrowException("Ljava/lang/NoSuchMethodError;", NULL);
+            dvmThrowNoSuchMethodError(NULL);
             GOTO_exceptionThrown();
         }
 #else
-        assert(ref < curMethod->clazz->super->vtableCount);
+        assert(ref < (unsigned int) curMethod->clazz->super->vtableCount);
 #endif
 
         /*
@@ -1835,8 +2101,7 @@
 
 #if 0
         if (dvmIsAbstractMethod(methodToCall)) {
-            dvmThrowException("Ljava/lang/AbstractMethodError;",
-                "abstract method not implemented");
+            dvmThrowAbstractMethodError("abstract method not implemented");
             GOTO_exceptionThrown();
         }
 #else
@@ -1884,7 +2149,7 @@
 #endif
 
         /* back up to previous frame and see if we hit a break */
-        fp = saveArea->prevFrame;
+        fp = (u4*)saveArea->prevFrame;
         assert(fp != NULL);
         if (dvmIsBreakFrame(fp)) {
             /* bail without popping the method frame from stack */
@@ -1938,8 +2203,8 @@
         PERIODIC_CHECKS(kInterpEntryThrow, 0);
 
 #if defined(WITH_JIT)
-        // Something threw during trace selection - abort the current trace
-        ABORT_JIT_TSELECT();
+        // Something threw during trace selection - end the current trace
+        END_JIT_TSELECT();
 #endif
         /*
          * We save off the exception and clear the exception status.  While
@@ -1971,7 +2236,7 @@
          * here, and have the JNI exception code do the reporting to the
          * debugger.
          */
-        if (gDvm.debuggerActive) {
+        if (DEBUGGER_ACTIVE) {
             void* catchFrame;
             catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
                         exception, true, &catchFrame);
@@ -1996,7 +2261,7 @@
          * the "catch" blocks.
          */
         catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
-                    exception, false, (void*)&fp);
+                    exception, false, (void**)(void*)&fp);
 
         /*
          * Restore the stack bounds after an overflow.  This isn't going to
@@ -2229,7 +2494,7 @@
             curMethod = methodToCall;
             methodClassDex = curMethod->clazz->pDvmDex;
             pc = methodToCall->insns;
-            fp = self->curFrame = newFp;
+            self->curFrame = fp = newFp;
 #ifdef EASY_GDB
             debugSaveArea = SAVEAREA_FROM_FP(newFp);
 #endif
@@ -2242,18 +2507,14 @@
             FINISH(0);                              // jump to method start
         } else {
             /* set this up for JNI locals, even if not a JNI native */
-#ifdef USE_INDIRECT_REF
             newSaveArea->xtra.localRefCookie = self->jniLocalRefTable.segmentState.all;
-#else
-            newSaveArea->xtra.localRefCookie = self->jniLocalRefTable.nextEntry;
-#endif
 
             self->curFrame = newFp;
 
             DUMP_REGS(methodToCall, newFp, true);   // show input args
 
 #if (INTERP_TYPE == INTERP_DBG)
-            if (gDvm.debuggerActive) {
+            if (DEBUGGER_ACTIVE) {
                 dvmDbgPostLocationEvent(methodToCall, -1,
                     dvmGetThisPtr(curMethod, fp), DBG_METHOD_ENTRY);
             }
@@ -2280,7 +2541,7 @@
             (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
 
 #if (INTERP_TYPE == INTERP_DBG)
-            if (gDvm.debuggerActive) {
+            if (DEBUGGER_ACTIVE) {
                 dvmDbgPostLocationEvent(methodToCall, -1,
                     dvmGetThisPtr(curMethod, fp), DBG_METHOD_EXIT);
             }
diff --git a/vm/mterp/out/InterpC-x86.c b/vm/mterp/out/InterpC-x86.c
index 8a7a1d5..5152e85 100644
--- a/vm/mterp/out/InterpC-x86.c
+++ b/vm/mterp/out/InterpC-x86.c
@@ -58,24 +58,31 @@
 #endif
 
 /*
- * ARM EABI requires 64-bit alignment for access to 64-bit data types.  We
- * can't just use pointers to copy 64-bit values out of our interpreted
- * register set, because gcc will generate ldrd/strd.
+ * Some architectures require 64-bit alignment for access to 64-bit data
+ * types.  We can't just use pointers to copy 64-bit values out of our
+ * interpreted register set, because gcc may assume the pointer target is
+ * aligned and generate invalid code.
  *
- * The __UNION version copies data in and out of a union.  The __MEMCPY
- * version uses a memcpy() call to do the transfer; gcc is smart enough to
- * not actually call memcpy().  The __UNION version is very bad on ARM;
- * it only uses one more instruction than __MEMCPY, but for some reason
- * gcc thinks it needs separate storage for every instance of the union.
- * On top of that, it feels the need to zero them out at the start of the
- * method.  Net result is we zero out ~700 bytes of stack space at the top
- * of the interpreter using ARM STM instructions.
+ * There are two common approaches:
+ *  (1) Use a union that defines a 32-bit pair and a 64-bit value.
+ *  (2) Call memcpy().
+ *
+ * Depending upon what compiler you're using and what options are specified,
+ * one may be faster than the other.  For example, the compiler might
+ * convert a memcpy() of 8 bytes into a series of instructions and omit
+ * the call.  The union version could cause some strange side-effects,
+ * e.g. for a while ARM gcc thought it needed separate storage for each
+ * inlined instance, and generated instructions to zero out ~700 bytes of
+ * stack space at the top of the interpreter.
+ *
+ * The default is to use memcpy().  The current gcc for ARM seems to do
+ * better with the union.
  */
 #if defined(__ARM_EABI__)
-//# define NO_UNALIGN_64__UNION
-# define NO_UNALIGN_64__MEMCPY
+# define NO_UNALIGN_64__UNION
 #endif
 
+
 //#define LOG_INSTR                   /* verbose debugging */
 /* set and adjust ANDROID_LOG_TAGS='*:i jdwp:i dalvikvm:i dalvikvmi:i' */
 
@@ -171,12 +178,10 @@
     conv.parts[0] = ptr[0];
     conv.parts[1] = ptr[1];
     return conv.ll;
-#elif defined(NO_UNALIGN_64__MEMCPY)
+#else
     s8 val;
     memcpy(&val, &ptr[idx], 8);
     return val;
-#else
-    return *((s8*) &ptr[idx]);
 #endif
 }
 
@@ -190,10 +195,8 @@
     conv.ll = val;
     ptr[0] = conv.parts[0];
     ptr[1] = conv.parts[1];
-#elif defined(NO_UNALIGN_64__MEMCPY)
-    memcpy(&ptr[idx], &val, 8);
 #else
-    *((s8*) &ptr[idx]) = val;
+    memcpy(&ptr[idx], &val, 8);
 #endif
 }
 
@@ -207,12 +210,10 @@
     conv.parts[0] = ptr[0];
     conv.parts[1] = ptr[1];
     return conv.d;
-#elif defined(NO_UNALIGN_64__MEMCPY)
+#else
     double dval;
     memcpy(&dval, &ptr[idx], 8);
     return dval;
-#else
-    return *((double*) &ptr[idx]);
 #endif
 }
 
@@ -226,10 +227,8 @@
     conv.d = dval;
     ptr[0] = conv.parts[0];
     ptr[1] = conv.parts[1];
-#elif defined(NO_UNALIGN_64__MEMCPY)
-    memcpy(&ptr[idx], &dval, 8);
 #else
-    *((double*) &ptr[idx]) = dval;
+    memcpy(&ptr[idx], &dval, 8);
 #endif
 }
 
@@ -318,10 +317,10 @@
 
 /*
  * The current PC must be available to Throwable constructors, e.g.
- * those created by dvmThrowException(), so that the exception stack
- * trace can be generated correctly.  If we don't do this, the offset
- * within the current method won't be shown correctly.  See the notes
- * in Exception.c.
+ * those created by the various exception throw routines, so that the
+ * exception stack trace can be generated correctly.  If we don't do this,
+ * the offset within the current method won't be shown correctly.  See the
+ * notes in Exception.c.
  *
  * This is also used to determine the address for precise GC.
  *
@@ -360,7 +359,7 @@
 static inline bool checkForNull(Object* obj)
 {
     if (obj == NULL) {
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         return false;
     }
 #ifdef WITH_EXTRA_OBJECT_VALIDATION
@@ -392,7 +391,7 @@
 {
     if (obj == NULL) {
         EXPORT_PC();
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         return false;
     }
 #ifdef WITH_EXTRA_OBJECT_VALIDATION
@@ -418,7 +417,7 @@
 # define CHECK_TRACKED_REFS() ((void)0)
 #define CHECK_JIT_BOOL() (false)
 #define CHECK_JIT_VOID()
-#define ABORT_JIT_TSELECT() ((void)0)
+#define END_JIT_TSELECT() ((void)0)
 
 /*
  * In the C mterp stubs, "goto" is a function call followed immediately
@@ -426,11 +425,11 @@
  */
 
 #define GOTO_TARGET_DECL(_target, ...)                                      \
-    void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__);
+    void dvmMterp_##_target(Thread* self, ## __VA_ARGS__);
 
 /* (void)xxx to quiet unused variable compiler warnings. */
 #define GOTO_TARGET(_target, ...)                                           \
-    void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__) {              \
+    void dvmMterp_##_target(Thread* self, ## __VA_ARGS__) {                 \
         u2 ref, vsrc1, vsrc2, vdst;                                         \
         u2 inst = FETCH(0);                                                 \
         const Method* methodToCall;                                         \
@@ -441,16 +440,15 @@
 #define GOTO_TARGET_END }
 
 /*
- * Redefine what used to be local variable accesses into MterpGlue struct
+ * Redefine what used to be local variable accesses into Thread struct
  * references.  (These are undefined down in "footer.c".)
  */
-#define retval                  glue->retval
-#define pc                      glue->pc
-#define fp                      glue->fp
-#define curMethod               glue->method
-#define methodClassDex          glue->methodClassDex
-#define self                    glue->self
-#define debugTrackedRefStart    glue->debugTrackedRefStart
+#define retval                  self->retval
+#define pc                      self->interpSave.pc
+#define fp                      self->interpSave.fp
+#define curMethod               self->interpSave.method
+#define methodClassDex          self->interpSave.methodClassDex
+#define debugTrackedRefStart    self->interpSave.debugTrackedRefStart
 
 /* ugh */
 #define STUB_HACK(x) x
@@ -458,12 +456,12 @@
 
 /*
  * Opcode handler framing macros.  Here, each opcode is a separate function
- * that takes a "glue" argument and returns void.  We can't declare
+ * that takes a "self" argument and returns void.  We can't declare
  * these "static" because they may be called from an assembly stub.
  * (void)xxx to quiet unused variable compiler warnings.
  */
 #define HANDLE_OPCODE(_op)                                                  \
-    void dvmMterp_##_op(MterpGlue* glue) {                                  \
+    void dvmMterp_##_op(Thread* self) {                                     \
         u2 ref, vsrc1, vsrc2, vdst;                                         \
         u2 inst = FETCH(0);                                                 \
         (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst;
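Expanded by hand, the new framing turns each handler into a plain function that receives the Thread pointer; approximately (OP_NOP chosen arbitrarily, body elided):

/* approximate expansion of HANDLE_OPCODE(OP_NOP) under the Thread*-based stubs */
void dvmMterp_OP_NOP(Thread* self) {
    u2 ref, vsrc1, vsrc2, vdst;
    u2 inst = FETCH(0);      /* pc, fp, curMethod now live in self->interpSave */
    (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst;
    /* ...handler body; FINISH(1) would advance pc and return to the dispatcher... */
}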
@@ -490,25 +488,25 @@
 
 #define GOTO_exceptionThrown()                                              \
     do {                                                                    \
-        dvmMterp_exceptionThrown(glue);                                     \
+        dvmMterp_exceptionThrown(self);                                     \
         return;                                                             \
     } while(false)
 
 #define GOTO_returnFromMethod()                                             \
     do {                                                                    \
-        dvmMterp_returnFromMethod(glue);                                    \
+        dvmMterp_returnFromMethod(self);                                    \
         return;                                                             \
     } while(false)
 
-#define GOTO_invoke(_target, _methodCallRange)                              \
+#define GOTO_invoke(_target, _methodCallRange, _jumboFormat)                \
     do {                                                                    \
-        dvmMterp_##_target(glue, _methodCallRange);                         \
+        dvmMterp_##_target(self, _methodCallRange, _jumboFormat);           \
         return;                                                             \
     } while(false)
 
 #define GOTO_invokeMethod(_methodCallRange, _methodToCall, _vsrc1, _vdst)   \
     do {                                                                    \
-        dvmMterp_invokeMethod(glue, _methodCallRange, _methodToCall,        \
+        dvmMterp_invokeMethod(self, _methodCallRange, _methodToCall,        \
             _vsrc1, _vdst);                                                 \
         return;                                                             \
     } while(false)
@@ -518,9 +516,9 @@
  * if we need to switch to the other interpreter upon our return.
  */
 #define GOTO_bail()                                                         \
-    dvmMterpStdBail(glue, false);
+    dvmMterpStdBail(self, false);
 #define GOTO_bail_switch()                                                  \
-    dvmMterpStdBail(glue, true);
+    dvmMterpStdBail(self, true);
 
 /*
  * Periodically check for thread suspension.
@@ -535,7 +533,7 @@
         }                                                                   \
         if (NEED_INTERP_SWITCH(INTERP_TYPE)) {                              \
             ADJUST_PC(_pcadj);                                              \
-            glue->entryPoint = _entryPoint;                                 \
+            self->entryPoint = _entryPoint;                                 \
             LOGVV("threadid=%d: switch to STD ep=%d adj=%d\n",              \
                 self->threadId, (_entryPoint), (_pcadj));                   \
             GOTO_bail_switch();                                             \
@@ -544,14 +542,14 @@
 
 /* File: c/opcommon.c */
 /* forward declarations of goto targets */
-GOTO_TARGET_DECL(filledNewArray, bool methodCallRange);
-GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange);
-GOTO_TARGET_DECL(invokeSuper, bool methodCallRange);
-GOTO_TARGET_DECL(invokeInterface, bool methodCallRange);
-GOTO_TARGET_DECL(invokeDirect, bool methodCallRange);
-GOTO_TARGET_DECL(invokeStatic, bool methodCallRange);
-GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange);
-GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange);
+GOTO_TARGET_DECL(filledNewArray, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeSuper, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeInterface, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeDirect, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeStatic, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange, bool jumboFormat);
 GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall,
     u2 count, u2 regs);
 GOTO_TARGET_DECL(returnFromMethod);
@@ -694,8 +692,7 @@
             secondVal = GET_REGISTER(vsrc2);                                \
             if (secondVal == 0) {                                           \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && secondVal == -1) {            \
@@ -741,9 +738,8 @@
             firstVal = GET_REGISTER(vsrc1);                                 \
             if ((s2) vsrc2 == 0) {                                          \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
-                GOTO_exceptionThrown();                                      \
+                dvmThrowArithmeticException("divide by zero");              \
+                GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && ((s2) vsrc2) == -1) {         \
                 /* won't generate /lit16 instr for this; check anyway */    \
@@ -776,8 +772,7 @@
             firstVal = GET_REGISTER(vsrc1);                                 \
             if ((s1) vsrc2 == 0) {                                          \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && ((s1) vsrc2) == -1) {         \
@@ -822,8 +817,7 @@
             secondVal = GET_REGISTER(vsrc1);                                \
             if (secondVal == 0) {                                           \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u4)firstVal == 0x80000000 && secondVal == -1) {            \
@@ -865,8 +859,7 @@
             secondVal = GET_REGISTER_WIDE(vsrc2);                           \
             if (secondVal == 0LL) {                                         \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u8)firstVal == 0x8000000000000000ULL &&                    \
@@ -912,8 +905,7 @@
             secondVal = GET_REGISTER_WIDE(vsrc1);                           \
             if (secondVal == 0LL) {                                         \
                 EXPORT_PC();                                                \
-                dvmThrowException("Ljava/lang/ArithmeticException;",        \
-                    "divide by zero");                                      \
+                dvmThrowArithmeticException("divide by zero");              \
                 GOTO_exceptionThrown();                                     \
             }                                                               \
             if ((u8)firstVal == 0x8000000000000000ULL &&                    \
@@ -1003,7 +995,8 @@
         if (!checkForNull((Object*) arrayObj))                              \
             GOTO_exceptionThrown();                                         \
         if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
-            dvmThrowAIOOBE(GET_REGISTER(vsrc2), arrayObj->length);          \
+            dvmThrowArrayIndexOutOfBoundsException(                         \
+                arrayObj->length, GET_REGISTER(vsrc2));                     \
             GOTO_exceptionThrown();                                         \
         }                                                                   \
         SET_REGISTER##_regsize(vdst,                                        \
@@ -1027,7 +1020,8 @@
         if (!checkForNull((Object*) arrayObj))                              \
             GOTO_exceptionThrown();                                         \
         if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
-            dvmThrowAIOOBE(GET_REGISTER(vsrc2), arrayObj->length);          \
+            dvmThrowArrayIndexOutOfBoundsException(                         \
+                arrayObj->length, GET_REGISTER(vsrc2));                     \
             GOTO_exceptionThrown();                                         \
         }                                                                   \
         ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\
@@ -1080,6 +1074,34 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_IGET_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, vCCCC, class@AAAAAAAA*/)                 \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        vsrc1 = FETCH(4);                      /* object ptr */             \
+        ILOGV("|iget%s/jumbo v%d,v%d,field@0x%08x",                         \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO_exceptionThrown();                                         \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(curMethod->clazz, ref);            \
+            if (ifield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst,                                        \
+            dvmGetField##_ftype(obj, ifield->byteOffset));                  \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+        UPDATE_FIELD_GET(&ifield->field);                                   \
+    }                                                                       \
+    FINISH(5);
+
 #define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
     HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
     {                                                                       \
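The HANDLE_IGET_X_JUMBO macro added above (and the IPUT/SGET/SPUT variants that follow) assemble a 32-bit field index from two 16-bit code units, which is why the interpreter's ref variable is widened from u2 to u4 later in this patch. A worked example with hypothetical values, assuming the VM's u2/u4 typedefs:

    /* A field index of 0x00012345 split across two code units: */
    u2 unit1 = 0x2345;                      /* FETCH(1): low half      */
    u2 unit2 = 0x0001;                      /* FETCH(2): high half     */
    u4 ref   = unit1 | (u4) unit2 << 16;    /* reassembled: 0x00012345 */
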
@@ -1125,6 +1147,34 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_IPUT_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, vCCCC, class@AAAAAAAA*/)                 \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        vsrc1 = FETCH(4);                      /* object ptr */             \
+        ILOGV("|iput%s/jumbo v%d,v%d,field@0x%08x",                         \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO_exceptionThrown();                                         \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(curMethod->clazz, ref);            \
+            if (ifield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+        }                                                                   \
+        dvmSetField##_ftype(obj, ifield->byteOffset,                        \
+            GET_REGISTER##_regsize(vdst));                                  \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+        UPDATE_FIELD_PUT(&ifield->field);                                   \
+    }                                                                       \
+    FINISH(5);
+
 #define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
     HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
     {                                                                       \
@@ -1162,7 +1212,7 @@
             if (sfield == NULL)                                             \
                 GOTO_exceptionThrown();                                     \
             if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
-                ABORT_JIT_TSELECT();                                        \
+                END_JIT_TSELECT();                                          \
             }                                                               \
         }                                                                   \
         SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
@@ -1172,6 +1222,30 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_SGET_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, class@AAAAAAAA*/)                        \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        ILOGV("|sget%s/jumbo v%d,sfield@0x%08x", (_opname), vdst, ref);     \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(curMethod->clazz, ref);          \
+            if (sfield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+            if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
+                END_JIT_TSELECT();                                          \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
+        ILOGV("+ SGET '%s'=0x%08llx",                                       \
+            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+        UPDATE_FIELD_GET(&sfield->field);                                   \
+    }                                                                       \
+    FINISH(4);
+
 #define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize)                   \
     HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/)                              \
     {                                                                       \
@@ -1186,7 +1260,7 @@
             if (sfield == NULL)                                             \
                 GOTO_exceptionThrown();                                     \
             if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
-                ABORT_JIT_TSELECT();                                        \
+                END_JIT_TSELECT();                                          \
             }                                                               \
         }                                                                   \
         dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
@@ -1196,6 +1270,30 @@
     }                                                                       \
     FINISH(2);
 
+#define HANDLE_SPUT_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, class@AAAAAAAA*/)                        \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        ILOGV("|sput%s/jumbo v%d,sfield@0x%08x", (_opname), vdst, ref);     \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(curMethod->clazz, ref);          \
+            if (sfield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+            if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
+                END_JIT_TSELECT();                                          \
+            }                                                               \
+        }                                                                   \
+        dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
+        ILOGV("+ SPUT '%s'=0x%08llx",                                       \
+            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+        UPDATE_FIELD_PUT(&sfield->field);                                   \
+    }                                                                       \
+    FINISH(4);
+
 /* File: c/OP_IGET_WIDE_VOLATILE.c */
 HANDLE_IGET_X(OP_IGET_WIDE_VOLATILE,    "-wide-volatile", LongVolatile, _WIDE)
 OP_END
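The static-field jumbo handlers above end with FINISH(4) while the instance-field handlers end with FINISH(5): the sget/sput forms carry no object register, so the instruction is one code unit shorter. An illustrative summary of the assumed layouts:

    /*
     * Assumed code-unit layouts (indices match the FETCH() calls above):
     *   iget/jumbo: [0] op  [1] field lo  [2] field hi  [3] vdst  [4] vobj
     *   sget/jumbo: [0] op  [1] field lo  [2] field hi  [3] vdst
     */
    enum {
        kJumboInstanceFieldInsnWidth = 5,   /* hence FINISH(5) */
        kJumboStaticFieldInsnWidth   = 4    /* hence FINISH(4) */
    };
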
@@ -1257,6 +1355,36 @@
     FINISH(3);
 OP_END
 
+/* File: c/OP_INVOKE_OBJECT_INIT_RANGE.c */
+HANDLE_OPCODE(OP_INVOKE_OBJECT_INIT_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+    {
+        Object* obj;
+
+        vsrc1 = FETCH(2);               /* reg number of "this" pointer */
+        obj = GET_REGISTER_AS_OBJECT(vsrc1);
+
+        if (!checkForNullExportPC(obj, fp, pc))
+            GOTO_exceptionThrown();
+
+        /*
+         * The object should be marked "finalizable" when Object.<init>
+         * completes normally.  We're going to assume it does complete
+         * (by virtue of being nothing but a return-void) and set it now.
+         */
+        if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE)) {
+            dvmSetFinalizable(obj);
+        }
+
+#if INTERP_TYPE == INTERP_DBG
+        if (DEBUGGER_ACTIVE) {
+            /* behave like OP_INVOKE_DIRECT_RANGE */
+            GOTO_invoke(invokeDirect, true, false);
+        }
+#endif
+        FINISH(3);
+    }
+OP_END
+
 /* File: c/OP_RETURN_VOID_BARRIER.c */
 HANDLE_OPCODE(OP_RETURN_VOID_BARRIER /**/)
     ILOGV("|return-void");
@@ -1267,6 +1395,84 @@
     GOTO_returnFromMethod();
 OP_END
 
+/* File: c/OP_INVOKE_OBJECT_INIT_JUMBO.c */
+HANDLE_OPCODE(OP_INVOKE_OBJECT_INIT_JUMBO /*{vCCCC..vNNNN}, meth@AAAAAAAA*/)
+    {
+        Object* obj;
+
+        vsrc1 = FETCH(4);               /* reg number of "this" pointer */
+        obj = GET_REGISTER_AS_OBJECT(vsrc1);
+
+        if (!checkForNullExportPC(obj, fp, pc))
+            GOTO_exceptionThrown();
+
+        /*
+         * The object should be marked "finalizable" when Object.<init>
+         * completes normally.  We're going to assume it does complete
+         * (by virtue of being nothing but a return-void) and set it now.
+         */
+        if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE)) {
+            dvmSetFinalizable(obj);
+        }
+
+#if INTERP_TYPE == INTERP_DBG
+        if (DEBUGGER_ACTIVE) {
+            /* behave like OP_INVOKE_DIRECT_RANGE */
+            GOTO_invoke(invokeDirect, true, true);
+        }
+#endif
+        FINISH(5);
+    }
+OP_END
+
+/* File: c/OP_IGET_VOLATILE_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_VOLATILE_JUMBO, "-volatile/jumbo", IntVolatile, )
+OP_END
+
+/* File: c/OP_IGET_WIDE_VOLATILE_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE)
+OP_END
+
+/* File: c/OP_IGET_OBJECT_VOLATILE_JUMBO.c */
+HANDLE_IGET_X_JUMBO(OP_IGET_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_IPUT_VOLATILE_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_VOLATILE_JUMBO, "-volatile/jumbo", IntVolatile, )
+OP_END
+
+/* File: c/OP_IPUT_WIDE_VOLATILE_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE)
+OP_END
+
+/* File: c/OP_IPUT_OBJECT_VOLATILE_JUMBO.c */
+HANDLE_IPUT_X_JUMBO(OP_IPUT_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_SGET_VOLATILE_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_VOLATILE_JUMBO, "-volatile/jumbo", IntVolatile, )
+OP_END
+
+/* File: c/OP_SGET_WIDE_VOLATILE_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE)
+OP_END
+
+/* File: c/OP_SGET_OBJECT_VOLATILE_JUMBO.c */
+HANDLE_SGET_X_JUMBO(OP_SGET_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_SPUT_VOLATILE_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_VOLATILE_JUMBO, "-volatile", IntVolatile, )
+OP_END
+
+/* File: c/OP_SPUT_WIDE_VOLATILE_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE)
+OP_END
+
+/* File: c/OP_SPUT_OBJECT_VOLATILE_JUMBO.c */
+HANDLE_SPUT_X_JUMBO(OP_SPUT_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT)
+OP_END
+
 /* File: c/gotoTargets.c */
 /*
  * C footer.  This has some common code shared by the various targets.
@@ -1278,7 +1484,7 @@
  * next instruction.  Here, these are subroutines that return to the caller.
  */
 
-GOTO_TARGET(filledNewArray, bool methodCallRange)
+GOTO_TARGET(filledNewArray, bool methodCallRange, bool jumboFormat)
     {
         ClassObject* arrayClass;
         ArrayObject* newArray;
@@ -1289,19 +1495,28 @@
 
         EXPORT_PC();
 
-        ref = FETCH(1);             /* class ref */
-        vdst = FETCH(2);            /* first 4 regs -or- range base */
-
-        if (methodCallRange) {
-            vsrc1 = INST_AA(inst);  /* #of elements */
-            arg5 = -1;              /* silence compiler warning */
-            ILOGV("|filled-new-array-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* class ref */
+            vsrc1 = FETCH(3);                     /* #of elements */
+            vdst = FETCH(4);                      /* range base */
+            arg5 = -1;                            /* silence compiler warning */
+            ILOGV("|filled-new-array/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
         } else {
-            arg5 = INST_A(inst);
-            vsrc1 = INST_B(inst);   /* #of elements */
-            ILOGV("|filled-new-array args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1, ref, vdst, arg5);
+            ref = FETCH(1);             /* class ref */
+            vdst = FETCH(2);            /* first 4 regs -or- range base */
+
+            if (methodCallRange) {
+                vsrc1 = INST_AA(inst);  /* #of elements */
+                arg5 = -1;              /* silence compiler warning */
+                ILOGV("|filled-new-array-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+            } else {
+                arg5 = INST_A(inst);
+                vsrc1 = INST_B(inst);   /* #of elements */
+                ILOGV("|filled-new-array args=%d @0x%04x {regs=0x%04x %x}",
+                   vsrc1, ref, vdst, arg5);
+            }
         }
 
         /*
@@ -1315,7 +1530,7 @@
         }
         /*
         if (!dvmIsArrayClass(arrayClass)) {
-            dvmThrowException("Ljava/lang/RuntimeError;",
+            dvmThrowRuntimeException(
                 "filled-new-array needs array class");
             GOTO_exceptionThrown();
         }
@@ -1331,13 +1546,12 @@
         typeCh = arrayClass->descriptor[1];
         if (typeCh == 'D' || typeCh == 'J') {
             /* category 2 primitives not allowed */
-            dvmThrowException("Ljava/lang/RuntimeError;",
-                "bad filled array req");
+            dvmThrowRuntimeException("bad filled array req");
             GOTO_exceptionThrown();
         } else if (typeCh != 'L' && typeCh != '[' && typeCh != 'I') {
             /* TODO: requires multiple "fill in" loops with different widths */
             LOGE("non-int primitives not implemented\n");
-            dvmThrowException("Ljava/lang/InternalError;",
+            dvmThrowInternalError(
                 "filled-new-array not implemented for anything but 'int'");
             GOTO_exceptionThrown();
         }
@@ -1370,35 +1584,49 @@
 
         retval.l = newArray;
     }
-    FINISH(3);
+    if (jumboFormat) {
+        FINISH(5);
+    } else {
+        FINISH(3);
+    }
 GOTO_TARGET_END
 
 
-GOTO_TARGET(invokeVirtual, bool methodCallRange)
+GOTO_TARGET(invokeVirtual, bool methodCallRange, bool jumboFormat)
     {
         Method* baseMethod;
         Object* thisPtr;
 
         EXPORT_PC();
 
-        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-        ref = FETCH(1);             /* method ref */
-        vdst = FETCH(2);            /* 4 regs -or- first reg */
-
-        /*
-         * The object against which we are executing a method is always
-         * in the first argument.
-         */
-        if (methodCallRange) {
-            assert(vsrc1 > 0);
-            ILOGV("|invoke-virtual-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+            vsrc1 = FETCH(3);                     /* count */
+            vdst = FETCH(4);                      /* first reg */
+            ADJUST_PC(2);     /* advance pc partially to make returns easier */
+            ILOGV("|invoke-virtual/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
             thisPtr = (Object*) GET_REGISTER(vdst);
         } else {
-            assert((vsrc1>>4) > 0);
-            ILOGV("|invoke-virtual args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
-            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+            ref = FETCH(1);             /* method ref */
+            vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            if (methodCallRange) {
+                assert(vsrc1 > 0);
+                ILOGV("|invoke-virtual-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+                thisPtr = (Object*) GET_REGISTER(vdst);
+            } else {
+                assert((vsrc1>>4) > 0);
+                ILOGV("|invoke-virtual args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+                thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+            }
         }
 
         if (!checkForNull(thisPtr))
@@ -1439,8 +1667,7 @@
              * Works fine unless Sub stops providing an implementation of
              * the method.
              */
-            dvmThrowException("Ljava/lang/AbstractMethodError;",
-                "abstract method not implemented");
+            dvmThrowAbstractMethodError("abstract method not implemented");
             GOTO_exceptionThrown();
         }
 #else
@@ -1470,26 +1697,37 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeSuper, bool methodCallRange)
+GOTO_TARGET(invokeSuper, bool methodCallRange, bool jumboFormat)
     {
         Method* baseMethod;
         u2 thisReg;
 
         EXPORT_PC();
 
-        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-        ref = FETCH(1);             /* method ref */
-        vdst = FETCH(2);            /* 4 regs -or- first reg */
-
-        if (methodCallRange) {
-            ILOGV("|invoke-super-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+            vsrc1 = FETCH(3);                     /* count */
+            vdst = FETCH(4);                      /* first reg */
+            ADJUST_PC(2);     /* advance pc partially to make returns easier */
+            ILOGV("|invoke-super/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
             thisReg = vdst;
         } else {
-            ILOGV("|invoke-super args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
-            thisReg = vdst & 0x0f;
+            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+            ref = FETCH(1);             /* method ref */
+            vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+            if (methodCallRange) {
+                ILOGV("|invoke-super-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+                thisReg = vdst;
+            } else {
+                ILOGV("|invoke-super args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+                thisReg = vdst & 0x0f;
+            }
         }
+
         /* impossible in well-formed code, but we must check nevertheless */
         if (!checkForNull((Object*) GET_REGISTER(thisReg)))
             GOTO_exceptionThrown();
@@ -1524,15 +1762,13 @@
              * Method does not exist in the superclass.  Could happen if
              * superclass gets updated.
              */
-            dvmThrowException("Ljava/lang/NoSuchMethodError;",
-                baseMethod->name);
+            dvmThrowNoSuchMethodError(baseMethod->name);
             GOTO_exceptionThrown();
         }
         methodToCall = curMethod->clazz->super->vtable[baseMethod->methodIndex];
 #if 0
         if (dvmIsAbstractMethod(methodToCall)) {
-            dvmThrowException("Ljava/lang/AbstractMethodError;",
-                "abstract method not implemented");
+            dvmThrowAbstractMethodError("abstract method not implemented");
             GOTO_exceptionThrown();
         }
 #else
@@ -1548,32 +1784,43 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeInterface, bool methodCallRange)
+GOTO_TARGET(invokeInterface, bool methodCallRange, bool jumboFormat)
     {
         Object* thisPtr;
         ClassObject* thisClass;
 
         EXPORT_PC();
 
-        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-        ref = FETCH(1);             /* method ref */
-        vdst = FETCH(2);            /* 4 regs -or- first reg */
-
-        /*
-         * The object against which we are executing a method is always
-         * in the first argument.
-         */
-        if (methodCallRange) {
-            assert(vsrc1 > 0);
-            ILOGV("|invoke-interface-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+            vsrc1 = FETCH(3);                     /* count */
+            vdst = FETCH(4);                      /* first reg */
+            ADJUST_PC(2);     /* advance pc partially to make returns easier */
+            ILOGV("|invoke-interface/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
             thisPtr = (Object*) GET_REGISTER(vdst);
         } else {
-            assert((vsrc1>>4) > 0);
-            ILOGV("|invoke-interface args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
-            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+            ref = FETCH(1);             /* method ref */
+            vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            if (methodCallRange) {
+                assert(vsrc1 > 0);
+                ILOGV("|invoke-interface-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+                thisPtr = (Object*) GET_REGISTER(vdst);
+            } else {
+                assert((vsrc1>>4) > 0);
+                ILOGV("|invoke-interface args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+                thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+            }
         }
+
         if (!checkForNull(thisPtr))
             GOTO_exceptionThrown();
 
@@ -1598,25 +1845,36 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeDirect, bool methodCallRange)
+GOTO_TARGET(invokeDirect, bool methodCallRange, bool jumboFormat)
     {
         u2 thisReg;
 
-        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-        ref = FETCH(1);             /* method ref */
-        vdst = FETCH(2);            /* 4 regs -or- first reg */
-
         EXPORT_PC();
 
-        if (methodCallRange) {
-            ILOGV("|invoke-direct-range args=%d @0x%04x {regs=v%d-v%d}",
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+            vsrc1 = FETCH(3);                     /* count */
+            vdst = FETCH(4);                      /* first reg */
+            ADJUST_PC(2);     /* advance pc partially to make returns easier */
+            ILOGV("|invoke-direct/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                 vsrc1, ref, vdst, vdst+vsrc1-1);
             thisReg = vdst;
         } else {
-            ILOGV("|invoke-direct args=%d @0x%04x {regs=0x%04x %x}",
-                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
-            thisReg = vdst & 0x0f;
+            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+            ref = FETCH(1);             /* method ref */
+            vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+            if (methodCallRange) {
+                ILOGV("|invoke-direct-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+                thisReg = vdst;
+            } else {
+                ILOGV("|invoke-direct args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+                thisReg = vdst & 0x0f;
+            }
         }
+
         if (!checkForNull((Object*) GET_REGISTER(thisReg)))
             GOTO_exceptionThrown();
 
@@ -1633,19 +1891,28 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeStatic, bool methodCallRange)
-    vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
-    ref = FETCH(1);             /* method ref */
-    vdst = FETCH(2);            /* 4 regs -or- first reg */
-
+GOTO_TARGET(invokeStatic, bool methodCallRange, bool jumboFormat)
     EXPORT_PC();
 
-    if (methodCallRange)
-        ILOGV("|invoke-static-range args=%d @0x%04x {regs=v%d-v%d}",
+    if (jumboFormat) {
+        ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+        vsrc1 = FETCH(3);                     /* count */
+        vdst = FETCH(4);                      /* first reg */
+        ADJUST_PC(2);     /* advance pc partially to make returns easier */
+        ILOGV("|invoke-static/jumbo args=%d @0x%08x {regs=v%d-v%d}",
             vsrc1, ref, vdst, vdst+vsrc1-1);
-    else
-        ILOGV("|invoke-static args=%d @0x%04x {regs=0x%04x %x}",
-            vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+    } else {
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* method ref */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        if (methodCallRange)
+            ILOGV("|invoke-static-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+        else
+            ILOGV("|invoke-static args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+    }
 
     methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
     if (methodToCall == NULL) {
@@ -1662,13 +1929,13 @@
          */
         if (dvmDexGetResolvedMethod(methodClassDex, ref) == NULL) {
             /* Class initialization is still ongoing */
-            ABORT_JIT_TSELECT();
+            END_JIT_TSELECT();
         }
     }
     GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeVirtualQuick, bool methodCallRange)
+GOTO_TARGET(invokeVirtualQuick, bool methodCallRange, bool jumboFormat)
     {
         Object* thisPtr;
 
@@ -1705,13 +1972,12 @@
          * Combine the object we found with the vtable offset in the
          * method.
          */
-        assert(ref < thisPtr->clazz->vtableCount);
+        assert(ref < (unsigned int) thisPtr->clazz->vtableCount);
         methodToCall = thisPtr->clazz->vtable[ref];
 
 #if 0
         if (dvmIsAbstractMethod(methodToCall)) {
-            dvmThrowException("Ljava/lang/AbstractMethodError;",
-                "abstract method not implemented");
+            dvmThrowAbstractMethodError("abstract method not implemented");
             GOTO_exceptionThrown();
         }
 #else
@@ -1727,7 +1993,7 @@
     }
 GOTO_TARGET_END
 
-GOTO_TARGET(invokeSuperQuick, bool methodCallRange)
+GOTO_TARGET(invokeSuperQuick, bool methodCallRange, bool jumboFormat)
     {
         u2 thisReg;
 
@@ -1752,11 +2018,11 @@
 
 #if 0   /* impossible in optimized + verified code */
         if (ref >= curMethod->clazz->super->vtableCount) {
-            dvmThrowException("Ljava/lang/NoSuchMethodError;", NULL);
+            dvmThrowNoSuchMethodError(NULL);
             GOTO_exceptionThrown();
         }
 #else
-        assert(ref < curMethod->clazz->super->vtableCount);
+        assert(ref < (unsigned int) curMethod->clazz->super->vtableCount);
 #endif
 
         /*
@@ -1772,8 +2038,7 @@
 
 #if 0
         if (dvmIsAbstractMethod(methodToCall)) {
-            dvmThrowException("Ljava/lang/AbstractMethodError;",
-                "abstract method not implemented");
+            dvmThrowAbstractMethodError("abstract method not implemented");
             GOTO_exceptionThrown();
         }
 #else
@@ -1821,7 +2086,7 @@
 #endif
 
         /* back up to previous frame and see if we hit a break */
-        fp = saveArea->prevFrame;
+        fp = (u4*)saveArea->prevFrame;
         assert(fp != NULL);
         if (dvmIsBreakFrame(fp)) {
             /* bail without popping the method frame from stack */
@@ -1875,8 +2140,8 @@
         PERIODIC_CHECKS(kInterpEntryThrow, 0);
 
 #if defined(WITH_JIT)
-        // Something threw during trace selection - abort the current trace
-        ABORT_JIT_TSELECT();
+        // Something threw during trace selection - end the current trace
+        END_JIT_TSELECT();
 #endif
         /*
          * We save off the exception and clear the exception status.  While
@@ -1908,7 +2173,7 @@
          * here, and have the JNI exception code do the reporting to the
          * debugger.
          */
-        if (gDvm.debuggerActive) {
+        if (DEBUGGER_ACTIVE) {
             void* catchFrame;
             catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
                         exception, true, &catchFrame);
@@ -1933,7 +2198,7 @@
          * the "catch" blocks.
          */
         catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
-                    exception, false, (void*)&fp);
+                    exception, false, (void**)(void*)&fp);
 
         /*
          * Restore the stack bounds after an overflow.  This isn't going to
@@ -2166,7 +2431,7 @@
             curMethod = methodToCall;
             methodClassDex = curMethod->clazz->pDvmDex;
             pc = methodToCall->insns;
-            fp = self->curFrame = newFp;
+            self->curFrame = fp = newFp;
 #ifdef EASY_GDB
             debugSaveArea = SAVEAREA_FROM_FP(newFp);
 #endif
@@ -2179,18 +2444,14 @@
             FINISH(0);                              // jump to method start
         } else {
             /* set this up for JNI locals, even if not a JNI native */
-#ifdef USE_INDIRECT_REF
             newSaveArea->xtra.localRefCookie = self->jniLocalRefTable.segmentState.all;
-#else
-            newSaveArea->xtra.localRefCookie = self->jniLocalRefTable.nextEntry;
-#endif
 
             self->curFrame = newFp;
 
             DUMP_REGS(methodToCall, newFp, true);   // show input args
 
 #if (INTERP_TYPE == INTERP_DBG)
-            if (gDvm.debuggerActive) {
+            if (DEBUGGER_ACTIVE) {
                 dvmDbgPostLocationEvent(methodToCall, -1,
                     dvmGetThisPtr(curMethod, fp), DBG_METHOD_ENTRY);
             }
@@ -2217,7 +2478,7 @@
             (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
 
 #if (INTERP_TYPE == INTERP_DBG)
-            if (gDvm.debuggerActive) {
+            if (DEBUGGER_ACTIVE) {
                 dvmDbgPostLocationEvent(methodToCall, -1,
                     dvmGetThisPtr(curMethod, fp), DBG_METHOD_EXIT);
             }
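The jumbo invoke targets above all share one decode pattern: a 32-bit method reference from two code units, an explicit argument count, an explicit first register, and a partial ADJUST_PC(2), presumably so that the shared return path, which assumes a 3-unit invoke, ends up skipping all 5 units. A condensed sketch of that pattern, using the FETCH/ADJUST_PC macros as they appear in the handlers:

    /*
     * Assumed 5-unit jumbo invoke layout:
     *   [0] 0xNNFF op  [1] meth lo  [2] meth hi  [3] count  [4] first reg
     */
    ref   = FETCH(1) | (u4) FETCH(2) << 16;   /* 32-bit method ref         */
    vsrc1 = FETCH(3);                         /* argument count            */
    vdst  = FETCH(4);                         /* first argument register   */
    ADJUST_PC(2);   /* 2 now + the 3 applied on return = 5 code units total */
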
diff --git a/vm/mterp/portable/debug.c b/vm/mterp/portable/debug.c
index 1d06188..e9fe72b 100644
--- a/vm/mterp/portable/debug.c
+++ b/vm/mterp/portable/debug.c
@@ -194,7 +194,7 @@
         static const char* mn = "shiftTest2";
         static const char* sg = "()V";
 
-        if (/*gDvm.debuggerActive &&*/
+        if (/*DEBUGGER_ACTIVE &&*/
             strcmp(method->clazz->descriptor, cd) == 0 &&
             strcmp(method->name, mn) == 0 &&
             strcmp(method->shorty, sg) == 0)
@@ -205,7 +205,7 @@
             dumpRegs(method, fp, true);
         }
 
-        if (!gDvm.debuggerActive)
+        if (!DEBUGGER_ACTIVE)
             *pIsMethodEntry = false;
     }
 #endif
@@ -222,7 +222,7 @@
         *pIsMethodEntry = false;
         TRACE_METHOD_ENTER(self, method);
     }
-    if (gDvm.debuggerActive) {
+    if (DEBUGGER_ACTIVE) {
         updateDebugger(method, pc, fp, isEntry, self);
     }
     if (gDvm.instructionCountEnableCount != 0) {
diff --git a/vm/mterp/portable/enddefs.c b/vm/mterp/portable/enddefs.c
index 30deedc..6f28d8b 100644
--- a/vm/mterp/portable/enddefs.c
+++ b/vm/mterp/portable/enddefs.c
@@ -8,7 +8,7 @@
 bail:
     ILOGD("|-- Leaving interpreter loop");      // note "curMethod" may be NULL
 
-    interpState->retval = retval;
+    self->retval = retval;
     return false;
 
 bail_switch:
@@ -20,18 +20,18 @@
      * TODO: figure out if preserving this makes any sense.
      */
 #if INTERP_TYPE == INTERP_DBG
-    interpState->debugIsMethodEntry = debugIsMethodEntry;
+    self->debugIsMethodEntry = debugIsMethodEntry;
 #else
-    interpState->debugIsMethodEntry = false;
+    self->debugIsMethodEntry = false;
 #endif
 
     /* export state changes */
-    interpState->method = curMethod;
-    interpState->pc = pc;
-    interpState->fp = fp;
+    self->interpSave.method = curMethod;
+    self->interpSave.pc = pc;
+    self->interpSave.fp = fp;
     /* debugTrackedRefStart doesn't change */
-    interpState->retval = retval;   /* need for _entryPoint=ret */
-    interpState->nextMode =
+    self->retval = retval;   /* need for _entryPoint=ret */
+    self->nextMode =
         (INTERP_TYPE == INTERP_STD) ? INTERP_DBG : INTERP_STD;
     LOGVV(" meth='%s.%s' pc=0x%x fp=%p\n",
         curMethod->clazz->descriptor, curMethod->name,
diff --git a/vm/mterp/portable/entry.c b/vm/mterp/portable/entry.c
index 56649e7..c3139ee 100644
--- a/vm/mterp/portable/entry.c
+++ b/vm/mterp/portable/entry.c
@@ -3,17 +3,17 @@
  *
  * This was written with an ARM implementation in mind.
  */
-bool INTERP_FUNC_NAME(Thread* self, InterpState* interpState)
+bool INTERP_FUNC_NAME(Thread* self)
 {
 #if defined(EASY_GDB)
     StackSaveArea* debugSaveArea = SAVEAREA_FROM_FP(self->curFrame);
 #endif
 #if INTERP_TYPE == INTERP_DBG
     bool debugIsMethodEntry = false;
-    debugIsMethodEntry = interpState->debugIsMethodEntry;
+    debugIsMethodEntry = self->debugIsMethodEntry;
 #endif
 #if defined(WITH_TRACKREF_CHECKS)
-    int debugTrackedRefStart = interpState->debugTrackedRefStart;
+    int debugTrackedRefStart = self->interpSave.debugTrackedRefStart;
 #endif
     DvmDex* methodClassDex;     // curMethod->clazz->pDvmDex
     JValue retval;
@@ -24,11 +24,12 @@
     u4* fp;                     // frame pointer
     u2 inst;                    // current instruction
     /* instruction decoding */
-    u2 ref;                     // 16-bit quantity fetched directly
+    u4 ref;                     // 16 or 32-bit quantity fetched directly
     u2 vsrc1, vsrc2, vdst;      // usually used for register indexes
     /* method call setup */
     const Method* methodToCall;
     bool methodCallRange;
+    bool jumboFormat;
 
 
 #if defined(THREADED_INTERP)
@@ -39,16 +40,16 @@
 #if defined(WITH_JIT)
 #if 0
     LOGD("*DebugInterp - entrypoint is %d, tgt is 0x%x, %s\n",
-         interpState->entryPoint,
-         interpState->pc,
-         interpState->method->name);
+         self->entryPoint,
+         self->interpSave.pc,
+         self->interpSave.method->name);
 #endif
 #if INTERP_TYPE == INTERP_DBG
     const ClassObject* callsiteClass = NULL;
 
 #if defined(WITH_SELF_VERIFICATION)
-    if (interpState->jitState != kJitSelfVerification) {
-        interpState->self->shadowSpace->jitExitState = kSVSIdle;
+    if (self->jitState != kJitSelfVerification) {
+        self->shadowSpace->jitExitState = kSVSIdle;
     }
 #endif
 
@@ -61,11 +62,11 @@
           * dvmJitCheckTraceRequest will change the jitState to kJitDone but
           * but stay in the dbg interpreter.
           */
-         (interpState->entryPoint == kInterpEntryInstr) &&
-         (interpState->jitState == kJitTSelectRequest ||
-          interpState->jitState == kJitTSelectRequestHot) &&
-         dvmJitCheckTraceRequest(self, interpState)) {
-        interpState->nextMode = INTERP_STD;
+         (self->entryPoint == kInterpEntryInstr) &&
+         (self->jitState == kJitTSelectRequest ||
+          self->jitState == kJitTSelectRequestHot) &&
+         dvmJitCheckTraceRequest(self)) {
+        self->nextMode = INTERP_STD;
         //LOGD("Invalid trace request, exiting\n");
         return true;
     }
@@ -73,17 +74,17 @@
 #endif /* WITH_JIT */
 
     /* copy state in */
-    curMethod = interpState->method;
-    pc = interpState->pc;
-    fp = interpState->fp;
-    retval = interpState->retval;   /* only need for kInterpEntryReturn? */
+    curMethod = self->interpSave.method;
+    pc = self->interpSave.pc;
+    fp = self->interpSave.fp;
+    retval = self->retval;   /* only need for kInterpEntryReturn? */
 
     methodClassDex = curMethod->clazz->pDvmDex;
 
     LOGVV("threadid=%d: entry(%s) %s.%s pc=0x%x fp=%p ep=%d\n",
-        self->threadId, (interpState->nextMode == INTERP_STD) ? "STD" : "DBG",
+        self->threadId, (self->nextMode == INTERP_STD) ? "STD" : "DBG",
         curMethod->clazz->descriptor, curMethod->name, pc - curMethod->insns,
-        fp, interpState->entryPoint);
+        fp, self->entryPoint);
 
     /*
      * DEBUG: scramble this to ensure we're not relying on it.
@@ -94,11 +95,11 @@
     if (debugIsMethodEntry) {
         ILOGD("|-- Now interpreting %s.%s", curMethod->clazz->descriptor,
                 curMethod->name);
-        DUMP_REGS(curMethod, interpState->fp, false);
+        DUMP_REGS(curMethod, self->interpSave.fp, false);
     }
 #endif
 
-    switch (interpState->entryPoint) {
+    switch (self->entryPoint) {
     case kInterpEntryInstr:
         /* just fall through to instruction loop or threaded kickstart */
         break;
diff --git a/vm/mterp/portable/portdbg.c b/vm/mterp/portable/portdbg.c
index 65349e9..4334627 100644
--- a/vm/mterp/portable/portdbg.c
+++ b/vm/mterp/portable/portdbg.c
@@ -5,13 +5,13 @@
     checkDebugAndProf(pc, fp, self, curMethod, &debugIsMethodEntry)
 
 #if defined(WITH_JIT)
-#define CHECK_JIT_BOOL() (dvmCheckJit(pc, self, interpState, callsiteClass,\
+#define CHECK_JIT_BOOL() (dvmCheckJit(pc, self, callsiteClass,\
                           methodToCall))
-#define CHECK_JIT_VOID() (dvmCheckJit(pc, self, interpState, callsiteClass,\
+#define CHECK_JIT_VOID() (dvmCheckJit(pc, self, callsiteClass,\
                           methodToCall))
-#define ABORT_JIT_TSELECT() (dvmJitAbortTraceSelect(interpState))
+#define END_JIT_TSELECT() (dvmJitEndTraceSelect(self))
 #else
 #define CHECK_JIT_BOOL() (false)
 #define CHECK_JIT_VOID()
-#define ABORT_JIT_TSELECT(x) ((void)0)
+#define END_JIT_TSELECT(x) ((void)0)
 #endif
diff --git a/vm/mterp/portable/portstd.c b/vm/mterp/portable/portstd.c
index f37c22b..1c2b4ea 100644
--- a/vm/mterp/portable/portstd.c
+++ b/vm/mterp/portable/portstd.c
@@ -5,4 +5,4 @@
 
 #define CHECK_JIT_BOOL() (false)
 #define CHECK_JIT_VOID()
-#define ABORT_JIT_TSELECT() ((void)0)
+#define END_JIT_TSELECT() ((void)0)
diff --git a/vm/mterp/portable/stubdefs.c b/vm/mterp/portable/stubdefs.c
index b46bb3a..84bb6d3 100644
--- a/vm/mterp/portable/stubdefs.c
+++ b/vm/mterp/portable/stubdefs.c
@@ -36,10 +36,14 @@
 # define FINISH_BKPT(_opcode) {                                             \
         goto *handlerTable[_opcode];                                        \
     }
+# define DISPATCH_EXTENDED(_opcode) {                                       \
+        goto *handlerTable[0x100 + _opcode];                                \
+    }
 #else
 # define HANDLE_OPCODE(_op) case _op:
 # define FINISH(_offset)    { ADJUST_PC(_offset); break; }
 # define FINISH_BKPT(opcode) { > not implemented < }
+# define DISPATCH_EXTENDED(opcode) goto case (0x100 + opcode);
 #endif
 
 #define OP_END
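DISPATCH_EXTENDED above indexes the threaded-dispatch table at 0x100 plus the extended opcode number; jumbo instructions use a 0xNNFF opcode unit, so the high byte selects the handler. An illustrative helper (the function name and the u2 typedef are assumptions, not part of the patch):

    static inline int extendedHandlerIndex(u2 opcodeUnit)
    {
        /* Jumbo opcode unit is NN|FF: low byte 0xff, high byte = extended op. */
        int extendedOpcode = (opcodeUnit >> 8) & 0xff;
        return 0x100 + extendedOpcode;          /* index into handlerTable[] */
    }
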
@@ -61,9 +65,10 @@
 
 #define GOTO_returnFromMethod() goto returnFromMethod;
 
-#define GOTO_invoke(_target, _methodCallRange)                              \
+#define GOTO_invoke(_target, _methodCallRange, _jumboFormat)                \
     do {                                                                    \
         methodCallRange = _methodCallRange;                                 \
+        jumboFormat = _jumboFormat;                                         \
         goto _target;                                                       \
     } while(false)
 
@@ -86,10 +91,10 @@
         }                                                                   \
         if (NEED_INTERP_SWITCH(INTERP_TYPE)) {                              \
             ADJUST_PC(_pcadj);                                              \
-            interpState->entryPoint = _entryPoint;                          \
+            self->entryPoint = _entryPoint;                          \
             LOGVV("threadid=%d: switch to %s ep=%d adj=%d\n",               \
                 self->threadId,                                             \
-                (interpState->nextMode == INTERP_STD) ? "STD" : "DBG",      \
+                (self->nextMode == INTERP_STD) ? "STD" : "DBG",      \
                 (_entryPoint), (_pcadj));                                   \
             GOTO_bail_switch();                                             \
         }                                                                   \
diff --git a/vm/mterp/x86-atom/OP_CHECK_CAST.S b/vm/mterp/x86-atom/OP_CHECK_CAST.S
index bbbdb0f..c78f336 100644
--- a/vm/mterp/x86-atom/OP_CHECK_CAST.S
+++ b/vm/mterp/x86-atom/OP_CHECK_CAST.S
@@ -73,6 +73,13 @@
     */
 
     EXPORT_PC                           # we will throw an exception
+#error BIT ROT!!!
+    /*
+     * TODO: Code here needs to call dvmThrowClassCastException with two
+     * arguments.
+     */
+#if 0
+    /* old obsolete code that called dvmThrowExceptionWithClassMessage */
     movl        $$.LstrClassCastExceptionPtr, -8(%esp) # push parameter message
     movl        offObject_clazz(rINST), rINST # rINST<- obj->clazz
     movl        offClassObject_descriptor(rINST), rINST # rINST<- obj->clazz->descriptor
@@ -81,6 +88,7 @@
     call        dvmThrowExceptionWithClassMessage # call: (const char* exceptionDescriptor,
                                                   #       const char* messageDescriptor, Object* cause)
                                                   # return: void
+#endif
     lea         8(%esp), %esp
     jmp         common_exceptionThrown
 
@@ -108,6 +116,3 @@
     je          common_exceptionThrown  # handle exception
     movl        %eax, %ecx              # %ecx<- resolved class
     jmp         .L${opcode}_resolved
-
-.LstrClassCastExceptionPtr:
-.asciz      "Ljava/lang/ClassCastException;"
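The TODO added above says the throw path must now call dvmThrowClassCastException with two arguments. A hedged C sketch of the intended call, assuming the helper takes the object's actual class followed by the class being cast to (variable names are illustrative only):

    ClassObject* actual  = obj->clazz;        /* class of the object in vAA */
    ClassObject* desired = resolvedClass;     /* class named by check-cast  */
    dvmThrowClassCastException(actual, desired);
    /* ...then branch to common_exceptionThrown, as the assembly does. */
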
diff --git a/vm/mterp/x86-atom/OP_CHECK_CAST_JUMBO.S b/vm/mterp/x86-atom/OP_CHECK_CAST_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_CHECK_CAST_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_CONST_CLASS_JUMBO.S b/vm/mterp/x86-atom/OP_CONST_CLASS_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_CONST_CLASS_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_FILLED_NEW_ARRAY_JUMBO.S b/vm/mterp/x86-atom/OP_FILLED_NEW_ARRAY_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_FILLED_NEW_ARRAY_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_IGET_BOOLEAN_JUMBO.S b/vm/mterp/x86-atom/OP_IGET_BOOLEAN_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_IGET_BOOLEAN_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_IGET_BYTE_JUMBO.S b/vm/mterp/x86-atom/OP_IGET_BYTE_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_IGET_BYTE_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_IGET_CHAR_JUMBO.S b/vm/mterp/x86-atom/OP_IGET_CHAR_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_IGET_CHAR_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_IGET_JUMBO.S b/vm/mterp/x86-atom/OP_IGET_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_IGET_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_IGET_OBJECT_JUMBO.S b/vm/mterp/x86-atom/OP_IGET_OBJECT_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_IGET_OBJECT_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_IGET_SHORT_JUMBO.S b/vm/mterp/x86-atom/OP_IGET_SHORT_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_IGET_SHORT_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_IGET_WIDE_JUMBO.S b/vm/mterp/x86-atom/OP_IGET_WIDE_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_IGET_WIDE_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_INSTANCE_OF_JUMBO.S b/vm/mterp/x86-atom/OP_INSTANCE_OF_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_INSTANCE_OF_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_INVOKE_DIRECT_EMPTY.S b/vm/mterp/x86-atom/OP_INVOKE_DIRECT_EMPTY.S
deleted file mode 100644
index 85c0418..0000000
--- a/vm/mterp/x86-atom/OP_INVOKE_DIRECT_EMPTY.S
+++ /dev/null
@@ -1,26 +0,0 @@
-   /* Copyright (C) 2008 The Android Open Source Project
-    *
-    * Licensed under the Apache License, Version 2.0 (the "License");
-    * you may not use this file except in compliance with the License.
-    * You may obtain a copy of the License at
-    *
-    * http://www.apache.org/licenses/LICENSE-2.0
-    *
-    * Unless required by applicable law or agreed to in writing, software
-    * distributed under the License is distributed on an "AS IS" BASIS,
-    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    * See the License for the specific language governing permissions and
-    * limitations under the License.
-    */
-
-   /*
-    * File: OP_INVOKE_DIRECT_EMPTY.S
-    *
-    * Code: Used as a no-op. Uses no substitutions.
-    *
-    * For: invoke-direct-empty
-    *
-    * Format: B|A|op CCCC G|F|E|D (35c)
-    */
-
-    FINISH 3
diff --git a/vm/mterp/x86-atom/OP_INVOKE_DIRECT_JUMBO.S b/vm/mterp/x86-atom/OP_INVOKE_DIRECT_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_INVOKE_DIRECT_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_INVOKE_INTERFACE_JUMBO.S b/vm/mterp/x86-atom/OP_INVOKE_INTERFACE_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_INVOKE_INTERFACE_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_INVOKE_OBJECT_INIT_RANGE.S b/vm/mterp/x86-atom/OP_INVOKE_OBJECT_INIT_RANGE.S
new file mode 100644
index 0000000..2459a3c
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_INVOKE_OBJECT_INIT_RANGE.S
@@ -0,0 +1,29 @@
+   /* Copyright (C) 2008 The Android Open Source Project
+    *
+    * Licensed under the Apache License, Version 2.0 (the "License");
+    * you may not use this file except in compliance with the License.
+    * You may obtain a copy of the License at
+    *
+    * http://www.apache.org/licenses/LICENSE-2.0
+    *
+    * Unless required by applicable law or agreed to in writing, software
+    * distributed under the License is distributed on an "AS IS" BASIS,
+    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    * See the License for the specific language governing permissions and
+    * limitations under the License.
+    */
+
+   /*
+    * File: OP_INVOKE_OBJECT_INIT_RANGE.S
+    *
+    * Code: TODO
+    *
+    * For: invoke-object-init/range
+    *
+    * Format: B|A|op CCCC G|F|E|D (35c)
+    */
+
+<<<<<<< HEAD:vm/mterp/x86-atom/OP_INVOKE_OBJECT_INIT_RANGE.S
+=======
+    FINISH 3
+>>>>>>> 10185db0:vm/mterp/x86-atom/OP_INVOKE_DIRECT_EMPTY.S
diff --git a/vm/mterp/x86-atom/OP_INVOKE_STATIC_JUMBO.S b/vm/mterp/x86-atom/OP_INVOKE_STATIC_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_INVOKE_STATIC_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_INVOKE_SUPER_JUMBO.S b/vm/mterp/x86-atom/OP_INVOKE_SUPER_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_INVOKE_SUPER_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_INVOKE_VIRTUAL_JUMBO.S b/vm/mterp/x86-atom/OP_INVOKE_VIRTUAL_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_INVOKE_VIRTUAL_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_IPUT_BOOLEAN_JUMBO.S b/vm/mterp/x86-atom/OP_IPUT_BOOLEAN_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_IPUT_BOOLEAN_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_IPUT_BYTE_JUMBO.S b/vm/mterp/x86-atom/OP_IPUT_BYTE_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_IPUT_BYTE_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_IPUT_CHAR_JUMBO.S b/vm/mterp/x86-atom/OP_IPUT_CHAR_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_IPUT_CHAR_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_IPUT_JUMBO.S b/vm/mterp/x86-atom/OP_IPUT_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_IPUT_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_IPUT_OBJECT_JUMBO.S b/vm/mterp/x86-atom/OP_IPUT_OBJECT_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_IPUT_OBJECT_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_IPUT_SHORT_JUMBO.S b/vm/mterp/x86-atom/OP_IPUT_SHORT_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_IPUT_SHORT_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_IPUT_WIDE_JUMBO.S b/vm/mterp/x86-atom/OP_IPUT_WIDE_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_IPUT_WIDE_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_MONITOR_ENTER.S b/vm/mterp/x86-atom/OP_MONITOR_ENTER.S
index d3fada3..39d0e7b 100644
--- a/vm/mterp/x86-atom/OP_MONITOR_ENTER.S
+++ b/vm/mterp/x86-atom/OP_MONITOR_ENTER.S
@@ -33,9 +33,7 @@
     GET_VREG    rINST                   # rINST<- vAA
     cmp         $$0, rINST              # check for null object
     movl        offGlue_self(%eax), %eax # %eax<- glue->self
-#ifdef WITH_MONITOR_TRACKING
-    EXPORT_PC   # export PC so we can grab stack trace
-#endif
+    EXPORT_PC   # need for precise GC
     je          common_errNullObject    # handle null object
 #    jmp         .L${opcode}_finish
 #%break
@@ -48,11 +46,4 @@
                                         # return: void
     FFETCH_ADV  1, %edx                 # %edx<- next instruction hi; fetch, advance
     lea         8(%esp), %esp
-#ifdef WITH_DEADLOCK_PREDICTION
-    movl        rGLUE, %eax             # %eax<- pMterpGlue
-    movl        offGlue_self(%eax), %eax # %eax<- glue->self
-    movl        offThread_exception(%eax), %eax # %eax<- glue->self->exception
-    cmp         $$0, %eax               # check for exception
-    jne         common_exceptionThrown  # handle exception
-#endif
     FGETOP_JMP  1, %edx                 # jump to next instruction; getop, jmp
diff --git a/vm/mterp/x86-atom/OP_NEW_ARRAY_JUMBO.S b/vm/mterp/x86-atom/OP_NEW_ARRAY_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_NEW_ARRAY_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_NEW_INSTANCE_JUMBO.S b/vm/mterp/x86-atom/OP_NEW_INSTANCE_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_NEW_INSTANCE_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_SGET_BOOLEAN_JUMBO.S b/vm/mterp/x86-atom/OP_SGET_BOOLEAN_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_SGET_BOOLEAN_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_SGET_BYTE_JUMBO.S b/vm/mterp/x86-atom/OP_SGET_BYTE_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_SGET_BYTE_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_SGET_CHAR_JUMBO.S b/vm/mterp/x86-atom/OP_SGET_CHAR_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_SGET_CHAR_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_SGET_JUMBO.S b/vm/mterp/x86-atom/OP_SGET_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_SGET_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_SGET_OBJECT_JUMBO.S b/vm/mterp/x86-atom/OP_SGET_OBJECT_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_SGET_OBJECT_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_SGET_SHORT_JUMBO.S b/vm/mterp/x86-atom/OP_SGET_SHORT_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_SGET_SHORT_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_SGET_WIDE_JUMBO.S b/vm/mterp/x86-atom/OP_SGET_WIDE_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_SGET_WIDE_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_SPUT_BOOLEAN_JUMBO.S b/vm/mterp/x86-atom/OP_SPUT_BOOLEAN_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_SPUT_BOOLEAN_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_SPUT_BYTE_JUMBO.S b/vm/mterp/x86-atom/OP_SPUT_BYTE_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_SPUT_BYTE_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_SPUT_CHAR_JUMBO.S b/vm/mterp/x86-atom/OP_SPUT_CHAR_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_SPUT_CHAR_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_SPUT_JUMBO.S b/vm/mterp/x86-atom/OP_SPUT_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_SPUT_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_SPUT_OBJECT_JUMBO.S b/vm/mterp/x86-atom/OP_SPUT_OBJECT_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_SPUT_OBJECT_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_SPUT_SHORT_JUMBO.S b/vm/mterp/x86-atom/OP_SPUT_SHORT_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_SPUT_SHORT_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_SPUT_WIDE_JUMBO.S b/vm/mterp/x86-atom/OP_SPUT_WIDE_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_SPUT_WIDE_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_THROW_VERIFICATION_ERROR_JUMBO.S b/vm/mterp/x86-atom/OP_THROW_VERIFICATION_ERROR_JUMBO.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_THROW_VERIFICATION_ERROR_JUMBO.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_27FF.S b/vm/mterp/x86-atom/OP_UNUSED_27FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_27FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_28FF.S b/vm/mterp/x86-atom/OP_UNUSED_28FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_28FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_29FF.S b/vm/mterp/x86-atom/OP_UNUSED_29FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_29FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_2AFF.S b/vm/mterp/x86-atom/OP_UNUSED_2AFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_2AFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_2BFF.S b/vm/mterp/x86-atom/OP_UNUSED_2BFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_2BFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_2CFF.S b/vm/mterp/x86-atom/OP_UNUSED_2CFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_2CFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_2DFF.S b/vm/mterp/x86-atom/OP_UNUSED_2DFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_2DFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_2EFF.S b/vm/mterp/x86-atom/OP_UNUSED_2EFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_2EFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_2FFF.S b/vm/mterp/x86-atom/OP_UNUSED_2FFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_2FFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_30FF.S b/vm/mterp/x86-atom/OP_UNUSED_30FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_30FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_31FF.S b/vm/mterp/x86-atom/OP_UNUSED_31FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_31FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_32FF.S b/vm/mterp/x86-atom/OP_UNUSED_32FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_32FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_33FF.S b/vm/mterp/x86-atom/OP_UNUSED_33FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_33FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_34FF.S b/vm/mterp/x86-atom/OP_UNUSED_34FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_34FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_35FF.S b/vm/mterp/x86-atom/OP_UNUSED_35FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_35FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_36FF.S b/vm/mterp/x86-atom/OP_UNUSED_36FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_36FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_37FF.S b/vm/mterp/x86-atom/OP_UNUSED_37FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_37FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_38FF.S b/vm/mterp/x86-atom/OP_UNUSED_38FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_38FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_39FF.S b/vm/mterp/x86-atom/OP_UNUSED_39FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_39FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_3AFF.S b/vm/mterp/x86-atom/OP_UNUSED_3AFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_3AFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_3BFF.S b/vm/mterp/x86-atom/OP_UNUSED_3BFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_3BFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_3CFF.S b/vm/mterp/x86-atom/OP_UNUSED_3CFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_3CFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_3DFF.S b/vm/mterp/x86-atom/OP_UNUSED_3DFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_3DFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_3EFF.S b/vm/mterp/x86-atom/OP_UNUSED_3EFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_3EFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_3FFF.S b/vm/mterp/x86-atom/OP_UNUSED_3FFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_3FFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_40FF.S b/vm/mterp/x86-atom/OP_UNUSED_40FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_40FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_41FF.S b/vm/mterp/x86-atom/OP_UNUSED_41FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_41FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_42FF.S b/vm/mterp/x86-atom/OP_UNUSED_42FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_42FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_43FF.S b/vm/mterp/x86-atom/OP_UNUSED_43FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_43FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_44FF.S b/vm/mterp/x86-atom/OP_UNUSED_44FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_44FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_45FF.S b/vm/mterp/x86-atom/OP_UNUSED_45FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_45FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_46FF.S b/vm/mterp/x86-atom/OP_UNUSED_46FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_46FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_47FF.S b/vm/mterp/x86-atom/OP_UNUSED_47FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_47FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_48FF.S b/vm/mterp/x86-atom/OP_UNUSED_48FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_48FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_49FF.S b/vm/mterp/x86-atom/OP_UNUSED_49FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_49FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_4AFF.S b/vm/mterp/x86-atom/OP_UNUSED_4AFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_4AFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_4BFF.S b/vm/mterp/x86-atom/OP_UNUSED_4BFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_4BFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_4CFF.S b/vm/mterp/x86-atom/OP_UNUSED_4CFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_4CFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_4DFF.S b/vm/mterp/x86-atom/OP_UNUSED_4DFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_4DFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_4EFF.S b/vm/mterp/x86-atom/OP_UNUSED_4EFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_4EFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_4FFF.S b/vm/mterp/x86-atom/OP_UNUSED_4FFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_4FFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_50FF.S b/vm/mterp/x86-atom/OP_UNUSED_50FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_50FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_51FF.S b/vm/mterp/x86-atom/OP_UNUSED_51FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_51FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_52FF.S b/vm/mterp/x86-atom/OP_UNUSED_52FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_52FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_53FF.S b/vm/mterp/x86-atom/OP_UNUSED_53FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_53FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_54FF.S b/vm/mterp/x86-atom/OP_UNUSED_54FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_54FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_55FF.S b/vm/mterp/x86-atom/OP_UNUSED_55FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_55FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_56FF.S b/vm/mterp/x86-atom/OP_UNUSED_56FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_56FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_57FF.S b/vm/mterp/x86-atom/OP_UNUSED_57FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_57FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_58FF.S b/vm/mterp/x86-atom/OP_UNUSED_58FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_58FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_59FF.S b/vm/mterp/x86-atom/OP_UNUSED_59FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_59FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_5AFF.S b/vm/mterp/x86-atom/OP_UNUSED_5AFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_5AFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_5BFF.S b/vm/mterp/x86-atom/OP_UNUSED_5BFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_5BFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_5CFF.S b/vm/mterp/x86-atom/OP_UNUSED_5CFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_5CFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_5DFF.S b/vm/mterp/x86-atom/OP_UNUSED_5DFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_5DFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_5EFF.S b/vm/mterp/x86-atom/OP_UNUSED_5EFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_5EFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_5FFF.S b/vm/mterp/x86-atom/OP_UNUSED_5FFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_5FFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_60FF.S b/vm/mterp/x86-atom/OP_UNUSED_60FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_60FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_61FF.S b/vm/mterp/x86-atom/OP_UNUSED_61FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_61FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_62FF.S b/vm/mterp/x86-atom/OP_UNUSED_62FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_62FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_63FF.S b/vm/mterp/x86-atom/OP_UNUSED_63FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_63FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_64FF.S b/vm/mterp/x86-atom/OP_UNUSED_64FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_64FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_65FF.S b/vm/mterp/x86-atom/OP_UNUSED_65FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_65FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_66FF.S b/vm/mterp/x86-atom/OP_UNUSED_66FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_66FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_67FF.S b/vm/mterp/x86-atom/OP_UNUSED_67FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_67FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_68FF.S b/vm/mterp/x86-atom/OP_UNUSED_68FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_68FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_69FF.S b/vm/mterp/x86-atom/OP_UNUSED_69FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_69FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_6AFF.S b/vm/mterp/x86-atom/OP_UNUSED_6AFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_6AFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_6BFF.S b/vm/mterp/x86-atom/OP_UNUSED_6BFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_6BFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_6CFF.S b/vm/mterp/x86-atom/OP_UNUSED_6CFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_6CFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_6DFF.S b/vm/mterp/x86-atom/OP_UNUSED_6DFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_6DFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_6EFF.S b/vm/mterp/x86-atom/OP_UNUSED_6EFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_6EFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_6FFF.S b/vm/mterp/x86-atom/OP_UNUSED_6FFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_6FFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_70FF.S b/vm/mterp/x86-atom/OP_UNUSED_70FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_70FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_71FF.S b/vm/mterp/x86-atom/OP_UNUSED_71FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_71FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_72FF.S b/vm/mterp/x86-atom/OP_UNUSED_72FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_72FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_73FF.S b/vm/mterp/x86-atom/OP_UNUSED_73FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_73FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_74FF.S b/vm/mterp/x86-atom/OP_UNUSED_74FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_74FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_75FF.S b/vm/mterp/x86-atom/OP_UNUSED_75FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_75FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_76FF.S b/vm/mterp/x86-atom/OP_UNUSED_76FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_76FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_77FF.S b/vm/mterp/x86-atom/OP_UNUSED_77FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_77FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_78FF.S b/vm/mterp/x86-atom/OP_UNUSED_78FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_78FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_79FF.S b/vm/mterp/x86-atom/OP_UNUSED_79FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_79FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_7AFF.S b/vm/mterp/x86-atom/OP_UNUSED_7AFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_7AFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_7BFF.S b/vm/mterp/x86-atom/OP_UNUSED_7BFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_7BFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_7CFF.S b/vm/mterp/x86-atom/OP_UNUSED_7CFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_7CFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_7DFF.S b/vm/mterp/x86-atom/OP_UNUSED_7DFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_7DFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_7EFF.S b/vm/mterp/x86-atom/OP_UNUSED_7EFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_7EFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_7FFF.S b/vm/mterp/x86-atom/OP_UNUSED_7FFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_7FFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_80FF.S b/vm/mterp/x86-atom/OP_UNUSED_80FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_80FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_81FF.S b/vm/mterp/x86-atom/OP_UNUSED_81FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_81FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_82FF.S b/vm/mterp/x86-atom/OP_UNUSED_82FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_82FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_83FF.S b/vm/mterp/x86-atom/OP_UNUSED_83FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_83FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_84FF.S b/vm/mterp/x86-atom/OP_UNUSED_84FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_84FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_85FF.S b/vm/mterp/x86-atom/OP_UNUSED_85FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_85FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_86FF.S b/vm/mterp/x86-atom/OP_UNUSED_86FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_86FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_87FF.S b/vm/mterp/x86-atom/OP_UNUSED_87FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_87FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_88FF.S b/vm/mterp/x86-atom/OP_UNUSED_88FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_88FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_89FF.S b/vm/mterp/x86-atom/OP_UNUSED_89FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_89FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_8AFF.S b/vm/mterp/x86-atom/OP_UNUSED_8AFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_8AFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_8BFF.S b/vm/mterp/x86-atom/OP_UNUSED_8BFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_8BFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_8CFF.S b/vm/mterp/x86-atom/OP_UNUSED_8CFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_8CFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_8DFF.S b/vm/mterp/x86-atom/OP_UNUSED_8DFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_8DFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_8EFF.S b/vm/mterp/x86-atom/OP_UNUSED_8EFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_8EFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_8FFF.S b/vm/mterp/x86-atom/OP_UNUSED_8FFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_8FFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_90FF.S b/vm/mterp/x86-atom/OP_UNUSED_90FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_90FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_91FF.S b/vm/mterp/x86-atom/OP_UNUSED_91FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_91FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_92FF.S b/vm/mterp/x86-atom/OP_UNUSED_92FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_92FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_93FF.S b/vm/mterp/x86-atom/OP_UNUSED_93FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_93FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_94FF.S b/vm/mterp/x86-atom/OP_UNUSED_94FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_94FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_95FF.S b/vm/mterp/x86-atom/OP_UNUSED_95FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_95FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_96FF.S b/vm/mterp/x86-atom/OP_UNUSED_96FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_96FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_97FF.S b/vm/mterp/x86-atom/OP_UNUSED_97FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_97FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_98FF.S b/vm/mterp/x86-atom/OP_UNUSED_98FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_98FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_99FF.S b/vm/mterp/x86-atom/OP_UNUSED_99FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_99FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_9AFF.S b/vm/mterp/x86-atom/OP_UNUSED_9AFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_9AFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_9BFF.S b/vm/mterp/x86-atom/OP_UNUSED_9BFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_9BFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_9CFF.S b/vm/mterp/x86-atom/OP_UNUSED_9CFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_9CFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_9DFF.S b/vm/mterp/x86-atom/OP_UNUSED_9DFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_9DFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_9EFF.S b/vm/mterp/x86-atom/OP_UNUSED_9EFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_9EFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_9FFF.S b/vm/mterp/x86-atom/OP_UNUSED_9FFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_9FFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_A0FF.S b/vm/mterp/x86-atom/OP_UNUSED_A0FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_A0FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_A1FF.S b/vm/mterp/x86-atom/OP_UNUSED_A1FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_A1FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_A2FF.S b/vm/mterp/x86-atom/OP_UNUSED_A2FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_A2FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_A3FF.S b/vm/mterp/x86-atom/OP_UNUSED_A3FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_A3FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_A4FF.S b/vm/mterp/x86-atom/OP_UNUSED_A4FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_A4FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_A5FF.S b/vm/mterp/x86-atom/OP_UNUSED_A5FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_A5FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_A6FF.S b/vm/mterp/x86-atom/OP_UNUSED_A6FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_A6FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_A7FF.S b/vm/mterp/x86-atom/OP_UNUSED_A7FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_A7FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_A8FF.S b/vm/mterp/x86-atom/OP_UNUSED_A8FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_A8FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_A9FF.S b/vm/mterp/x86-atom/OP_UNUSED_A9FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_A9FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_AAFF.S b/vm/mterp/x86-atom/OP_UNUSED_AAFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_AAFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_ABFF.S b/vm/mterp/x86-atom/OP_UNUSED_ABFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_ABFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_ACFF.S b/vm/mterp/x86-atom/OP_UNUSED_ACFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_ACFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_ADFF.S b/vm/mterp/x86-atom/OP_UNUSED_ADFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_ADFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_AEFF.S b/vm/mterp/x86-atom/OP_UNUSED_AEFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_AEFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_AFFF.S b/vm/mterp/x86-atom/OP_UNUSED_AFFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_AFFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_B0FF.S b/vm/mterp/x86-atom/OP_UNUSED_B0FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_B0FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_B1FF.S b/vm/mterp/x86-atom/OP_UNUSED_B1FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_B1FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_B2FF.S b/vm/mterp/x86-atom/OP_UNUSED_B2FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_B2FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_B3FF.S b/vm/mterp/x86-atom/OP_UNUSED_B3FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_B3FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_B4FF.S b/vm/mterp/x86-atom/OP_UNUSED_B4FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_B4FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_B5FF.S b/vm/mterp/x86-atom/OP_UNUSED_B5FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_B5FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_B6FF.S b/vm/mterp/x86-atom/OP_UNUSED_B6FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_B6FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_B7FF.S b/vm/mterp/x86-atom/OP_UNUSED_B7FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_B7FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_B8FF.S b/vm/mterp/x86-atom/OP_UNUSED_B8FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_B8FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_B9FF.S b/vm/mterp/x86-atom/OP_UNUSED_B9FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_B9FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_BAFF.S b/vm/mterp/x86-atom/OP_UNUSED_BAFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_BAFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_BBFF.S b/vm/mterp/x86-atom/OP_UNUSED_BBFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_BBFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_BCFF.S b/vm/mterp/x86-atom/OP_UNUSED_BCFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_BCFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_BDFF.S b/vm/mterp/x86-atom/OP_UNUSED_BDFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_BDFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_BEFF.S b/vm/mterp/x86-atom/OP_UNUSED_BEFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_BEFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_BFFF.S b/vm/mterp/x86-atom/OP_UNUSED_BFFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_BFFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_C0FF.S b/vm/mterp/x86-atom/OP_UNUSED_C0FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_C0FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_C1FF.S b/vm/mterp/x86-atom/OP_UNUSED_C1FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_C1FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_C2FF.S b/vm/mterp/x86-atom/OP_UNUSED_C2FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_C2FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_C3FF.S b/vm/mterp/x86-atom/OP_UNUSED_C3FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_C3FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_C4FF.S b/vm/mterp/x86-atom/OP_UNUSED_C4FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_C4FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_C5FF.S b/vm/mterp/x86-atom/OP_UNUSED_C5FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_C5FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_C6FF.S b/vm/mterp/x86-atom/OP_UNUSED_C6FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_C6FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_C7FF.S b/vm/mterp/x86-atom/OP_UNUSED_C7FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_C7FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_C8FF.S b/vm/mterp/x86-atom/OP_UNUSED_C8FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_C8FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_C9FF.S b/vm/mterp/x86-atom/OP_UNUSED_C9FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_C9FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_CAFF.S b/vm/mterp/x86-atom/OP_UNUSED_CAFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_CAFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_CBFF.S b/vm/mterp/x86-atom/OP_UNUSED_CBFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_CBFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_CCFF.S b/vm/mterp/x86-atom/OP_UNUSED_CCFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_CCFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_CDFF.S b/vm/mterp/x86-atom/OP_UNUSED_CDFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_CDFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_CEFF.S b/vm/mterp/x86-atom/OP_UNUSED_CEFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_CEFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_CFFF.S b/vm/mterp/x86-atom/OP_UNUSED_CFFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_CFFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_D0FF.S b/vm/mterp/x86-atom/OP_UNUSED_D0FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_D0FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_D1FF.S b/vm/mterp/x86-atom/OP_UNUSED_D1FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_D1FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_D2FF.S b/vm/mterp/x86-atom/OP_UNUSED_D2FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_D2FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_D3FF.S b/vm/mterp/x86-atom/OP_UNUSED_D3FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_D3FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_D4FF.S b/vm/mterp/x86-atom/OP_UNUSED_D4FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_D4FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_D5FF.S b/vm/mterp/x86-atom/OP_UNUSED_D5FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_D5FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_D6FF.S b/vm/mterp/x86-atom/OP_UNUSED_D6FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_D6FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_D7FF.S b/vm/mterp/x86-atom/OP_UNUSED_D7FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_D7FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_D8FF.S b/vm/mterp/x86-atom/OP_UNUSED_D8FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_D8FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_D9FF.S b/vm/mterp/x86-atom/OP_UNUSED_D9FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_D9FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_DAFF.S b/vm/mterp/x86-atom/OP_UNUSED_DAFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_DAFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_DBFF.S b/vm/mterp/x86-atom/OP_UNUSED_DBFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_DBFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_DCFF.S b/vm/mterp/x86-atom/OP_UNUSED_DCFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_DCFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_DDFF.S b/vm/mterp/x86-atom/OP_UNUSED_DDFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_DDFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_DEFF.S b/vm/mterp/x86-atom/OP_UNUSED_DEFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_DEFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_DFFF.S b/vm/mterp/x86-atom/OP_UNUSED_DFFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_DFFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_E0FF.S b/vm/mterp/x86-atom/OP_UNUSED_E0FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_E0FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_E1FF.S b/vm/mterp/x86-atom/OP_UNUSED_E1FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_E1FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_E2FF.S b/vm/mterp/x86-atom/OP_UNUSED_E2FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_E2FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_E3FF.S b/vm/mterp/x86-atom/OP_UNUSED_E3FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_E3FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_E4FF.S b/vm/mterp/x86-atom/OP_UNUSED_E4FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_E4FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_E5FF.S b/vm/mterp/x86-atom/OP_UNUSED_E5FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_E5FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_E6FF.S b/vm/mterp/x86-atom/OP_UNUSED_E6FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_E6FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_E7FF.S b/vm/mterp/x86-atom/OP_UNUSED_E7FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_E7FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_E8FF.S b/vm/mterp/x86-atom/OP_UNUSED_E8FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_E8FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_E9FF.S b/vm/mterp/x86-atom/OP_UNUSED_E9FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_E9FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_EAFF.S b/vm/mterp/x86-atom/OP_UNUSED_EAFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_EAFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_EBFF.S b/vm/mterp/x86-atom/OP_UNUSED_EBFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_EBFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_ECFF.S b/vm/mterp/x86-atom/OP_UNUSED_ECFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_ECFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_EDFF.S b/vm/mterp/x86-atom/OP_UNUSED_EDFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_EDFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_EEFF.S b/vm/mterp/x86-atom/OP_UNUSED_EEFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_EEFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_EFFF.S b/vm/mterp/x86-atom/OP_UNUSED_EFFF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_EFFF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_F0FF.S b/vm/mterp/x86-atom/OP_UNUSED_F0FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_F0FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_F1FF.S b/vm/mterp/x86-atom/OP_UNUSED_F1FF.S
new file mode 100644
index 0000000..ebae8b7
--- /dev/null
+++ b/vm/mterp/x86-atom/OP_UNUSED_F1FF.S
@@ -0,0 +1 @@
+%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/TODO.txt b/vm/mterp/x86-atom/TODO.txt
index 61bdd43..dba18d4 100644
--- a/vm/mterp/x86-atom/TODO.txt
+++ b/vm/mterp/x86-atom/TODO.txt
@@ -10,12 +10,25 @@
      assumed to be enabled)
 (hi) Implement OP_DISPATCH_FF for real. (Right now it's treated as
      an unused instruction.)
+(hi) Rename dvmJitGetCodeAddr to dvmJitGetTraceAddr.
+(hi) Remove references to rGLUE and replace with rSELF
+(hi) Rework footer.S's suspend check to reflect suspendCount change
+(hi) Rework interpreter to co-exist with the new switching model, which
+     eliminates a separate debug interpreter.
 
+(md) Add implementations for jumbo opcodes (40 instructions) and
+     their volatile variants (13 instructions)
 (md) Correct OP_MONITOR_EXIT (need to adjust PC before throw)
 (md) OP_THROW needs to export the PC
-(md) Use dvmThrowAIOOBE(index, length) for array bounds errors.
+(md) Use dvmThrowArrayIndexOutOfBoundsException(length, index) for
+     array bounds errors.
 (md) Use dvmThrowClassCastException(actual, desired) for class cast errors.
 (md) Use dvmThrowArrayStoreException(actual, desired) for array store errors.
+(md) Use dvmThrowNegativeArraySizeException(len) for array alloc errors.
+(md) Replace any remaining use of dvmThrowException with proper helper function
+
 (lo) Implement OP_BREAKPOINT
 (lo) Implement OP_*_VOLATILE (12 instructions)
 (lo) Implement OP_RETURN_VOID_BARRIER
+(lo) Implement OP_INVOKE_OBJECT_INIT
+(lo) Implement dvmJitScanAllClassPointers
diff --git a/vm/mterp/x86-atom/entry.S b/vm/mterp/x86-atom/entry.S
index d60d458..3b8a3da 100644
--- a/vm/mterp/x86-atom/entry.S
+++ b/vm/mterp/x86-atom/entry.S
@@ -366,7 +366,7 @@
 .long .L_OP_THROW_VERIFICATION_ERROR
 .long .L_OP_EXECUTE_INLINE
 .long .L_OP_EXECUTE_INLINE_RANGE
-.long .L_OP_INVOKE_DIRECT_EMPTY
+.long .L_OP_INVOKE_OBJECT_INIT_RANGE
 .long .L_OP_UNUSED_F1
 .long .L_OP_IGET_QUICK
 .long .L_OP_IGET_WIDE_QUICK
diff --git a/vm/mterp/x86-atom/footer.S b/vm/mterp/x86-atom/footer.S
index cb9970d..7b5ed9c 100644
--- a/vm/mterp/x86-atom/footer.S
+++ b/vm/mterp/x86-atom/footer.S
@@ -646,8 +646,6 @@
     .asciz "Ljava/lang/ArrayIndexOutOfBoundsException;"
 .LstrArrayStoreException:
     .asciz "Ljava/lang/ArrayStoreException;"
-.LstrClassCastException:
-    .asciz "Ljava/lang/ClassCastException;"
 .LstrDivideByZero:
     .asciz "divide by zero"
 .LstrInstantiationError:
diff --git a/vm/mterp/x86/OP_ADD_LONG.S b/vm/mterp/x86/OP_ADD_LONG.S
index b1edd3d..bc157f6 100644
--- a/vm/mterp/x86/OP_ADD_LONG.S
+++ b/vm/mterp/x86/OP_ADD_LONG.S
@@ -1,2 +1,2 @@
 %verify "executed"
-%include "x86/binopWide.S" {"instr1":"addl (rFP,%ecx,4),%edx", "instr2":"adcl 4(rFP,%ecx,4),%eax"}
+%include "x86/binopWide.S" {"instr1":"addl (rFP,%ecx,4),rIBASE", "instr2":"adcl 4(rFP,%ecx,4),%eax"}
diff --git a/vm/mterp/x86/OP_AGET.S b/vm/mterp/x86/OP_AGET.S
index 65ff582..42dfa0a 100644
--- a/vm/mterp/x86/OP_AGET.S
+++ b/vm/mterp/x86/OP_AGET.S
@@ -18,7 +18,7 @@
                                         #    index in ecx
     $load     offArrayObject_contents(%eax,%ecx,$shift),%eax
 .L${opcode}_finish:
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
     SET_VREG  %eax rINST
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_AGET_WIDE.S b/vm/mterp/x86/OP_AGET_WIDE.S
index 96d1c89..32266bc 100644
--- a/vm/mterp/x86/OP_AGET_WIDE.S
+++ b/vm/mterp/x86/OP_AGET_WIDE.S
@@ -11,18 +11,14 @@
     testl     %eax,%eax                 # null array object?
     je        common_errNullObject      # bail if so
     cmpl      offArrayObject_length(%eax),%ecx
-    jb        .L${opcode}_finish        # index < length, OK
-    jmp       common_errArrayIndex      # index >= length, bail.  Expects
+    jae       common_errArrayIndex      # index >= length, bail.  Expects
                                         #    arrayObj in eax
                                         #    index in ecx
-%break
-
-.L${opcode}_finish:
     leal      offArrayObject_contents(%eax,%ecx,8),%eax
     movl      (%eax),%ecx
     movl      4(%eax),%eax
     SET_VREG_WORD %ecx rINST 0
     SET_VREG_WORD %eax rINST 1
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_AND_LONG.S b/vm/mterp/x86/OP_AND_LONG.S
index 8a1152e..06df873 100644
--- a/vm/mterp/x86/OP_AND_LONG.S
+++ b/vm/mterp/x86/OP_AND_LONG.S
@@ -1,2 +1,2 @@
 %verify "executed"
-%include "x86/binopWide.S" {"instr1":"andl (rFP,%ecx,4),%edx", "instr2":"andl 4(rFP,%ecx,4),%eax"}
+%include "x86/binopWide.S" {"instr1":"andl (rFP,%ecx,4),rIBASE", "instr2":"andl 4(rFP,%ecx,4),%eax"}
diff --git a/vm/mterp/x86/OP_APUT.S b/vm/mterp/x86/OP_APUT.S
index 797b692..f51c9c7 100644
--- a/vm/mterp/x86/OP_APUT.S
+++ b/vm/mterp/x86/OP_APUT.S
@@ -1,4 +1,4 @@
-%default { "reg":"%ecx", "store":"movl", "shift":"4" }
+%default { "reg":"rINST", "store":"movl", "shift":"4" }
 %verify "executed"
     /*
      * Array put, 32 bits or less.  vBB[vCC] <- vAA
@@ -18,8 +18,8 @@
                                         #   index in ecx
     leal      offArrayObject_contents(%eax,%ecx,$shift),%eax
 .L${opcode}_finish:
-    GET_VREG_R  %ecx rINST
-    FETCH_INST_OPCODE 2 %edx
+    GET_VREG_R  rINST rINST
+    FETCH_INST_OPCODE 2 %ecx
     $store     $reg,(%eax)
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_APUT_BOOLEAN.S b/vm/mterp/x86/OP_APUT_BOOLEAN.S
index 14f8c7f..fb1e8db 100644
--- a/vm/mterp/x86/OP_APUT_BOOLEAN.S
+++ b/vm/mterp/x86/OP_APUT_BOOLEAN.S
@@ -1,2 +1,2 @@
 %verify "executed"
-%include "x86/OP_APUT.S" {"reg":"%cl", "store":"movb", "shift":"1" }
+%include "x86/OP_APUT.S" {"reg":"rINSTbl", "store":"movb", "shift":"1" }
diff --git a/vm/mterp/x86/OP_APUT_BYTE.S b/vm/mterp/x86/OP_APUT_BYTE.S
index d92225f..366c8c5 100644
--- a/vm/mterp/x86/OP_APUT_BYTE.S
+++ b/vm/mterp/x86/OP_APUT_BYTE.S
@@ -1,2 +1,2 @@
 %verify "executed"
-%include "x86/OP_APUT.S" { "reg":"%cl", "store":"movb", "shift":"1" }
+%include "x86/OP_APUT.S" { "reg":"rINSTbl", "store":"movb", "shift":"1" }
diff --git a/vm/mterp/x86/OP_APUT_CHAR.S b/vm/mterp/x86/OP_APUT_CHAR.S
index d466007..9c87384 100644
--- a/vm/mterp/x86/OP_APUT_CHAR.S
+++ b/vm/mterp/x86/OP_APUT_CHAR.S
@@ -1,2 +1,2 @@
 %verify "executed"
-%include "x86/OP_APUT.S" { "reg":"%cx", "store":"movw", "shift":"2" }
+%include "x86/OP_APUT.S" { "reg":"rINSTw", "store":"movw", "shift":"2" }
diff --git a/vm/mterp/x86/OP_APUT_OBJECT.S b/vm/mterp/x86/OP_APUT_OBJECT.S
index 9c93f39..4dd579c 100644
--- a/vm/mterp/x86/OP_APUT_OBJECT.S
+++ b/vm/mterp/x86/OP_APUT_OBJECT.S
@@ -13,18 +13,14 @@
     testl     %eax,%eax                 # null array object?
     je        common_errNullObject      # bail if so
     cmpl      offArrayObject_length(%eax),%ecx
-    jb        .L${opcode}_continue
-    jmp       common_errArrayIndex      # index >= length, bail.  Expects
+    jae       common_errArrayIndex      # index >= length, bail.  Expects
                                         #    arrayObj in eax
                                         #    index in ecx
-%break
-
     /* On entry:
      *   eax<- array object
      *   ecx<- index
      *   rINST<- vAA
      */
-.L${opcode}_continue:
     leal      offArrayObject_contents(%eax,%ecx,4),%ecx
     testl     rINST,rINST                    # storing null reference?
     je        .L${opcode}_skip_check
@@ -36,10 +32,12 @@
     movl      %ecx,OUT_ARG0(%esp)
     movl      %ecx,sReg0                     # store the two classes for later
     movl      %eax,sReg1
+    SPILL(rIBASE)
     call      dvmCanPutArrayElement          # test object type vs. array type
+    UNSPILL(rIBASE)
     UNSPILL_TMP1(%ecx)                       # recover target address
     testl     %eax,%eax
-    movl      rGLUE,%eax
+    movl      rSELF,%eax
     jne       .L${opcode}_types_okay
 
     # The types don't match.  We need to throw an ArrayStoreException.
@@ -52,17 +50,17 @@
     jmp       common_exceptionThrown
 
 .L${opcode}_types_okay:
-    movl      offGlue_cardTable(%eax),%eax   # get card table base
+    movl      offThread_cardTable(%eax),%eax   # get card table base
     movl      rINST,(%ecx)                   # store into array
-    UNSPILL_TMP2(%ecx)                       # recover object head
-    FETCH_INST_OPCODE 2 %edx
-    shrl      $$GC_CARD_SHIFT,%ecx           # object head to card number
-    movb      %al,(%eax,%ecx)                # mark card using object head
+    UNSPILL_TMP2(rINST)                      # recover object head
+    FETCH_INST_OPCODE 2 %ecx
+    shrl      $$GC_CARD_SHIFT,rINST          # object head to card number
+    movb      %al,(%eax,rINST)               # mark card using object head
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 .L${opcode}_skip_check:
     movl      rINST,(%ecx)
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_APUT_SHORT.S b/vm/mterp/x86/OP_APUT_SHORT.S
index d466007..9c87384 100644
--- a/vm/mterp/x86/OP_APUT_SHORT.S
+++ b/vm/mterp/x86/OP_APUT_SHORT.S
@@ -1,2 +1,2 @@
 %verify "executed"
-%include "x86/OP_APUT.S" { "reg":"%cx", "store":"movw", "shift":"2" }
+%include "x86/OP_APUT.S" { "reg":"rINSTw", "store":"movw", "shift":"2" }
diff --git a/vm/mterp/x86/OP_APUT_WIDE.S b/vm/mterp/x86/OP_APUT_WIDE.S
index 3647c3e..cd1e723 100644
--- a/vm/mterp/x86/OP_APUT_WIDE.S
+++ b/vm/mterp/x86/OP_APUT_WIDE.S
@@ -11,18 +11,14 @@
     testl     %eax,%eax                 # null array object?
     je        common_errNullObject      # bail if so
     cmpl      offArrayObject_length(%eax),%ecx
-    jb        .L${opcode}_finish        # index < length, OK
-    jmp       common_errArrayIndex      # index >= length, bail.  Expects:
+    jae       common_errArrayIndex      # index >= length, bail.  Expects:
                                         #   arrayObj in eax
                                         #   index in ecx
-%break
-
-.L${opcode}_finish:
     leal      offArrayObject_contents(%eax,%ecx,8),%eax
     GET_VREG_WORD %ecx rINST 0
     GET_VREG_WORD rINST rINST 1
-    movl      rINST,4(%eax)
-    FETCH_INST_OPCODE 2 %edx
     movl      %ecx,(%eax)
+    FETCH_INST_OPCODE 2 %ecx
+    movl      rINST,4(%eax)
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
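
Both aput handlers above fold the old jb/jmp pair into a single jae to common_errArrayIndex. Because the comparison is unsigned, one branch rejects negative indices as well as indices beyond the length. A one-line C equivalent, purely illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    /* Mirrors the single 'jae common_errArrayIndex': a negative index,
     * reinterpreted as unsigned, always compares >= any valid length. */
    static bool indexInBounds(int32_t index, uint32_t length)
    {
        return (uint32_t)index < length;
    }
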
diff --git a/vm/mterp/x86/OP_ARRAY_LENGTH.S b/vm/mterp/x86/OP_ARRAY_LENGTH.S
index 1666d0e..25caca3 100644
--- a/vm/mterp/x86/OP_ARRAY_LENGTH.S
+++ b/vm/mterp/x86/OP_ARRAY_LENGTH.S
@@ -8,8 +8,8 @@
    andb     $$0xf,%al                 # eax<- A
    testl    %ecx,%ecx                 # is null?
    je       common_errNullObject
-   FETCH_INST_OPCODE 1 %edx
-   movl     offArrayObject_length(%ecx),%ecx
+   movl     offArrayObject_length(%ecx),rINST
+   FETCH_INST_OPCODE 1 %ecx
    ADVANCE_PC 1
-   SET_VREG %ecx %eax
-   GOTO_NEXT_R %edx
+   SET_VREG rINST %eax
+   GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_CHECK_CAST.S b/vm/mterp/x86/OP_CHECK_CAST.S
index 6fb8415..0543a99 100644
--- a/vm/mterp/x86/OP_CHECK_CAST.S
+++ b/vm/mterp/x86/OP_CHECK_CAST.S
@@ -9,10 +9,10 @@
      * Check to see if a cast from one class to another is allowed.
      */
     /* check-cast vAA, class@BBBB */
-    movl      rGLUE,%ecx
+    movl      rSELF,%ecx
     GET_VREG_R  rINST,rINST             # rINST<- vAA (object)
     movzwl    2(rPC),%eax               # eax<- BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
+    movl      offThread_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
     testl     rINST,rINST               # is object null?
     movl      offDvmDex_pResClasses(%ecx),%ecx # ecx<- pDvmDex->pResClasses
     je        .L${opcode}_okay          # null obj, cast always succeeds
@@ -24,10 +24,9 @@
     cmpl      %eax,%ecx                 # same class (trivial success)?
     jne       .L${opcode}_fullcheck     # no, do full check
 .L${opcode}_okay:
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-%break
+    GOTO_NEXT_R %ecx
 
     /*
      * Trivial test failed, need to perform full check.  This is common.
@@ -39,7 +38,9 @@
     movl    %eax,sReg0                 # we'll need the desired class on failure
     movl    %eax,OUT_ARG1(%esp)
     movl    %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
     call    dvmInstanceofNonTrivial    # eax<- boolean result
+    UNSPILL(rIBASE)
     testl   %eax,%eax                  # failed?
     jne     .L${opcode}_okay           # no, success
 
@@ -59,15 +60,17 @@
      *  rINST holds object
      */
 .L${opcode}_resolve:
-    movl    rGLUE,%ecx
+    movl    rSELF,%ecx
     EXPORT_PC
     movzwl  2(rPC),%eax                # eax<- BBBB
-    movl    offGlue_method(%ecx),%ecx  # ecx<- glue->method
+    movl    offThread_method(%ecx),%ecx  # ecx<- self->method
     movl    %eax,OUT_ARG1(%esp)        # arg1<- BBBB
     movl    offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
     movl    $$0,OUT_ARG2(%esp)         # arg2<- false
     movl    %ecx,OUT_ARG0(%esp)        # arg0<- method->clazz
+    SPILL(rIBASE)
     call    dvmResolveClass            # eax<- resolved ClassObject ptr
+    UNSPILL(rIBASE)
     testl   %eax,%eax                  # got null?
     je      common_exceptionThrown     # yes, handle exception
     movl    offObject_clazz(rINST),%ecx  # ecx<- obj->clazz
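
The check-cast flow above is: a null reference always passes, an identical class passes trivially, and anything else goes through dvmInstanceofNonTrivial, throwing ClassCastException on failure. A C sketch of that decision, with the helper prototypes assumed for illustration rather than taken from the VM headers:

    #include <stdbool.h>

    struct ClassObject;
    struct Object { struct ClassObject *clazz; };

    /* Assumed prototypes for the routines named in the handler. */
    bool dvmInstanceofNonTrivial(const struct ClassObject *instClazz,
                                 const struct ClassObject *desiredClazz);
    void dvmThrowClassCastException(const struct ClassObject *actual,
                                    const struct ClassObject *desired);

    /* check-cast vAA, class@BBBB: leaves vAA untouched, throws on failure. */
    static void checkCast(const struct Object *obj,
                          const struct ClassObject *desired)
    {
        if (obj == NULL)
            return;                                   /* null always casts */
        if (obj->clazz == desired)
            return;                                   /* trivial same-class hit */
        if (!dvmInstanceofNonTrivial(obj->clazz, desired))
            dvmThrowClassCastException(obj->clazz, desired);
    }
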
diff --git a/vm/mterp/x86/OP_CHECK_CAST_JUMBO.S b/vm/mterp/x86/OP_CHECK_CAST_JUMBO.S
new file mode 100644
index 0000000..bf99bd7
--- /dev/null
+++ b/vm/mterp/x86/OP_CHECK_CAST_JUMBO.S
@@ -0,0 +1,77 @@
+%verify "executed"
+%verify "null object"
+%verify "class cast exception thrown, with correct class name"
+%verify "class cast exception not thrown on same class"
+%verify "class cast exception not thrown on subclass"
+%verify "class not resolved"
+%verify "class already resolved"
+    /*
+     * Check to see if a cast from one class to another is allowed.
+     */
+    /* check-cast/jumbo vBBBB, class@AAAAAAAA */
+    movl      rSELF,%ecx
+    GET_VREG_R  rINST,rINST             # rINST<- vBBBB (object)
+    movl      2(rPC),%eax               # eax<- AAAAAAAA
+    movl      offThread_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
+    testl     rINST,rINST               # is object null?
+    movl      offDvmDex_pResClasses(%ecx),%ecx # ecx<- pDvmDex->pResClasses
+    je        .L${opcode}_okay          # null obj, cast always succeeds
+    movl      (%ecx,%eax,4),%eax        # eax<- resolved class
+    movl      offObject_clazz(rINST),%ecx # ecx<- obj->clazz
+    testl     %eax,%eax                 # have we resolved this before?
+    je        .L${opcode}_resolve       # no, go do it now
+.L${opcode}_resolved:
+    cmpl      %eax,%ecx                 # same class (trivial success)?
+    jne       .L${opcode}_fullcheck     # no, do full check
+.L${opcode}_okay:
+    FETCH_INST_OPCODE 4 %ecx
+    ADVANCE_PC 4
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  ecx holds obj->clazz
+     *  eax holds class resolved from AAAAAAAA
+     *  rINST holds object
+     */
+.L${opcode}_fullcheck:
+    movl    %eax,sReg0                 # we'll need the desired class on failure
+    movl    %eax,OUT_ARG1(%esp)
+    movl    %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
+    call    dvmInstanceofNonTrivial    # eax<- boolean result
+    UNSPILL(rIBASE)
+    testl   %eax,%eax                  # failed?
+    jne     .L${opcode}_okay           # no, success
+
+    # A cast has failed.  We need to throw a ClassCastException.
+    EXPORT_PC
+    movl    offObject_clazz(rINST),%eax
+    movl    %eax,OUT_ARG0(%esp)                 # arg0<- obj->clazz
+    movl    sReg0,%ecx
+    movl    %ecx,OUT_ARG1(%esp)                 # arg1<- desired class
+    call    dvmThrowClassCastException
+    jmp     common_exceptionThrown
+
+    /*
+     * Resolution required.  This is the least-likely path, and we're
+     * going to have to recreate some data.
+     *
+     *  rINST holds object
+     */
+.L${opcode}_resolve:
+    movl    rSELF,%ecx
+    EXPORT_PC
+    movl    2(rPC),%eax                # eax<- AAAAAAAA
+    movl    offThread_method(%ecx),%ecx  # ecx<- self->method
+    movl    %eax,OUT_ARG1(%esp)        # arg1<- AAAAAAAA
+    movl    offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+    movl    $$0,OUT_ARG2(%esp)         # arg2<- false
+    movl    %ecx,OUT_ARG0(%esp)        # arg0<- method->clazz
+    SPILL(rIBASE)
+    call    dvmResolveClass            # eax<- resolved ClassObject ptr
+    UNSPILL(rIBASE)
+    testl   %eax,%eax                  # got null?
+    je      common_exceptionThrown     # yes, handle exception
+    movl    offObject_clazz(rINST),%ecx  # ecx<- obj->clazz
+    jmp     .L${opcode}_resolved       # pick up where we left off
diff --git a/vm/mterp/x86/OP_CMPG_DOUBLE.S b/vm/mterp/x86/OP_CMPG_DOUBLE.S
index e50f0d6..1388d7c 100644
--- a/vm/mterp/x86/OP_CMPG_DOUBLE.S
+++ b/vm/mterp/x86/OP_CMPG_DOUBLE.S
@@ -17,18 +17,16 @@
     fucompp     # z if equal, p set if NaN, c set if st0 < st1
     fnstsw   %ax
     sahf
-    movl      rINST,%eax
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %eax
     jp       .L${opcode}_isNaN
     je       .L${opcode}_finish
     sbbl     %ecx,%ecx
     jb       .L${opcode}_finish
     incl     %ecx
 .L${opcode}_finish:
-    SET_VREG %ecx %eax
+    SET_VREG %ecx rINST
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-%break
+    GOTO_NEXT_R %eax
 
 .L${opcode}_isNaN:
     movl      $$$nanval,%ecx
diff --git a/vm/mterp/x86/OP_CMP_LONG.S b/vm/mterp/x86/OP_CMP_LONG.S
index e2b7436..5202c27 100644
--- a/vm/mterp/x86/OP_CMP_LONG.S
+++ b/vm/mterp/x86/OP_CMP_LONG.S
@@ -6,27 +6,37 @@
      * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
      * register based on the results of the comparison.
      */
+    // TUNING: rework to avoid rIBASE spill
     /* cmp-long vAA, vBB, vCC */
     movzbl    2(rPC),%ecx              # ecx<- BB
-    movzbl    3(rPC),%edx              # edx<- CC
+    SPILL(rIBASE)
+    movzbl    3(rPC),rIBASE            # rIBASE<- CC
     GET_VREG_WORD %eax %ecx,1          # eax<- v[BB+1]
     GET_VREG_WORD %ecx %ecx 0          # ecx<- v[BB+0]
-    cmpl      4(rFP,%edx,4),%eax
+    cmpl      4(rFP,rIBASE,4),%eax
     jl        .L${opcode}_smaller
     jg        .L${opcode}_bigger
-    sub       (rFP,%edx,4),%ecx
+    sub       (rFP,rIBASE,4),%ecx
     ja        .L${opcode}_bigger
     jb        .L${opcode}_smaller
-    jmp       .L${opcode}_finish
-%break
+    SET_VREG %ecx rINST
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
 
 .L${opcode}_bigger:
     movl      $$1,%ecx
-    jmp       .L${opcode}_finish
+    SET_VREG %ecx rINST
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
+
 .L${opcode}_smaller:
     movl      $$-1,%ecx
-.L${opcode}_finish:
     SET_VREG %ecx rINST
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
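
cmp-long writes -1, 0, or 1 into vAA; the handler above gets there with a signed compare of the high words followed by an unsigned compare (sub/ja/jb) of the low words, and the rewrite duplicates the store/fetch/advance tail into each outcome so rIBASE can be restored on every path. The result it computes, as a C sketch:

    #include <stdint.h>

    /* Dalvik cmp-long: -1 if vBB < vCC, 0 if equal, 1 if vBB > vCC
     * (signed 64-bit comparison). */
    static int32_t cmpLong(int64_t vBB, int64_t vCC)
    {
        if (vBB < vCC) return -1;
        if (vBB > vCC) return 1;
        return 0;
    }
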
diff --git a/vm/mterp/x86/OP_CONST.S b/vm/mterp/x86/OP_CONST.S
index b1ff401..cb70824 100644
--- a/vm/mterp/x86/OP_CONST.S
+++ b/vm/mterp/x86/OP_CONST.S
@@ -1,8 +1,8 @@
 %verify "executed"
     /* const vAA, #+BBBBbbbb */
     movl      2(rPC),%eax             # grab all 32 bits at once
-    movl      rINST,%ecx              # ecx<- AA
-    FETCH_INST_OPCODE 3 %edx
+    movl      rINST,rINST             # rINST<- AA
+    FETCH_INST_OPCODE 3 %ecx
     ADVANCE_PC 3
-    SET_VREG %eax %ecx                # vAA<- eax
-    GOTO_NEXT_R %edx
+    SET_VREG %eax rINST               # vAA<- eax
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_CONST_16.S b/vm/mterp/x86/OP_CONST_16.S
index df3d423..b956ee6 100644
--- a/vm/mterp/x86/OP_CONST_16.S
+++ b/vm/mterp/x86/OP_CONST_16.S
@@ -1,8 +1,7 @@
 %verify "executed"
     /* const/16 vAA, #+BBBB */
     movswl  2(rPC),%ecx                # ecx<- ssssBBBB
-    movl    rINST,%eax                 # eax<- AA
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %eax
     ADVANCE_PC 2
-    SET_VREG %ecx %eax                 # vAA<- ssssBBBB
-    GOTO_NEXT_R %edx
+    SET_VREG %ecx rINST                # vAA<- ssssBBBB
+    GOTO_NEXT_R %eax
diff --git a/vm/mterp/x86/OP_CONST_4.S b/vm/mterp/x86/OP_CONST_4.S
index 54d1e44..3db437a 100644
--- a/vm/mterp/x86/OP_CONST_4.S
+++ b/vm/mterp/x86/OP_CONST_4.S
@@ -1,10 +1,10 @@
 %verify "executed"
     /* const/4 vA, #+B */
     movsx   rINSTbl,%eax              # eax<-ssssssBx
-    movl    $$0xf,%ecx
-    andl    %eax,%ecx                 # ecx<- A
-    FETCH_INST_OPCODE 1 %edx
+    movl    $$0xf,rINST
+    andl    %eax,rINST                # rINST<- A
+    FETCH_INST_OPCODE 1 %ecx
     ADVANCE_PC 1
     sarl    $$4,%eax
-    SET_VREG %eax %ecx
-    GOTO_NEXT_R %edx
+    SET_VREG %eax rINST
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_CONST_CLASS.S b/vm/mterp/x86/OP_CONST_CLASS.S
index 96890f5..8b12226 100644
--- a/vm/mterp/x86/OP_CONST_CLASS.S
+++ b/vm/mterp/x86/OP_CONST_CLASS.S
@@ -3,36 +3,35 @@
 %verify "Class not yet resolved"
 %verify "Class cannot be resolved"
     /* const/class vAA, Class@BBBB */
-    movl      rGLUE,%ecx
+    movl      rSELF,%ecx
     movzwl    2(rPC),%eax              # eax<- BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx# ecx<- glue->methodClassDex
+    movl      offThread_methodClassDex(%ecx),%ecx# ecx<- self->methodClassDex
     movl      offDvmDex_pResClasses(%ecx),%ecx # ecx<- dvmDex->pResClasses
     movl      (%ecx,%eax,4),%eax       # eax<- rResClasses[BBBB]
-    movl      rINST,%ecx
-    FETCH_INST_OPCODE 2 %edx
     testl     %eax,%eax                # resolved yet?
     je        .L${opcode}_resolve
-    SET_VREG  %eax %ecx                # vAA<- rResClasses[BBBB]
+    FETCH_INST_OPCODE 2 %ecx
+    SET_VREG  %eax rINST               # vAA<- rResClasses[BBBB]
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-%break
+    GOTO_NEXT_R %ecx
 
 /* This is the less common path, so we'll redo some work
    here rather than force spills on the common path */
 .L${opcode}_resolve:
-    movl     rGLUE,%eax
-    movl     %ecx,rINST                # rINST<- AA
+    movl     rSELF,%eax
     EXPORT_PC
-    movl     offGlue_method(%eax),%eax # eax<- glue->method
+    movl     offThread_method(%eax),%eax # eax<- self->method
     movl     $$1,OUT_ARG2(%esp)        # true
     movzwl   2(rPC),%ecx               # ecx<- BBBB
     movl     offMethod_clazz(%eax),%eax
     movl     %ecx,OUT_ARG1(%esp)
     movl     %eax,OUT_ARG0(%esp)
+    SPILL(rIBASE)
     call     dvmResolveClass           # go resolve
+    UNSPILL(rIBASE)
     testl    %eax,%eax                 # failed?
     je       common_exceptionThrown
+    FETCH_INST_OPCODE 2 %ecx
     SET_VREG %eax rINST
-    FETCH_INST_OPCODE 2 %edx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_CONST_CLASS_JUMBO.S b/vm/mterp/x86/OP_CONST_CLASS_JUMBO.S
new file mode 100644
index 0000000..ce64823
--- /dev/null
+++ b/vm/mterp/x86/OP_CONST_CLASS_JUMBO.S
@@ -0,0 +1,36 @@
+%verify "Class already resolved"
+%verify "Class not yet resolved"
+%verify "Class cannot be resolved"
+    /* const-class/jumbo vBBBB, Class@AAAAAAAA */
+    movl      rSELF,%ecx
+    movl      2(rPC),%eax              # eax<- AAAAAAAA
+    movl      offThread_methodClassDex(%ecx),%ecx# ecx<- self->methodClassDex
+    movl      offDvmDex_pResClasses(%ecx),%ecx # ecx<- dvmDex->pResClasses
+    movl      (%ecx,%eax,4),%eax       # eax<- rResClasses[AAAAAAAA]
+    FETCH_INST_OPCODE 4 %ecx
+    testl     %eax,%eax                # resolved yet?
+    je        .L${opcode}_resolve
+    SET_VREG  %eax rINST               # vBBBB<- rResClasses[AAAAAAAA]
+    ADVANCE_PC 4
+    GOTO_NEXT_R %ecx
+
+/* This is the less common path, so we'll redo some work
+   here rather than force spills on the common path */
+.L${opcode}_resolve:
+    movl     rSELF,%eax
+    EXPORT_PC
+    movl     offThread_method(%eax),%eax # eax<- self->method
+    movl     $$1,OUT_ARG2(%esp)        # true
+    movl     2(rPC),%ecx               # ecx<- AAAAAAAA
+    movl     offMethod_clazz(%eax),%eax
+    movl     %ecx,OUT_ARG1(%esp)
+    movl     %eax,OUT_ARG0(%esp)
+    SPILL(rIBASE)
+    call     dvmResolveClass           # go resolve
+    UNSPILL(rIBASE)
+    testl    %eax,%eax                 # failed?
+    je       common_exceptionThrown
+    FETCH_INST_OPCODE 4 %ecx
+    SET_VREG %eax rINST
+    ADVANCE_PC 4
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_CONST_HIGH16.S b/vm/mterp/x86/OP_CONST_HIGH16.S
index 3c3b2d7..9ecf7c6 100644
--- a/vm/mterp/x86/OP_CONST_HIGH16.S
+++ b/vm/mterp/x86/OP_CONST_HIGH16.S
@@ -1,9 +1,8 @@
 %verify "executed"
     /* const/high16 vAA, #+BBBB0000 */
     movzwl     2(rPC),%eax                # eax<- 0000BBBB
-    movl       rINST,%ecx                 # ecx<- AA
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
     sall       $$16,%eax                  # eax<- BBBB0000
-    SET_VREG %eax %ecx                    # vAA<- eax
-    GOTO_NEXT_R %edx
+    SET_VREG %eax rINST                   # vAA<- eax
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_CONST_STRING.S b/vm/mterp/x86/OP_CONST_STRING.S
index 8fd9590..538cace 100644
--- a/vm/mterp/x86/OP_CONST_STRING.S
+++ b/vm/mterp/x86/OP_CONST_STRING.S
@@ -3,35 +3,34 @@
 %verify "String not yet resolved"
 %verify "String cannot be resolved"
     /* const/string vAA, String@BBBB */
-    movl      rGLUE,%ecx
+    movl      rSELF,%ecx
     movzwl    2(rPC),%eax              # eax<- BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx# ecx<- glue->methodClassDex
+    movl      offThread_methodClassDex(%ecx),%ecx# ecx<- self->methodClassDex
     movl      offDvmDex_pResStrings(%ecx),%ecx # ecx<- dvmDex->pResStrings
     movl      (%ecx,%eax,4),%eax       # eax<- rResString[BBBB]
-    movl      rINST,%ecx
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
     testl     %eax,%eax                # resolved yet?
     je        .L${opcode}_resolve
-    SET_VREG  %eax %ecx                # vAA<- rResString[BBBB]
+    SET_VREG  %eax rINST               # vAA<- rResString[BBBB]
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
-%break
+    GOTO_NEXT_R %ecx
 
 /* This is the less common path, so we'll redo some work
    here rather than force spills on the common path */
 .L${opcode}_resolve:
-    movl     rGLUE,%eax
-    movl     %ecx,rINST                # rINST<- AA
+    movl     rSELF,%eax
     EXPORT_PC
-    movl     offGlue_method(%eax),%eax # eax<- glue->method
+    movl     offThread_method(%eax),%eax # eax<- self->method
     movzwl   2(rPC),%ecx               # ecx<- BBBB
     movl     offMethod_clazz(%eax),%eax
     movl     %ecx,OUT_ARG1(%esp)
     movl     %eax,OUT_ARG0(%esp)
+    SPILL(rIBASE)
     call     dvmResolveString          # go resolve
+    UNSPILL(rIBASE)
     testl    %eax,%eax                 # failed?
     je       common_exceptionThrown
+    FETCH_INST_OPCODE 2 %ecx
     SET_VREG %eax rINST
-    FETCH_INST_OPCODE 2 %edx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_CONST_STRING_JUMBO.S b/vm/mterp/x86/OP_CONST_STRING_JUMBO.S
index 9f5e16a..6148244 100644
--- a/vm/mterp/x86/OP_CONST_STRING_JUMBO.S
+++ b/vm/mterp/x86/OP_CONST_STRING_JUMBO.S
@@ -3,35 +3,34 @@
 %verify "String not yet resolved"
 %verify "String cannot be resolved"
     /* const/string vAA, String@BBBBBBBB */
-    movl      rGLUE,%ecx
+    movl      rSELF,%ecx
     movl      2(rPC),%eax              # eax<- BBBBBBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx# ecx<- glue->methodClassDex
+    movl      offThread_methodClassDex(%ecx),%ecx# ecx<- self->methodClassDex
     movl      offDvmDex_pResStrings(%ecx),%ecx # ecx<- dvmDex->pResStrings
     movl      (%ecx,%eax,4),%eax       # eax<- rResString[BBBB]
-    movl      rINST,%ecx
-    FETCH_INST_OPCODE 3 %edx
+    FETCH_INST_OPCODE 3 %ecx
     testl     %eax,%eax                # resolved yet?
     je        .L${opcode}_resolve
-    SET_VREG  %eax %ecx                # vAA<- rResString[BBBB]
+    SET_VREG  %eax rINST               # vAA<- rResString[BBBB]
     ADVANCE_PC 3
-    GOTO_NEXT_R %edx
-%break
+    GOTO_NEXT_R %ecx
 
 /* This is the less common path, so we'll redo some work
    here rather than force spills on the common path */
 .L${opcode}_resolve:
-    movl     rGLUE,%eax
-    movl     %ecx,rINST                # rINST<- AA
+    movl     rSELF,%eax
     EXPORT_PC
-    movl     offGlue_method(%eax),%eax # eax<- glue->method
+    movl     offThread_method(%eax),%eax # eax<- self->method
     movl     2(rPC),%ecx               # ecx<- BBBBBBBB
     movl     offMethod_clazz(%eax),%eax
     movl     %ecx,OUT_ARG1(%esp)
     movl     %eax,OUT_ARG0(%esp)
+    SPILL(rIBASE)
     call     dvmResolveString          # go resolve
+    UNSPILL(rIBASE)
     testl    %eax,%eax                 # failed?
     je       common_exceptionThrown
+    FETCH_INST_OPCODE 3 %ecx
     SET_VREG %eax rINST
-    FETCH_INST_OPCODE 3 %edx
     ADVANCE_PC 3
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_CONST_WIDE.S b/vm/mterp/x86/OP_CONST_WIDE.S
index b4273df..253af78 100644
--- a/vm/mterp/x86/OP_CONST_WIDE.S
+++ b/vm/mterp/x86/OP_CONST_WIDE.S
@@ -5,7 +5,7 @@
     movl      6(rPC),rINST        # rINST<- msw
     leal      (rFP,%ecx,4),%ecx   # dst addr
     movl      rINST,4(%ecx)
-    FETCH_INST_OPCODE 5 %edx
     movl      %eax,(%ecx)
+    FETCH_INST_OPCODE 5 %ecx
     ADVANCE_PC 5
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_CONST_WIDE_16.S b/vm/mterp/x86/OP_CONST_WIDE_16.S
index b720748..ee8004c 100644
--- a/vm/mterp/x86/OP_CONST_WIDE_16.S
+++ b/vm/mterp/x86/OP_CONST_WIDE_16.S
@@ -1,9 +1,11 @@
 %verify "executed"
     /* const-wide/16 vAA, #+BBBB */
     movswl    2(rPC),%eax               # eax<- ssssBBBB
-    cltd                                # rPC:eax<- ssssssssssssBBBB
-    SET_VREG_WORD %edx rINST 1          # store msw
-    FETCH_INST_OPCODE 2 %edx
+    SPILL(rIBASE)                       # preserve rIBASE (cltd trashes it)
+    cltd                                # rIBASE:eax<- ssssssssssssBBBB
+    SET_VREG_WORD rIBASE rINST 1        # store msw
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)                     # restore rIBASE
     SET_VREG_WORD %eax rINST 0          # store lsw
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
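
const-wide/16 sign-extends the 16-bit literal to 64 bits; cltd produces the upper half in %edx, which is why the handler now spills rIBASE around it before storing the two halves into v[AA] and v[AA+1]. The value being materialized, as a C sketch over an illustrative vreg array:

    #include <stdint.h>

    /* const-wide/16 vAA, #+BBBB: store a sign-extended 64-bit literal into
     * the register pair v[AA], v[AA+1] (low word first). */
    static void constWide16(uint32_t *vregs, unsigned aa, int16_t bbbb)
    {
        int64_t value = (int64_t)bbbb;                     /* what cltd computes */
        vregs[aa]     = (uint32_t)value;                   /* lsw */
        vregs[aa + 1] = (uint32_t)((uint64_t)value >> 32); /* msw */
    }
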
diff --git a/vm/mterp/x86/OP_CONST_WIDE_32.S b/vm/mterp/x86/OP_CONST_WIDE_32.S
index b059529..12cdd01 100644
--- a/vm/mterp/x86/OP_CONST_WIDE_32.S
+++ b/vm/mterp/x86/OP_CONST_WIDE_32.S
@@ -1,9 +1,11 @@
 %verify "executed"
     /* const-wide/32 vAA, #+BBBBbbbb */
     movl     2(rPC),%eax                # eax<- BBBBbbbb
-    cltd                                # rPC:eax<- ssssssssssssBBBB
-    SET_VREG_WORD %edx rINST,1          # store msw
-    FETCH_INST_OPCODE 3 %edx
+    SPILL(rIBASE)                       # save rIBASE (cltd trashes it)
+    cltd                                # rIBASE:eax<- ssssssssssssBBBB
+    SET_VREG_WORD rIBASE rINST,1        # store msw
+    FETCH_INST_OPCODE 3 %ecx
+    UNSPILL(rIBASE)                     # restore rIBASE
     SET_VREG_WORD %eax rINST 0          # store lsw
     ADVANCE_PC 3
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_CONST_WIDE_HIGH16.S b/vm/mterp/x86/OP_CONST_WIDE_HIGH16.S
index dae78cb..15a0c55 100644
--- a/vm/mterp/x86/OP_CONST_WIDE_HIGH16.S
+++ b/vm/mterp/x86/OP_CONST_WIDE_HIGH16.S
@@ -1,10 +1,10 @@
 %verify "executed"
     /* const-wide/high16 vAA, #+BBBB000000000000 */
     movzwl     2(rPC),%eax                # eax<- 0000BBBB
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
     sall       $$16,%eax                  # eax<- BBBB0000
     SET_VREG_WORD %eax rINST 1            # v[AA+1]<- eax
     xorl       %eax,%eax
     SET_VREG_WORD %eax rINST 0            # v[AA+0]<- eax
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_DISPATCH_FF.S b/vm/mterp/x86/OP_DISPATCH_FF.S
index 31d98c1..fbd5a3d 100644
--- a/vm/mterp/x86/OP_DISPATCH_FF.S
+++ b/vm/mterp/x86/OP_DISPATCH_FF.S
@@ -1 +1,3 @@
-%include "x86/unused.S"
+%verify "executed"
+    leal      256(rINST),%ecx
+    GOTO_NEXT_JUMBO_R %ecx
diff --git a/vm/mterp/x86/OP_DIV_LONG.S b/vm/mterp/x86/OP_DIV_LONG.S
index 4a7704b..0dc5546 100644
--- a/vm/mterp/x86/OP_DIV_LONG.S
+++ b/vm/mterp/x86/OP_DIV_LONG.S
@@ -3,46 +3,44 @@
     /* div vAA, vBB, vCC */
     movzbl    3(rPC),%eax              # eax<- CC
     movzbl    2(rPC),%ecx              # ecx<- BB
-    GET_VREG_WORD %edx %eax 0
+    SPILL(rIBASE)                      # save rIBASE/%edx
+    GET_VREG_WORD rIBASE %eax 0
     GET_VREG_WORD %eax %eax 1
-    movl     %edx,OUT_ARG2(%esp)
+    movl     rIBASE,OUT_ARG2(%esp)
     testl    %eax,%eax
     je       .L${opcode}_check_zero
     cmpl     $$-1,%eax
     je       .L${opcode}_check_neg1
 .L${opcode}_notSpecial:
-    GET_VREG_WORD %edx %ecx 0
+    GET_VREG_WORD rIBASE %ecx 0
     GET_VREG_WORD %ecx %ecx 1
 .L${opcode}_notSpecial1:
     movl     %eax,OUT_ARG3(%esp)
-    movl     %edx,OUT_ARG0(%esp)
+    movl     rIBASE,OUT_ARG0(%esp)
     movl     %ecx,OUT_ARG1(%esp)
-    jmp      .L${opcode}_continue
-%break
-
-.L${opcode}_continue:
     call     $routine
 .L${opcode}_finish:
-    SET_VREG_WORD %edx rINST 1
+    SET_VREG_WORD rIBASE rINST 1
+    UNSPILL(rIBASE)                 # restore rIBASE/%edx
     SET_VREG_WORD %eax rINST 0
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 .L${opcode}_check_zero:
-    testl   %edx,%edx
+    testl   rIBASE,rIBASE
     jne     .L${opcode}_notSpecial
     jmp     common_errDivideByZero
 .L${opcode}_check_neg1:
-    testl   %edx,%eax
+    testl   rIBASE,%eax
     jne     .L${opcode}_notSpecial
-    GET_VREG_WORD %edx %ecx 0
+    GET_VREG_WORD rIBASE %ecx 0
     GET_VREG_WORD %ecx %ecx 1
-    testl    %edx,%edx
+    testl    rIBASE,rIBASE
     jne      .L${opcode}_notSpecial1
     cmpl     $$0x80000000,%ecx
     jne      .L${opcode}_notSpecial1
     /* minint / -1, return minint on div, 0 on rem */
     xorl     %eax,%eax
-    movl     $special,%edx
+    movl     $special,rIBASE
     jmp      .L${opcode}_finish
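
The long-division handler screens out the two inputs its helper routine must not see: a zero divisor (throw) and the overflowing INT64_MIN / -1 case, which is answered inline as INT64_MIN for div and 0 for rem (the "$special" constant). A C sketch of the div flavor; throwDivideByZero stands in for common_errDivideByZero:

    #include <stdint.h>

    void throwDivideByZero(void);   /* assumed stand-in for the common handler */

    static int64_t divLong(int64_t dividend, int64_t divisor)
    {
        if (divisor == 0) {
            throwDivideByZero();    /* handler jumps to common_errDivideByZero */
            return 0;               /* unreached in the interpreter */
        }
        if (divisor == -1 && dividend == INT64_MIN)
            return INT64_MIN;       /* rem-long returns 0 here instead */
        return dividend / divisor;  /* everything else goes to the helper */
    }
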
diff --git a/vm/mterp/x86/OP_DIV_LONG_2ADDR.S b/vm/mterp/x86/OP_DIV_LONG_2ADDR.S
index 64de025..4722098 100644
--- a/vm/mterp/x86/OP_DIV_LONG_2ADDR.S
+++ b/vm/mterp/x86/OP_DIV_LONG_2ADDR.S
@@ -4,46 +4,44 @@
     movzbl    rINSTbl,%eax
     shrl      $$4,%eax                  # eax<- B
     andb      $$0xf,rINSTbl             # rINST<- A
-    GET_VREG_WORD %edx %eax 0
+    SPILL(rIBASE)                       # save rIBASE/%edx
+    GET_VREG_WORD rIBASE %eax 0
     GET_VREG_WORD %eax %eax 1
-    movl     %edx,OUT_ARG2(%esp)
+    movl     rIBASE,OUT_ARG2(%esp)
     testl    %eax,%eax
     je       .L${opcode}_check_zero
     cmpl     $$-1,%eax
     je       .L${opcode}_check_neg1
 .L${opcode}_notSpecial:
-    GET_VREG_WORD %edx rINST 0
+    GET_VREG_WORD rIBASE rINST 0
     GET_VREG_WORD %ecx rINST 1
 .L${opcode}_notSpecial1:
-    jmp      .L${opcode}_continue
-%break
-
-.L${opcode}_continue:
     movl     %eax,OUT_ARG3(%esp)
-    movl     %edx,OUT_ARG0(%esp)
+    movl     rIBASE,OUT_ARG0(%esp)
     movl     %ecx,OUT_ARG1(%esp)
     call     $routine
 .L${opcode}_finish:
-    SET_VREG_WORD %edx rINST 1
+    SET_VREG_WORD rIBASE rINST 1
+    UNSPILL(rIBASE)                    # restore rIBASE/%edx
     SET_VREG_WORD %eax rINST 0
-    FETCH_INST_OPCODE 1 %edx
+    FETCH_INST_OPCODE 1 %ecx
     ADVANCE_PC 1
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 .L${opcode}_check_zero:
-    testl   %edx,%edx
+    testl   rIBASE,rIBASE
     jne     .L${opcode}_notSpecial
     jmp     common_errDivideByZero
 .L${opcode}_check_neg1:
-    testl   %edx,%eax
+    testl   rIBASE,%eax
     jne     .L${opcode}_notSpecial
-    GET_VREG_WORD %edx rINST 0
+    GET_VREG_WORD rIBASE rINST 0
     GET_VREG_WORD %ecx rINST 1
-    testl    %edx,%edx
+    testl    rIBASE,rIBASE
     jne      .L${opcode}_notSpecial1
     cmpl     $$0x80000000,%ecx
     jne      .L${opcode}_notSpecial1
     /* minint / -1, return minint on div, 0 on rem */
     xorl     %eax,%eax
-    movl     $special,%edx
+    movl     $special,rIBASE
     jmp      .L${opcode}_finish
diff --git a/vm/mterp/x86/OP_EXECUTE_INLINE.S b/vm/mterp/x86/OP_EXECUTE_INLINE.S
index 85f9fcf..ec91076 100644
--- a/vm/mterp/x86/OP_EXECUTE_INLINE.S
+++ b/vm/mterp/x86/OP_EXECUTE_INLINE.S
@@ -11,18 +11,19 @@
      *
      */
     /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */
-    movl      rGLUE,%ecx
+    movl      rSELF,%ecx
     EXPORT_PC
     movzwl    2(rPC),%eax               # eax<- BBBB
-    leal      offGlue_retval(%ecx),%ecx # ecx<- & glue->retval
+    leal      offThread_retval(%ecx),%ecx # ecx<- & self->retval
+    SPILL(rIBASE)                       # preserve rIBASE
     movl      %ecx,OUT_ARG4(%esp)
     call      .L${opcode}_continue      # make call; will return after
+    UNSPILL(rIBASE)                     # restore rIBASE
     testl     %eax,%eax                 # successful?
-    FETCH_INST_OPCODE 3 %edx
+    FETCH_INST_OPCODE 3 %ecx
     je        common_exceptionThrown    # no, handle exception
     ADVANCE_PC 3
-    GOTO_NEXT_R %edx
-%break
+    GOTO_NEXT_R %ecx
 
 .L${opcode}_continue:
     /*
@@ -34,30 +35,30 @@
      *
      *  Go ahead and load all 4 args, even if not used.
      */
-    movzwl    4(rPC),%edx
+    movzwl    4(rPC),rIBASE
 
     movl      $$0xf,%ecx
-    andl      %edx,%ecx
+    andl      rIBASE,%ecx
     GET_VREG_R  %ecx %ecx
-    sarl      $$4,%edx
+    sarl      $$4,rIBASE
     movl      %ecx,4+OUT_ARG0(%esp)
 
     movl      $$0xf,%ecx
-    andl      %edx,%ecx
+    andl      rIBASE,%ecx
     GET_VREG_R  %ecx %ecx
-    sarl      $$4,%edx
+    sarl      $$4,rIBASE
     movl      %ecx,4+OUT_ARG1(%esp)
 
     movl      $$0xf,%ecx
-    andl      %edx,%ecx
+    andl      rIBASE,%ecx
     GET_VREG_R  %ecx %ecx
-    sarl      $$4,%edx
+    sarl      $$4,rIBASE
     movl      %ecx,4+OUT_ARG2(%esp)
 
     movl      $$0xf,%ecx
-    andl      %edx,%ecx
+    andl      rIBASE,%ecx
     GET_VREG_R  %ecx %ecx
-    sarl      $$4,%edx
+    sarl      $$4,rIBASE
     movl      %ecx,4+OUT_ARG3(%esp)
 
     sall      $$4,%eax      # index *= sizeof(table entry)
diff --git a/vm/mterp/x86/OP_FILLED_NEW_ARRAY.S b/vm/mterp/x86/OP_FILLED_NEW_ARRAY.S
index 4e303ad..dde53aa 100644
--- a/vm/mterp/x86/OP_FILLED_NEW_ARRAY.S
+++ b/vm/mterp/x86/OP_FILLED_NEW_ARRAY.S
@@ -8,23 +8,20 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
-    movl    rGLUE,%eax
-    movl    offGlue_methodClassDex(%eax),%eax # eax<- pDvmDex
+    movl    rSELF,%eax
+    movl    offThread_methodClassDex(%eax),%eax # eax<- pDvmDex
     movzwl  2(rPC),%ecx                       # ecx<- BBBB
     movl    offDvmDex_pResClasses(%eax),%eax  # eax<- pDvmDex->pResClasses
+    SPILL(rIBASE)                             # preserve rIBASE
     movl    (%eax,%ecx,4),%eax                # eax<- resolved class
     EXPORT_PC
     testl   %eax,%eax                         # already resolved?
     jne     .L${opcode}_continue              # yes, continue
     # less frequent path, so we'll redo some work
-    movl    rGLUE,%eax
+    movl    rSELF,%eax
     movl    $$0,OUT_ARG2(%esp)                # arg2<- false
     movl    %ecx,OUT_ARG1(%esp)               # arg1<- BBBB
-    movl    offGlue_method(%eax),%eax         # eax<- glue->method
-    jmp     .L${opcode}_more
-%break
-
-.L${opcode}_more:
+    movl    offThread_method(%eax),%eax         # eax<- self->method
     movl    offMethod_clazz(%eax),%eax        # eax<- method->clazz
     movl    %eax,OUT_ARG0(%esp)               # arg0<- clazz
     call    dvmResolveClass                   # eax<- call(clazz,ref,flag)
@@ -44,7 +41,7 @@
     movl    $$ALLOC_DONT_TRACK,OUT_ARG2(%esp)     # arg2<- flags
     movzbl  1(%ecx),%ecx                          # ecx<- descriptor[1]
     movl    %eax,OUT_ARG0(%esp)                   # arg0<- arrayClass
-    movl    rGLUE,%eax
+    movl    rSELF,%eax
     cmpb    $$'I',%cl                             # supported?
     je      1f
     cmpb    $$'L',%cl
@@ -52,17 +49,17 @@
     cmpb    $$'[',%cl
     jne      .L${opcode}_notimpl                  # no, not handled yet
 1:
-    movl    %ecx,offGlue_retval+4(%eax)           # save type
+    movl    %ecx,offThread_retval+4(%eax)           # save type
     .if      (!$isrange)
     SPILL_TMP1(rINST)                              # save copy, need "B" later
     sarl    $$4,rINST
     .endif
     movl    rINST,OUT_ARG1(%esp)                  # arg1<- A or AA (length)
     call    dvmAllocArrayByClass     # eax<- call(arrayClass, length, flags)
-    movl    rGLUE,%ecx
+    movl    rSELF,%ecx
     testl   %eax,%eax                             # alloc successful?
     je      common_exceptionThrown                # no, handle exception
-    movl    %eax,offGlue_retval(%ecx)             # retval.l<- new array
+    movl    %eax,offThread_retval(%ecx)             # retval.l<- new array
     movzwl  4(rPC),%ecx                           # ecx<- FEDC or CCCC
     leal    offArrayObject_contents(%eax),%eax    # eax<- newArray->contents
 
@@ -85,40 +82,40 @@
     movsd
     UNSPILL_TMP2(%esi)
     UNSPILL_TMP3(%edi)
-    movl    rGLUE,%ecx
-    movl    offGlue_retval+4(%ecx),%eax      # eax<- type
-    FETCH_INST_OPCODE 3 %edx
+    movl    rSELF,%ecx
+    movl    offThread_retval+4(%ecx),%eax      # eax<- type
     .else
     testl  rINST,rINST
     je     4f
-    UNSPILL_TMP1(%edx)        # restore "BA"
-    andl   $$0x0f,%edx        # edx<- 0000000A
-    sall   $$16,%edx          # edx<- 000A0000
-    orl    %ecx,%edx          # edx<- 000AFEDC
+    UNSPILL_TMP1(rIBASE)      # restore "BA"
+    andl   $$0x0f,rIBASE      # rIBASE<- 0000000A
+    sall   $$16,rIBASE        # rIBASE<- 000A0000
+    orl    %ecx,rIBASE        # rIBASE<- 000AFEDC
 3:
     movl   $$0xf,%ecx
-    andl   %edx,%ecx          # ecx<- next reg to load
+    andl   rIBASE,%ecx        # ecx<- next reg to load
     GET_VREG_R %ecx %ecx
-    shrl   $$4,%edx
+    shrl   $$4,rIBASE
     leal   4(%eax),%eax
     movl   %ecx,-4(%eax)
     sub    $$1,rINST
     jne    3b
 4:
-    movl   rGLUE,%ecx
-    movl    offGlue_retval+4(%ecx),%eax      # eax<- type
-    FETCH_INST_OPCODE 3 %edx
+    movl   rSELF,%ecx
+    movl    offThread_retval+4(%ecx),%eax      # eax<- type
     .endif
 
     cmpb    $$'I',%al                        # Int array?
     je      5f                               # skip card mark if so
-    movl    offGlue_retval(%ecx),%eax        # eax<- object head
-    movl    offGlue_cardTable(%ecx),%ecx     # card table base
+    movl    offThread_retval(%ecx),%eax        # eax<- object head
+    movl    offThread_cardTable(%ecx),%ecx     # card table base
     shrl    $$GC_CARD_SHIFT,%eax             # convert to card num
     movb    %cl,(%ecx,%eax)                  # mark card based on object head
 5:
+    UNSPILL(rIBASE)                          # restore rIBASE
+    FETCH_INST_OPCODE 3 %ecx
     ADVANCE_PC 3
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 
     /*
@@ -126,9 +123,7 @@
      * mode of filled-new-array.
      */
 .L${opcode}_notimpl:
-    movl    $$.LstrInternalErrorA,%eax
-    movl    %eax,OUT_ARG0(%esp)
     movl    $$.LstrFilledNewArrayNotImplA,%eax
-    movl    %eax,OUT_ARG1(%esp)
-    call    dvmThrowException
+    movl    %eax,OUT_ARG0(%esp)
+    call    dvmThrowInternalError
     jmp     common_exceptionThrown
diff --git a/vm/mterp/x86/OP_FILLED_NEW_ARRAY_JUMBO.S b/vm/mterp/x86/OP_FILLED_NEW_ARRAY_JUMBO.S
new file mode 100644
index 0000000..80f2b38
--- /dev/null
+++ b/vm/mterp/x86/OP_FILLED_NEW_ARRAY_JUMBO.S
@@ -0,0 +1,100 @@
+%verify "executed"
+%verify "unimplemented array type"
+    /*
+     * Create a new array with elements filled from registers.
+     */
+    /* filled-new-array/jumbo {vCCCC..v(CCCC+BBBB-1)}, type@AAAAAAAA */
+    movl    rSELF,%eax
+    movl    offThread_methodClassDex(%eax),%eax # eax<- pDvmDex
+    movl    2(rPC),%ecx                       # ecx<- AAAAAAAA
+    movl    offDvmDex_pResClasses(%eax),%eax  # eax<- pDvmDex->pResClasses
+    movl    (%eax,%ecx,4),%eax                # eax<- resolved class
+    EXPORT_PC
+    testl   %eax,%eax                         # already resolved?
+    jne     .L${opcode}_continue              # yes, continue
+    # less frequent path, so we'll redo some work
+    movl    rSELF,%eax
+    movl    $$0,OUT_ARG2(%esp)                # arg2<- false
+    movl    %ecx,OUT_ARG1(%esp)               # arg1<- AAAAAAAA
+    movl    offThread_method(%eax),%eax         # eax<- self->method
+    movl    offMethod_clazz(%eax),%eax        # eax<- method->clazz
+    movl    %eax,OUT_ARG0(%esp)               # arg0<- clazz
+    SPILL(rIBASE)
+    call    dvmResolveClass                   # eax<- call(clazz,ref,flag)
+    UNSPILL(rIBASE)
+    testl   %eax,%eax                         # null?
+    je      common_exceptionThrown            # yes, handle it
+
+       # note: fall through to .L${opcode}_continue
+
+    /*
+     * On entry:
+     *    eax holds array class [r0]
+     *    ecx is scratch
+     */
+.L${opcode}_continue:
+    movl    offClassObject_descriptor(%eax),%ecx  # ecx<- arrayClass->descriptor
+    movl    $$ALLOC_DONT_TRACK,OUT_ARG2(%esp)     # arg2<- flags
+    movzbl  1(%ecx),%ecx                          # ecx<- descriptor[1]
+    movl    %eax,OUT_ARG0(%esp)                   # arg0<- arrayClass
+    movl    rSELF,%eax
+    cmpb    $$'I',%cl                             # supported?
+    je      1f
+    cmpb    $$'L',%cl
+    je      1f
+    cmpb    $$'[',%cl
+    jne      .L${opcode}_notimpl                  # no, not handled yet
+1:
+    movl    %ecx,offThread_retval+4(%eax)           # save type
+    movl    rINST,OUT_ARG1(%esp)                  # arg1<- BBBB (length)
+    SPILL(rIBASE)
+    call    dvmAllocArrayByClass     # eax<- call(arrayClass, length, flags)
+    UNSPILL(rIBASE)
+    movl    rSELF,%ecx
+    testl   %eax,%eax                             # alloc successful?
+    je      common_exceptionThrown                # no, handle exception
+    movl    %eax,offThread_retval(%ecx)             # retval.l<- new array
+    movzwl  8(rPC),%ecx                           # ecx<- CCCC
+    leal    offArrayObject_contents(%eax),%eax    # eax<- newArray->contents
+
+/* at this point:
+ *     eax is pointer to tgt
+ *     rINST is length
+ *     ecx is CCCC
+ *  We now need to copy values from registers into the array
+ */
+
+    # set up src pointer
+    SPILL_TMP2(%esi)
+    SPILL_TMP3(%edi)
+    leal    (rFP,%ecx,4),%esi # set up src ptr
+    movl    %eax,%edi         # set up dst ptr
+    movl    rINST,%ecx        # load count register
+    rep
+    movsd
+    UNSPILL_TMP2(%esi)
+    UNSPILL_TMP3(%edi)
+    movl    rSELF,%ecx
+    movl    offThread_retval+4(%ecx),%eax      # eax<- type
+
+    cmpb    $$'I',%al                        # Int array?
+    je      5f                               # skip card mark if so
+    movl    offThread_retval(%ecx),%eax        # eax<- object head
+    movl    offThread_cardTable(%ecx),%ecx     # card table base
+    shrl    $$GC_CARD_SHIFT,%eax             # convert to card num
+    movb    %cl,(%ecx,%eax)                  # mark card based on object head
+5:
+    FETCH_INST_OPCODE 5 %ecx
+    ADVANCE_PC 5
+    GOTO_NEXT_R %ecx
+
+
+    /*
+     * Throw an exception indicating that we have not implemented this
+     * mode of filled-new-array.
+     */
+.L${opcode}_notimpl:
+    movl    $$.LstrFilledNewArrayNotImplA,%eax
+    movl    %eax,OUT_ARG0(%esp)
+    call    dvmThrowInternalError
+    jmp     common_exceptionThrown
diff --git a/vm/mterp/x86/OP_FILL_ARRAY_DATA.S b/vm/mterp/x86/OP_FILL_ARRAY_DATA.S
index 28826f4..5ca17a6 100644
--- a/vm/mterp/x86/OP_FILL_ARRAY_DATA.S
+++ b/vm/mterp/x86/OP_FILL_ARRAY_DATA.S
@@ -6,9 +6,11 @@
     EXPORT_PC
     movl    %eax,OUT_ARG0(%esp)
     movl    %ecx,OUT_ARG1(%esp)
+    SPILL(rIBASE)
     call    dvmInterpHandleFillArrayData
-    FETCH_INST_OPCODE 3 %edx
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 3 %ecx
     testl   %eax,%eax                   # exception thrown?
     je      common_exceptionThrown
     ADVANCE_PC 3
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_IGET.S b/vm/mterp/x86/OP_IGET.S
index 0495827..5740690 100644
--- a/vm/mterp/x86/OP_IGET.S
+++ b/vm/mterp/x86/OP_IGET.S
@@ -10,29 +10,25 @@
      * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
      */
     /* op vA, vB, field@CCCC */
-    movl    rGLUE,%ecx
-    movzwl  2(rPC),%edx                         # edx<- 0000CCCC
-    movl    offGlue_methodClassDex(%ecx),%eax   # eax<- DvmDex
+    movl    rSELF,%ecx
+    SPILL(rIBASE)                               # preserve rIBASE
+    movzwl  2(rPC),rIBASE                       # rIBASE<- 0000CCCC
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
     movzbl  rINSTbl,%ecx                        # ecx<- BA
     sarl    $$4,%ecx                            # ecx<- B
     movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
     andb    $$0xf,rINSTbl                       # rINST<- A
     GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
-    movl    (%eax,%edx,4),%eax                  # resolved entry
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
     testl   %eax,%eax                           # is resolved entry null?
     jne     .L${opcode}_finish                  # no, already resolved
-    movl    %edx,OUT_ARG1(%esp)                 # needed by dvmResolveInstField
-    movl    rGLUE,%edx
-    jmp     .L${opcode}_resolve
-%break
-
-
-.L${opcode}_resolve:
+    movl    rIBASE,OUT_ARG1(%esp)               # needed by dvmResolveInstField
+    movl    rSELF,rIBASE
     EXPORT_PC
-    movl    offGlue_method(%edx),%edx           # edx<- current method
-    movl    offMethod_clazz(%edx),%edx          # edx<- method->clazz
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
     SPILL_TMP1(%ecx)                            # save obj pointer across call
-    movl    %edx,OUT_ARG0(%esp)                  # pass in method->clazz
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
     call    dvmResolveInstField                 #  ... to dvmResolveInstField
     UNSPILL_TMP1(%ecx)
     testl   %eax,%eax                           #  returns InstrField ptr
@@ -50,8 +46,8 @@
     testl   %ecx,%ecx                           # object null?
     je      common_errNullObject                # object was null
     $load   (%ecx,%eax,1),%ecx                  # ecx<- obj.field (8/16/32 bits)
-    movl    rINST,%eax                          # eax<- A
-    FETCH_INST_OPCODE 2 %edx
-    SET_VREG %ecx %eax
+    FETCH_INST_OPCODE 2 %eax
+    UNSPILL(rIBASE)
+    SET_VREG %ecx rINST
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %eax
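
iget resolves the field reference to a byte offset (falling back to dvmResolveInstField when the pResFields cache entry is null), null-checks the object, and then loads 8/16/32 bits from object + offset into vA. A compressed C sketch; the struct layout and helper names are assumptions for illustration:

    #include <stddef.h>
    #include <stdint.h>

    struct Object;
    struct InstField { int byteOffset; };         /* only the member used here */

    struct InstField *resolveInstFieldOnce(uint32_t fieldIdx);  /* assumed */
    void throwNullPointerException(void);                       /* assumed */

    /* iget vA, vB, field@CCCC for a 32-bit field: returns obj.field. */
    static uint32_t igetWord(const struct Object *obj, uint32_t fieldIdx)
    {
        struct InstField *field = resolveInstFieldOnce(fieldIdx);
        if (obj == NULL) {
            throwNullPointerException();          /* common_errNullObject */
            return 0;
        }
        return *(const uint32_t *)((const char *)obj + field->byteOffset);
    }
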
diff --git a/vm/mterp/x86/OP_IGET_BOOLEAN_JUMBO.S b/vm/mterp/x86/OP_IGET_BOOLEAN_JUMBO.S
new file mode 100644
index 0000000..726fa3e
--- /dev/null
+++ b/vm/mterp/x86/OP_IGET_BOOLEAN_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "x86/OP_IGET_JUMBO.S" { "load":"movzbl", "sqnum":"1" }
diff --git a/vm/mterp/x86/OP_IGET_BYTE_JUMBO.S b/vm/mterp/x86/OP_IGET_BYTE_JUMBO.S
new file mode 100644
index 0000000..88c3dfd
--- /dev/null
+++ b/vm/mterp/x86/OP_IGET_BYTE_JUMBO.S
@@ -0,0 +1,3 @@
+%verify "executed"
+%verify "negative value is sign-extended"
+%include "x86/OP_IGET_JUMBO.S" { "load":"movsbl", "sqnum":"2" }
diff --git a/vm/mterp/x86/OP_IGET_CHAR_JUMBO.S b/vm/mterp/x86/OP_IGET_CHAR_JUMBO.S
new file mode 100644
index 0000000..f654d77
--- /dev/null
+++ b/vm/mterp/x86/OP_IGET_CHAR_JUMBO.S
@@ -0,0 +1,3 @@
+%verify "executed"
+%verify "large values are not sign-extended"
+%include "x86/OP_IGET_JUMBO.S" { "load":"movzwl", "sqnum":"3" }
diff --git a/vm/mterp/x86/OP_IGET_JUMBO.S b/vm/mterp/x86/OP_IGET_JUMBO.S
new file mode 100644
index 0000000..da88f65
--- /dev/null
+++ b/vm/mterp/x86/OP_IGET_JUMBO.S
@@ -0,0 +1,52 @@
+%default { "load":"movl", "sqnum":"0" }
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)                               # preserve rIBASE
+    movl    2(rPC),rIBASE                       # rIBASE<- AAAAAAAA
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzwl  8(rPC),%ecx                         # ecx<- CCCC
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[CCCC], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .L${opcode}_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)               # needed by dvmResolveInstField
+    movl    rSELF,rIBASE
+    EXPORT_PC
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
+    SPILL_TMP1(%ecx)                            # save obj pointer across call
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
+    call    dvmResolveInstField                 #  ... to dvmResolveInstField
+    UNSPILL_TMP1(%ecx)
+    testl   %eax,%eax                           #  returns InstrField ptr
+    jne     .L${opcode}_finish
+    jmp     common_exceptionThrown
+
+.L${opcode}_finish:
+    /*
+     * Currently:
+     *   eax holds resolved field
+     *   ecx holds object
+     *   rINST holds BBBB
+     */
+    movl    offInstField_byteOffset(%eax),%eax  # eax<- byte offset of field
+    testl   %ecx,%ecx                           # object null?
+    je      common_errNullObject                # object was null
+    $load   (%ecx,%eax,1),%ecx                  # ecx<- obj.field (8/16/32 bits)
+    FETCH_INST_OPCODE 5 %eax
+    UNSPILL(rIBASE)                             # restore rIBASE
+    SET_VREG %ecx rINST
+    ADVANCE_PC 5
+    GOTO_NEXT_R %eax
diff --git a/vm/mterp/x86/OP_IGET_OBJECT_JUMBO.S b/vm/mterp/x86/OP_IGET_OBJECT_JUMBO.S
new file mode 100644
index 0000000..3fc93e0
--- /dev/null
+++ b/vm/mterp/x86/OP_IGET_OBJECT_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "x86/OP_IGET_JUMBO.S"
diff --git a/vm/mterp/x86/OP_IGET_QUICK.S b/vm/mterp/x86/OP_IGET_QUICK.S
index 51a5937..86f1f66 100644
--- a/vm/mterp/x86/OP_IGET_QUICK.S
+++ b/vm/mterp/x86/OP_IGET_QUICK.S
@@ -9,8 +9,8 @@
     cmpl      $$0,%ecx                  # is object null?
     je        common_errNullObject
     movl      (%ecx,%eax,1),%eax
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
     andb      $$0xf,rINSTbl             # rINST<- A
     SET_VREG  %eax rINST                # fp[A]<- result
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_IGET_SHORT_JUMBO.S b/vm/mterp/x86/OP_IGET_SHORT_JUMBO.S
new file mode 100644
index 0000000..4ead149
--- /dev/null
+++ b/vm/mterp/x86/OP_IGET_SHORT_JUMBO.S
@@ -0,0 +1,3 @@
+%verify "executed"
+%verify "negative value is sign-extended"
+%include "x86/OP_IGET_JUMBO.S" { "load":"movswl", "sqnum":"4" }
diff --git a/vm/mterp/x86/OP_IGET_WIDE.S b/vm/mterp/x86/OP_IGET_WIDE.S
index 3e22dd2..723c9b7 100644
--- a/vm/mterp/x86/OP_IGET_WIDE.S
+++ b/vm/mterp/x86/OP_IGET_WIDE.S
@@ -8,27 +8,23 @@
      *
      */
     /* op vA, vB, field@CCCC */
-    movl    rGLUE,%ecx
-    movzwl  2(rPC),%edx                         # edx<- 0000CCCC
-    movl    offGlue_methodClassDex(%ecx),%eax   # eax<- DvmDex
+    movl    rSELF,%ecx
+    SPILL(rIBASE)                               # preserve rIBASE
+    movzwl  2(rPC),rIBASE                       # rIBASE<- 0000CCCC
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
     movzbl  rINSTbl,%ecx                        # ecx<- BA
     sarl    $$4,%ecx                            # ecx<- B
     movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
     andb    $$0xf,rINSTbl                       # rINST<- A
     GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
-    movl    (%eax,%edx,4),%eax                  # resolved entry
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
     testl   %eax,%eax                           # is resolved entry null?
     jne     .L${opcode}_finish                  # no, already resolved
-    movl    %edx,OUT_ARG1(%esp)                 # for dvmResolveInstField
-    movl    rGLUE,%edx
-    jmp     .L${opcode}_resolve
-%break
-
-
-.L${opcode}_resolve:
+    movl    rIBASE,OUT_ARG1(%esp)               # for dvmResolveInstField
+    movl    rSELF,rIBASE
     EXPORT_PC
-    movl    offGlue_method(%edx),%edx           # edx<- current method
-    movl    offMethod_clazz(%edx),%edx          # edx<- method->clazz
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
     SPILL_TMP1(%ecx)                            # save objpointer across call
     movl    rPC,OUT_ARG0(%esp)                  # pass in method->clazz
     call    dvmResolveInstField                 #  ... to dvmResolveInstField
@@ -50,8 +46,9 @@
     leal    (%ecx,%eax,1),%eax                  # eax<- address of field
     movl    (%eax),%ecx                         # ecx<- lsw
     movl    4(%eax),%eax                        # eax<- msw
-    FETCH_INST_OPCODE 2 %edx
     SET_VREG_WORD %ecx rINST 0
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)                             # restore rIBASE
     SET_VREG_WORD %eax rINST 1
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_IGET_WIDE_JUMBO.S b/vm/mterp/x86/OP_IGET_WIDE_JUMBO.S
new file mode 100644
index 0000000..f20970f
--- /dev/null
+++ b/vm/mterp/x86/OP_IGET_WIDE_JUMBO.S
@@ -0,0 +1,51 @@
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Jumbo 64-bit instance field get.
+     */
+    /* iget-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)                               # preserve rIBASE
+    movl    2(rPC),rIBASE                       # rIBASE<- AAAAAAAA
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzwl  8(rPC),%ecx                         # ecx<- CCCC
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[CCCC], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .L${opcode}_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)               # for dvmResolveInstField
+    movl    rSELF,rIBASE
+    EXPORT_PC
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
+    SPILL_TMP1(%ecx)                            # save objpointer across call
+    movl    rPC,OUT_ARG0(%esp)                  # pass in method->clazz
+    call    dvmResolveInstField                 #  ... to dvmResolveInstField
+    UNSPILL_TMP1(%ecx)
+    testl   %eax,%eax                           # returns InstrField ptr
+    jne     .L${opcode}_finish
+    jmp     common_exceptionThrown
+
+.L${opcode}_finish:
+    /*
+     * Currently:
+     *   eax holds resolved field
+     *   ecx holds object
+     *   rINST holds BBBB
+     */
+    movl    offInstField_byteOffset(%eax),%eax  # eax<- byte offset of field
+    testl   %ecx,%ecx                           # object null?
+    je      common_errNullObject                # object was null
+    leal    (%ecx,%eax,1),%eax                  # eax<- address of field
+    movl    (%eax),%ecx                         # ecx<- lsw
+    movl    4(%eax),%eax                        # eax<- msw
+    SET_VREG_WORD %ecx rINST 0
+    FETCH_INST_OPCODE 5 %ecx
+    UNSPILL(rIBASE)                             # restore rIBASE
+    SET_VREG_WORD %eax rINST 1
+    ADVANCE_PC 5
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_IGET_WIDE_QUICK.S b/vm/mterp/x86/OP_IGET_WIDE_QUICK.S
index 3867eea..dd63c73 100644
--- a/vm/mterp/x86/OP_IGET_WIDE_QUICK.S
+++ b/vm/mterp/x86/OP_IGET_WIDE_QUICK.S
@@ -12,8 +12,8 @@
     movl      (%eax),%ecx               # ecx<- lsw
     movl      4(%eax),%eax              # eax<- msw
     andb      $$0xf,rINSTbl             # rINST<- A
-    FETCH_INST_OPCODE 2 %edx
     SET_VREG_WORD %ecx rINST 0          # v[A+0]<- lsw
+    FETCH_INST_OPCODE 2 %ecx
     SET_VREG_WORD %eax rINST 1          # v[A+1]<- msw
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_INSTANCE_OF.S b/vm/mterp/x86/OP_INSTANCE_OF.S
index fddb5c8..c54f4f7 100644
--- a/vm/mterp/x86/OP_INSTANCE_OF.S
+++ b/vm/mterp/x86/OP_INSTANCE_OF.S
@@ -12,32 +12,29 @@
      * an already-resolved class.
      */
     /* instance-of vA, vB, class@CCCC */
-    movl    rINST,%eax                # eax<- BA
+    movl    rINST,%eax                  # eax<- BA
     sarl    $$4,%eax                    # eax<- B
     GET_VREG_R %eax %eax                # eax<- vB (obj)
-    movl    rGLUE,%ecx
+    movl    rSELF,%ecx
     testl   %eax,%eax                   # object null?
-    movl    offGlue_methodClassDex(%ecx),%ecx  # ecx<- pDvmDex
+    movl    offThread_methodClassDex(%ecx),%ecx  # ecx<- pDvmDex
+    SPILL(rIBASE)                       # preserve rIBASE
     je      .L${opcode}_store           # null obj, not instance, store it
-    movzwl  2(rPC),%edx                 # edx<- CCCC
+    movzwl  2(rPC),rIBASE               # rIBASE<- CCCC
     movl    offDvmDex_pResClasses(%ecx),%ecx # ecx<- pDvmDex->pResClasses
-    movl    (%ecx,%edx,4),%ecx          # ecx<- resolved class
+    movl    (%ecx,rIBASE,4),%ecx        # ecx<- resolved class
     movl    offObject_clazz(%eax),%eax  # eax<- obj->clazz
     testl   %ecx,%ecx                   # have we resolved this before?
     je      .L${opcode}_resolve         # not resolved, do it now
 .L${opcode}_resolved:  # eax<- obj->clazz, ecx<- resolved class
     cmpl    %eax,%ecx                   # same class (trivial success)?
     je      .L${opcode}_trivial         # yes, trivial finish
-    jmp     .L${opcode}_fullcheck       # no, do full check
-%break
-
     /*
      * Trivial test failed, need to perform full check.  This is common.
      *  eax holds obj->clazz
      *  ecx holds class resolved from BBBB
      *  rINST has BA
      */
-.L${opcode}_fullcheck:
     movl    %eax,OUT_ARG0(%esp)
     movl    %ecx,OUT_ARG1(%esp)
     call    dvmInstanceofNonTrivial     # eax<- boolean result
@@ -48,34 +45,36 @@
      * rINST holds BA
      */
 .L${opcode}_store:
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
     andb    $$0xf,rINSTbl               # <- A
     ADVANCE_PC 2
     SET_VREG %eax rINST                 # vA<- eax
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
     /*
      * Trivial test succeeded, save and bail.
      *  r9 holds A
      */
 .L${opcode}_trivial:
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
     andb    $$0xf,rINSTbl               # <- A
     ADVANCE_PC 2
     movl    $$1,%eax
     SET_VREG %eax rINST                 # vA<- true
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
     /*
      * Resolution required.  This is the least-likely path.
      *
-     *  edx holds BBBB
+     *  rIBASE holds BBBB
      *  rINST holds BA
      */
 .L${opcode}_resolve:
-    movl    %edx,OUT_ARG1(%esp)         # arg1<- BBBB
-    movl    rGLUE,%ecx
-    movl    offGlue_method(%ecx),%ecx
+    movl    rIBASE,OUT_ARG1(%esp)         # arg1<- BBBB
+    movl    rSELF,%ecx
+    movl    offThread_method(%ecx),%ecx
     movl    $$1,OUT_ARG2(%esp)          # arg2<- true
     movl    offMethod_clazz(%ecx),%ecx  # ecx<- method->clazz
     EXPORT_PC
@@ -87,7 +86,7 @@
  * hold the obj->clazz, and ecx to hold the resolved class
  */
     movl    %eax,%ecx                   # ecx<- resolved class
-    movl    rINST,%eax                # eax<- BA
+    movl    rINST,%eax                  # eax<- BA
     sarl    $$4,%eax                    # eax<- B
     GET_VREG_R %eax %eax                # eax<- vB (obj)
     movl    offObject_clazz(%eax),%eax  # eax<- obj->clazz
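
Unlike check-cast, instance-of stores a boolean instead of throwing: null yields false, an identical class yields true, and everything else is decided by dvmInstanceofNonTrivial. Sketch under the same assumptions as the check-cast example earlier:

    #include <stdbool.h>

    struct ClassObject;
    struct Object { struct ClassObject *clazz; };

    bool dvmInstanceofNonTrivial(const struct ClassObject *instClazz,
                                 const struct ClassObject *desiredClazz);

    /* instance-of vA, vB, class@CCCC: vA <- (vB instanceof class) ? 1 : 0 */
    static int instanceOf(const struct Object *obj,
                          const struct ClassObject *desired)
    {
        if (obj == NULL)
            return 0;                    /* null is never an instance */
        if (obj->clazz == desired)
            return 1;                    /* trivial same-class success */
        return dvmInstanceofNonTrivial(obj->clazz, desired) ? 1 : 0;
    }
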
diff --git a/vm/mterp/x86/OP_INSTANCE_OF_JUMBO.S b/vm/mterp/x86/OP_INSTANCE_OF_JUMBO.S
new file mode 100644
index 0000000..590277e
--- /dev/null
+++ b/vm/mterp/x86/OP_INSTANCE_OF_JUMBO.S
@@ -0,0 +1,88 @@
+%verify "executed"
+%verify "null object"
+%verify "class cast exception thrown, with correct class name"
+%verify "class cast exception not thrown on same class"
+%verify "class cast exception not thrown on subclass"
+%verify "class not resolved"
+%verify "class already resolved"
+    /*
+     * Check to see if an object reference is an instance of a class.
+     *
+     * Most common situation is a non-null object, being compared against
+     * an already-resolved class.
+     */
+    /* instance-of/jumbo vBBBB, vCCCC, class@AAAAAAAA */
+    movzwl  8(rPC),%eax                 # eax<- CCCC
+    GET_VREG_R %eax %eax                # eax<- vCCCC (obj)
+    movl    rSELF,%ecx
+    testl   %eax,%eax                   # object null?
+    movl    offThread_methodClassDex(%ecx),%ecx  # ecx<- pDvmDex
+    SPILL(rIBASE)                       # preserve rIBASE
+    je      .L${opcode}_store           # null obj, not instance, store it
+    movl    2(rPC),rIBASE               # rIBASE<- AAAAAAAA
+    movl    offDvmDex_pResClasses(%ecx),%ecx # ecx<- pDvmDex->pResClasses
+    movl    (%ecx,rIBASE,4),%ecx        # ecx<- resolved class
+    movl    offObject_clazz(%eax),%eax  # eax<- obj->clazz
+    testl   %ecx,%ecx                   # have we resolved this before?
+    je      .L${opcode}_resolve         # not resolved, do it now
+.L${opcode}_resolved:  # eax<- obj->clazz, ecx<- resolved class
+    cmpl    %eax,%ecx                   # same class (trivial success)?
+    je      .L${opcode}_trivial         # yes, trivial finish
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  eax holds obj->clazz
+     *  ecx holds class resolved from AAAAAAAA
+     *  rINST holds BBBB
+     */
+    movl    %eax,OUT_ARG0(%esp)
+    movl    %ecx,OUT_ARG1(%esp)
+    call    dvmInstanceofNonTrivial     # eax<- boolean result
+    # fall through to ${opcode}_store
+
+    /*
+     * eax holds boolean result
+     * rINST holds BBBB
+     */
+.L${opcode}_store:
+    FETCH_INST_OPCODE 5 %ecx
+    UNSPILL(rIBASE)
+    ADVANCE_PC 5
+    SET_VREG %eax rINST                 # vBBBB<- eax
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Trivial test succeeded, save and bail.
+     *  rINST holds BBBB
+     */
+.L${opcode}_trivial:
+    FETCH_INST_OPCODE 5 %ecx
+    UNSPILL(rIBASE)
+    ADVANCE_PC 5
+    movl    $$1,%eax
+    SET_VREG %eax rINST                 # vBBBB<- true
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  rIBASE holds AAAAAAAA
+     */
+.L${opcode}_resolve:
+    movl    rIBASE,OUT_ARG1(%esp)       # arg1<- AAAAAAAA
+    movl    rSELF,%ecx
+    movl    offThread_method(%ecx),%ecx
+    movl    $$1,OUT_ARG2(%esp)          # arg2<- true
+    movl    offMethod_clazz(%ecx),%ecx  # ecx<- method->clazz
+    EXPORT_PC
+    movl    %ecx,OUT_ARG0(%esp)         # arg0<- method->clazz
+    call    dvmResolveClass             # eax<- resolved ClassObject ptr
+    testl   %eax,%eax                   # success?
+    je      common_exceptionThrown      # no, handle exception
+/* Now, we need to sync up with fast path.  We need eax to
+ * hold the obj->clazz, and ecx to hold the resolved class
+ */
+    movl    %eax,%ecx                   # ecx<- resolved class
+    movzwl  8(rPC),%eax                 # eax<- CCCC
+    GET_VREG_R %eax %eax                # eax<- vCCCC (obj)
+    movl    offObject_clazz(%eax),%eax  # eax<- obj->clazz
+    jmp     .L${opcode}_resolved
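For reference, the control flow of the new handler corresponds roughly to the C below. This is only a sketch: handleInstanceOfJumbo and the resClasses/curMethod parameters are stand-ins for pDvmDex->pResClasses and the executing method, while dvmResolveClass() and dvmInstanceofNonTrivial() are the VM calls the assembly actually makes.

    #include "Dalvik.h"     /* u4, Object, ClassObject, dvmResolveClass(), ... */

    /* Sketch of instance-of/jumbo vBBBB, vCCCC, class@AAAAAAAA. */
    static bool handleInstanceOfJumbo(u4* fp, ClassObject** resClasses,
                                      const Method* curMethod,
                                      u4 vBBBB, u4 vCCCC, u4 classIdx)
    {
        Object* obj = (Object*) fp[vCCCC];
        u4 result = 0;

        if (obj != NULL) {
            ClassObject* clazz = resClasses[classIdx];      /* cached entry */
            if (clazz == NULL) {                            /* least-likely path */
                clazz = dvmResolveClass(curMethod->clazz, classIdx, true);
                if (clazz == NULL)
                    return false;                           /* exception pending */
            }
            result = (obj->clazz == clazz)                  /* trivial success */
                    ? 1
                    : dvmInstanceofNonTrivial(obj->clazz, clazz);
        }
        fp[vBBBB] = result;                                 /* vBBBB<- boolean */
        return true;
    }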
diff --git a/vm/mterp/x86/OP_INT_TO_LONG.S b/vm/mterp/x86/OP_INT_TO_LONG.S
index 6d5d0aa..551efaf 100644
--- a/vm/mterp/x86/OP_INT_TO_LONG.S
+++ b/vm/mterp/x86/OP_INT_TO_LONG.S
@@ -4,9 +4,11 @@
     sarl    $$4,%eax                    # eax<- B
     GET_VREG_R %eax %eax                # eax<- vB
     andb    $$0xf,rINSTbl               # rINST<- A
-    cltd                                # edx:eax<- sssssssBBBBBBBB
-    SET_VREG_WORD %edx rINST 1          # v[A+1]<- edx/rPC
-    FETCH_INST_OPCODE 1 %edx
+    SPILL(rIBASE)                       # cltd trashes rIBASE/edx
+    cltd                                # rIBASE:eax<- ssssssssBBBBBBBB
+    SET_VREG_WORD rIBASE rINST 1        # v[A+1]<- rIBASE
+    FETCH_INST_OPCODE 1 %ecx
+    UNSPILL(rIBASE)
     SET_VREG_WORD %eax rINST 0          # v[A+0]<- %eax
     ADVANCE_PC 1
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_INVOKE_DIRECT.S b/vm/mterp/x86/OP_INVOKE_DIRECT.S
index 3718101..7d27c6f 100644
--- a/vm/mterp/x86/OP_INVOKE_DIRECT.S
+++ b/vm/mterp/x86/OP_INVOKE_DIRECT.S
@@ -13,24 +13,23 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    movl      rGLUE,%ecx
+    movl      rSELF,%ecx
     movzwl    2(rPC),%eax              # eax<- BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
+    movl      offThread_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
     EXPORT_PC
     movl      offDvmDex_pResMethods(%ecx),%ecx  # ecx<- pDvmDex->pResMethods
-    movzwl    4(rPC),%edx              # edx<- GFED or CCCC
+    movzwl    4(rPC),rIBASE            # rIBASE<- GFED or CCCC
     movl      (%ecx,%eax,4),%eax       # eax<- resolved methodToCall
     .if       (!$isrange)
-    andl      $$0xf,%edx               # edx<- D (or stays CCCC)
+    andl      $$0xf,rIBASE             # rIBASE<- D (or stays CCCC)
     .endif
     testl     %eax,%eax                # already resolved?
-    GET_VREG_R  %ecx %edx              # ecx<- "this" ptr
+    GET_VREG_R  %ecx rIBASE            # ecx<- "this" ptr
     je        .L${opcode}_resolve      # not resolved, do it now
 .L${opcode}_finish:
     testl     %ecx,%ecx                # null "this"?
     jne       common_invokeMethod${routine}  # no, continue on
     jmp       common_errNullObject
-%break
 
     /*
      * On entry:
@@ -40,8 +39,8 @@
      */
 .L${opcode}_resolve:
      SPILL_TMP1(%ecx)
-     movl     rGLUE,%ecx
-     movl     offGlue_method(%ecx),%ecx  # ecx<- glue->method
+     movl     rSELF,%ecx
+     movl     offThread_method(%ecx),%ecx  # ecx<- self->method
      movzwl   2(rPC),%eax      # reference (BBBB or CCCC)
      movl     offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
      movl     $$METHOD_DIRECT,OUT_ARG2(%esp)
diff --git a/vm/mterp/x86/OP_INVOKE_DIRECT_EMPTY.S b/vm/mterp/x86/OP_INVOKE_DIRECT_EMPTY.S
deleted file mode 100644
index 2fa25e3..0000000
--- a/vm/mterp/x86/OP_INVOKE_DIRECT_EMPTY.S
+++ /dev/null
@@ -1,7 +0,0 @@
-%verify "executed"
-    /*
-     * invoke-direct-empty is a no-op in a "standard" interpreter.
-     */
-    FETCH_INST_WORD 3
-    ADVANCE_PC 3
-    GOTO_NEXT
diff --git a/vm/mterp/x86/OP_INVOKE_DIRECT_JUMBO.S b/vm/mterp/x86/OP_INVOKE_DIRECT_JUMBO.S
new file mode 100644
index 0000000..b1576c5
--- /dev/null
+++ b/vm/mterp/x86/OP_INVOKE_DIRECT_JUMBO.S
@@ -0,0 +1,46 @@
+%verify "executed"
+%verify "unknown method"
+    /*
+     * Handle a jumbo direct method call.
+     *
+     * (We could defer the "is 'this' pointer null" test to the common
+     * method invocation code, and use a flag to indicate that static
+     * calls don't count.  If we do this as part of copying the arguments
+     * out we could avoid loading the first arg twice.)
+     */
+    /* invoke-direct/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    movl      rSELF,%ecx
+    movl      2(rPC),%eax              # eax<- AAAAAAAA
+    movl      offThread_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
+    EXPORT_PC
+    movl      offDvmDex_pResMethods(%ecx),%ecx  # ecx<- pDvmDex->pResMethods
+    movzwl    8(rPC),rIBASE            # rIBASE<- CCCC
+    movl      (%ecx,%eax,4),%eax       # eax<- resolved methodToCall
+    testl     %eax,%eax                # already resolved?
+    GET_VREG_R  %ecx rIBASE            # ecx<- "this" ptr
+    je        .L${opcode}_resolve      # not resolved, do it now
+.L${opcode}_finish:
+    testl     %ecx,%ecx                # null "this"?
+    jne       common_invokeMethodJumbo # no, continue on
+    jmp       common_errNullObject
+
+    /*
+     * On entry:
+     *   TMP_SPILL  <- "this" register
+     * Things are a bit ugly on this path, but it's the less
+     * frequent one.  We'll have to do some reloading.
+     */
+.L${opcode}_resolve:
+     SPILL_TMP1(%ecx)
+     movl     rSELF,%ecx
+     movl     offThread_method(%ecx),%ecx  # ecx<- self->method
+     movl     2(rPC),%eax      # reference AAAAAAAA
+     movl     offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+     movl     $$METHOD_DIRECT,OUT_ARG2(%esp)
+     movl     %eax,OUT_ARG1(%esp)
+     movl     %ecx,OUT_ARG0(%esp)
+     call     dvmResolveMethod # eax<- call(clazz, ref, flags)
+     UNSPILL_TMP1(%ecx)
+     testl    %eax,%eax
+     jne      .L${opcode}_finish
+     jmp      common_exceptionThrown
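The resolution path in this handler is, roughly, the C below (same Dalvik.h assumptions as the earlier sketch; resMethods stands for pDvmDex->pResMethods). The real handler separates the two failure cases into common_exceptionThrown and common_errNullObject rather than returning NULL for both.

    /* Sketch of the invoke-direct/jumbo resolve-and-check step. */
    static Method* resolveDirectJumbo(Method** resMethods, const Method* curMethod,
                                      u4 methodIdx, Object* thisPtr)
    {
        Method* methodToCall = resMethods[methodIdx];       /* cached entry */
        if (methodToCall == NULL) {
            methodToCall = dvmResolveMethod(curMethod->clazz, methodIdx,
                                            METHOD_DIRECT);
            if (methodToCall == NULL)
                return NULL;                                /* exception pending */
        }
        if (thisPtr == NULL)
            return NULL;                                    /* null "this" */
        return methodToCall;                                /* go invoke it */
    }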
diff --git a/vm/mterp/x86/OP_INVOKE_INTERFACE.S b/vm/mterp/x86/OP_INVOKE_INTERFACE.S
index aab94be..3fa4c94 100644
--- a/vm/mterp/x86/OP_INVOKE_INTERFACE.S
+++ b/vm/mterp/x86/OP_INVOKE_INTERFACE.S
@@ -10,7 +10,7 @@
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     movzwl     4(rPC),%eax              # eax<- FEDC or CCCC
-    movl       rGLUE,%ecx
+    movl       rSELF,%ecx
     .if        (!$isrange)
     andl       $$0xf,%eax               # eax<- C (or stays CCCC)
     .endif
@@ -20,16 +20,12 @@
     je         common_errNullObject     # yes, fail
     movl       offObject_clazz(%eax),%eax# eax<- thisPtr->clazz
     movl       %eax,OUT_ARG0(%esp)                 # arg0<- class
-    movl       offGlue_methodClassDex(%ecx),%eax   # eax<- methodClassDex
-    movl       offGlue_method(%ecx),%ecx           # ecx<- method
+    movl       offThread_methodClassDex(%ecx),%eax   # eax<- methodClassDex
+    movl       offThread_method(%ecx),%ecx           # ecx<- method
     movl       %eax,OUT_ARG3(%esp)                 # arg3<- dex
     movzwl     2(rPC),%eax                         # eax<- BBBB
     movl       %ecx,OUT_ARG2(%esp)                 # arg2<- method
     movl       %eax,OUT_ARG1(%esp)                 # arg1<- BBBB
-    jmp        .L${opcode}_continue
-%break
-
-.L${opcode}_continue:
     call       dvmFindInterfaceMethodInCache # eax<- call(class, ref, method, dex)
     testl      %eax,%eax
     je         common_exceptionThrown
diff --git a/vm/mterp/x86/OP_INVOKE_INTERFACE_JUMBO.S b/vm/mterp/x86/OP_INVOKE_INTERFACE_JUMBO.S
new file mode 100644
index 0000000..ee0ecdb
--- /dev/null
+++ b/vm/mterp/x86/OP_INVOKE_INTERFACE_JUMBO.S
@@ -0,0 +1,25 @@
+%verify "executed"
+%verify "unknown method"
+%verify "null object"
+    /*
+     * Handle a jumbo interface method call.
+     */
+    /* invoke-interface/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    movzwl     8(rPC),%eax              # eax<- CCCC
+    movl       rSELF,%ecx
+    GET_VREG_R   %eax %eax              # eax<- "this"
+    EXPORT_PC
+    testl      %eax,%eax                # null this?
+    je         common_errNullObject     # yes, fail
+    movl       offObject_clazz(%eax),%eax# eax<- thisPtr->clazz
+    movl       %eax,OUT_ARG0(%esp)                 # arg0<- class
+    movl       offThread_methodClassDex(%ecx),%eax   # eax<- methodClassDex
+    movl       offThread_method(%ecx),%ecx           # ecx<- method
+    movl       %eax,OUT_ARG3(%esp)                 # arg3<- dex
+    movl       2(rPC),%eax                         # eax<- AAAAAAAA
+    movl       %ecx,OUT_ARG2(%esp)                 # arg2<- method
+    movl       %eax,OUT_ARG1(%esp)                 # arg1<- AAAAAAAA
+    call       dvmFindInterfaceMethodInCache # eax<- call(class, ref, method, dex)
+    testl      %eax,%eax
+    je         common_exceptionThrown
+    jmp        common_invokeMethodJumbo
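In C terms the interface path is little more than a null check plus a cache lookup; a sketch, assuming dvmFindInterfaceMethodInCache() keeps its usual (class, ref, method, dex) argument order and throws on failure:

    /* Sketch of invoke-interface/jumbo dispatch. */
    static Method* findInterfaceJumbo(Object* thisPtr, u4 methodIdx,
                                      const Method* curMethod,
                                      DvmDex* methodClassDex)
    {
        if (thisPtr == NULL)
            return NULL;                    /* common_errNullObject */
        return dvmFindInterfaceMethodInCache(thisPtr->clazz, methodIdx,
                                             curMethod, methodClassDex);
    }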
diff --git a/vm/mterp/x86/OP_INVOKE_OBJECT_INIT_RANGE.S b/vm/mterp/x86/OP_INVOKE_OBJECT_INIT_RANGE.S
new file mode 100644
index 0000000..fb84b32
--- /dev/null
+++ b/vm/mterp/x86/OP_INVOKE_OBJECT_INIT_RANGE.S
@@ -0,0 +1,4 @@
+%verify "executed"
+    /*
+     * TODO (currently punting to stub)
+     */
diff --git a/vm/mterp/x86/OP_INVOKE_STATIC.S b/vm/mterp/x86/OP_INVOKE_STATIC.S
index ca89cff..ca68a84 100644
--- a/vm/mterp/x86/OP_INVOKE_STATIC.S
+++ b/vm/mterp/x86/OP_INVOKE_STATIC.S
@@ -8,24 +8,20 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    movl      rGLUE,%ecx
+    movl      rSELF,%ecx
     movzwl    2(rPC),%eax               # eax<- BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
+    movl      offThread_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
     EXPORT_PC
     movl      offDvmDex_pResMethods(%ecx),%ecx  # ecx<- pDvmDex->pResMethods
     movl      (%ecx,%eax,4),%eax        # eax<- resolved methodToCall
     testl     %eax,%eax
     jne       common_invokeMethod${routine}
-    movl      rGLUE,%ecx
-    movl      offGlue_method(%ecx),%ecx # ecx<- glue->method
+    movl      rSELF,%ecx
+    movl      offThread_method(%ecx),%ecx # ecx<- self->method
     movzwl    2(rPC),%eax
     movl      offMethod_clazz(%ecx),%ecx# ecx<- method->clazz
     movl      %eax,OUT_ARG1(%esp)       # arg1<- BBBB
     movl      %ecx,OUT_ARG0(%esp)       # arg0<- clazz
-    jmp       .L${opcode}_continue
-%break
-
-.L${opcode}_continue:
     movl      $$METHOD_STATIC,%eax
     movl      %eax,OUT_ARG2(%esp)       # arg2<- flags
     call      dvmResolveMethod          # call(clazz,ref,flags)
diff --git a/vm/mterp/x86/OP_INVOKE_STATIC_JUMBO.S b/vm/mterp/x86/OP_INVOKE_STATIC_JUMBO.S
new file mode 100644
index 0000000..1f98d3d
--- /dev/null
+++ b/vm/mterp/x86/OP_INVOKE_STATIC_JUMBO.S
@@ -0,0 +1,26 @@
+%verify "executed"
+%verify "unknown method"
+    /*
+     * Handle a jumbo static method call.
+     */
+    /* invoke-static/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    movl      rSELF,%ecx
+    movl      2(rPC),%eax               # eax<- AAAAAAAA
+    movl      offThread_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
+    EXPORT_PC
+    movl      offDvmDex_pResMethods(%ecx),%ecx  # ecx<- pDvmDex->pResMethods
+    movl      (%ecx,%eax,4),%eax        # eax<- resolved methodToCall
+    testl     %eax,%eax
+    jne       common_invokeMethodJumbo
+    movl      rSELF,%ecx
+    movl      offThread_method(%ecx),%ecx # ecx<- self->method
+    movl      2(rPC),%eax               # eax<- AAAAAAAA
+    movl      offMethod_clazz(%ecx),%ecx# ecx<- method->clazz
+    movl      %eax,OUT_ARG1(%esp)       # arg1<- AAAAAAAA
+    movl      %ecx,OUT_ARG0(%esp)       # arg0<- clazz
+    movl      $$METHOD_STATIC,%eax
+    movl      %eax,OUT_ARG2(%esp)       # arg2<- flags
+    call      dvmResolveMethod          # call(clazz,ref,flags)
+    testl     %eax,%eax                 # got null?
+    jne       common_invokeMethodJumbo
+    jmp       common_exceptionThrown
diff --git a/vm/mterp/x86/OP_INVOKE_SUPER.S b/vm/mterp/x86/OP_INVOKE_SUPER.S
index 42bd6a6..ecee028 100644
--- a/vm/mterp/x86/OP_INVOKE_SUPER.S
+++ b/vm/mterp/x86/OP_INVOKE_SUPER.S
@@ -8,13 +8,13 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    movl      rGLUE,rINST
+    movl      rSELF,rINST
     movzwl    2(rPC),%eax               # eax<- BBBB
-    movl      offGlue_methodClassDex(rINST),%ecx # ecx<- pDvmDex
+    movl      offThread_methodClassDex(rINST),%ecx # ecx<- pDvmDex
     EXPORT_PC
     movl      offDvmDex_pResMethods(%ecx),%ecx # ecx<- pDvmDex->pResMethods
     movl      (%ecx,%eax,4),%ecx        # ecx<- resolved baseMethod
-    movl      offGlue_method(rINST),%eax # eax<- method
+    movl      offThread_method(rINST),%eax # eax<- method
     movzwl    4(rPC),rINST              # rINST<- GFED or CCCC
     .if       (!$isrange)
     andl      $$0xf,rINST               # rINST<- D (or stays CCCC)
@@ -24,10 +24,7 @@
     je        common_errNullObject      # yes, throw
     movl      offMethod_clazz(%eax),%eax # eax<- method->clazz
     testl     %ecx,%ecx                 # already resolved?
-    jne       .L${opcode}_continue      # yes - go on
-    jmp       .L${opcode}_resolve
-%break
-
+    je       .L${opcode}_resolve
     /*
      * At this point:
      *  ecx = resolved base method [r0]
@@ -66,5 +63,4 @@
      */
 .L${opcode}_nsm:
     movl    offMethod_name(%ecx),%eax
-    mov     %eax,OUT_ARG1(%esp)
     jmp     common_errNoSuchMethod
diff --git a/vm/mterp/x86/OP_INVOKE_SUPER_JUMBO.S b/vm/mterp/x86/OP_INVOKE_SUPER_JUMBO.S
new file mode 100644
index 0000000..d98e14d
--- /dev/null
+++ b/vm/mterp/x86/OP_INVOKE_SUPER_JUMBO.S
@@ -0,0 +1,59 @@
+%verify "executed"
+%verify "unknown method"
+    /*
+     * Handle a jumbo "super" method call.
+     */
+    /* invoke-super/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    movl      rSELF,rINST
+    movl      2(rPC),%eax               # eax<- AAAAAAAA
+    movl      offThread_methodClassDex(rINST),%ecx # ecx<- pDvmDex
+    EXPORT_PC
+    movl      offDvmDex_pResMethods(%ecx),%ecx # ecx<- pDvmDex->pResMethods
+    movl      (%ecx,%eax,4),%ecx        # ecx<- resolved baseMethod
+    movl      offThread_method(rINST),%eax # eax<- method
+    movzwl    8(rPC),rINST              # rINST<- CCCC
+    GET_VREG_R  rINST rINST             # rINST<- "this" ptr
+    testl     rINST,rINST               # null "this"?
+    je        common_errNullObject      # yes, throw
+    movl      offMethod_clazz(%eax),%eax # eax<- method->clazz
+    testl     %ecx,%ecx                 # already resolved?
+    je       .L${opcode}_resolve
+    /*
+     * At this point:
+     *  ecx = resolved base method [r0]
+     *  eax = method->clazz [r9]
+     */
+.L${opcode}_continue:
+    movl    offClassObject_super(%eax),%eax   # eax<- method->clazz->super
+    movzwl  offMethod_methodIndex(%ecx),%ecx  # ecx<- baseMethod->methodIndex
+    cmpl    offClassObject_vtableCount(%eax),%ecx # compare(methodIndex,vtableCount)
+    jae     .L${opcode}_nsm           # method not present in superclass
+    movl    offClassObject_vtable(%eax),%eax   # eax<- ...clazz->super->vtable
+    movl    (%eax,%ecx,4),%eax        # eax<- vtable[methodIndex]
+    jmp     common_invokeMethodJumbo
+
+
+    /* At this point:
+     * ecx = null (needs to be resolved base method)
+     * eax = method->clazz
+    */
+.L${opcode}_resolve:
+    SPILL_TMP1(%eax)                    # method->clazz
+    movl    %eax,OUT_ARG0(%esp)         # arg0<- method->clazz
+    movl    2(rPC),%ecx                 # ecx<- AAAAAAAA
+    movl    $$METHOD_VIRTUAL,OUT_ARG2(%esp)  # arg2<- resolver method type
+    movl    %ecx,OUT_ARG1(%esp)         # arg1<- ref
+    call    dvmResolveMethod            # eax<- call(clazz, ref, flags)
+    testl   %eax,%eax                   # got null?
+    movl    %eax,%ecx                   # ecx<- resolved base method
+    UNSPILL_TMP1(%eax)                  # restore method->clazz
+    jne     .L${opcode}_continue        # good to go - continue
+    jmp     common_exceptionThrown      # handle exception
+
+    /*
+     * Throw a NoSuchMethodError with the method name as the message.
+     *  ecx = resolved base method
+     */
+.L${opcode}_nsm:
+    movl    offMethod_name(%ecx),%eax
+    jmp     common_errNoSuchMethod
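The dispatch math in the _continue block is the interesting part: the resolved base method only contributes a vtable index, and the target comes from the superclass of the calling method's class. A sketch under the same Dalvik.h assumptions:

    /* Sketch of the invoke-super/jumbo dispatch step. */
    static Method* superDispatchJumbo(const Method* baseMethod, const Method* curMethod)
    {
        ClassObject* super = curMethod->clazz->super;       /* method->clazz->super */
        if (baseMethod->methodIndex >= super->vtableCount)
            return NULL;                                    /* NoSuchMethodError */
        return super->vtable[baseMethod->methodIndex];      /* vtable[methodIndex] */
    }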
diff --git a/vm/mterp/x86/OP_INVOKE_SUPER_QUICK.S b/vm/mterp/x86/OP_INVOKE_SUPER_QUICK.S
index d02cf13..5fe098c 100644
--- a/vm/mterp/x86/OP_INVOKE_SUPER_QUICK.S
+++ b/vm/mterp/x86/OP_INVOKE_SUPER_QUICK.S
@@ -8,9 +8,9 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    movl      rGLUE,%ecx
+    movl      rSELF,%ecx
     movzwl    4(rPC),%eax               # eax<- GFED or CCCC
-    movl      offGlue_method(%ecx),%ecx # ecx<- current method
+    movl      offThread_method(%ecx),%ecx # ecx<- current method
     .if       (!$isrange)
     andl      $$0xf,%eax                # eax<- D (or stays CCCC)
     .endif
diff --git a/vm/mterp/x86/OP_INVOKE_VIRTUAL.S b/vm/mterp/x86/OP_INVOKE_VIRTUAL.S
index ab4dc95..24d0170 100644
--- a/vm/mterp/x86/OP_INVOKE_VIRTUAL.S
+++ b/vm/mterp/x86/OP_INVOKE_VIRTUAL.S
@@ -10,22 +10,17 @@
      */
     /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
     /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    movl      rGLUE,%eax
+    movl      rSELF,%eax
     movzwl    2(rPC),%ecx                 # ecx<- BBBB
-    movl      offGlue_methodClassDex(%eax),%eax  # eax<- pDvmDex
+    movl      offThread_methodClassDex(%eax),%eax  # eax<- pDvmDex
     EXPORT_PC
     movl      offDvmDex_pResMethods(%eax),%eax   # eax<- pDvmDex->pResMethods
     movl      (%eax,%ecx,4),%eax          # eax<- resolved baseMethod
     testl     %eax,%eax                   # already resolved?
     jne       .L${opcode}_continue        # yes, continue
-    movl      rGLUE,%eax
+    movl      rSELF,%eax
     movl      %ecx,OUT_ARG1(%esp)         # arg1<- ref
-    movl      offGlue_method(%eax),%eax   # eax<- glue->method
-    jmp       .L${opcode}_more
-%break
-
-
-.L${opcode}_more:
+    movl      offThread_method(%eax),%eax   # eax<- self->method
     movl      offMethod_clazz(%eax),%eax  # eax<- method->clazz
     movl      %eax,OUT_ARG0(%esp)         # arg0<- clazz
     movl      $$METHOD_VIRTUAL,OUT_ARG2(%esp) # arg2<- flags
diff --git a/vm/mterp/x86/OP_INVOKE_VIRTUAL_JUMBO.S b/vm/mterp/x86/OP_INVOKE_VIRTUAL_JUMBO.S
new file mode 100644
index 0000000..085b591
--- /dev/null
+++ b/vm/mterp/x86/OP_INVOKE_VIRTUAL_JUMBO.S
@@ -0,0 +1,40 @@
+%verify "executed"
+%verify "unknown method"
+%verify "null object"
+    /*
+     * Handle a jumbo virtual method call.
+     */
+    /* invoke-virtual/jumbo vBBBB, {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    movl      rSELF,%eax
+    movl      2(rPC),%ecx                 # ecx<- AAAAAAAA
+    movl      offThread_methodClassDex(%eax),%eax  # eax<- pDvmDex
+    EXPORT_PC
+    movl      offDvmDex_pResMethods(%eax),%eax   # eax<- pDvmDex->pResMethods
+    movl      (%eax,%ecx,4),%eax          # eax<- resolved baseMethod
+    testl     %eax,%eax                   # already resolved?
+    jne       .L${opcode}_continue        # yes, continue
+    movl      rSELF,%eax
+    movl      %ecx,OUT_ARG1(%esp)         # arg1<- ref
+    movl      offThread_method(%eax),%eax   # eax<- self->method
+    movl      offMethod_clazz(%eax),%eax  # eax<- method->clazz
+    movl      %eax,OUT_ARG0(%esp)         # arg0<- clazz
+    movl      $$METHOD_VIRTUAL,OUT_ARG2(%esp) # arg2<- flags
+    call      dvmResolveMethod            # eax<- call(clazz, ref, flags)
+    testl     %eax,%eax                   # got null?
+    jne       .L${opcode}_continue        # no, continue
+    jmp       common_exceptionThrown      # yes, handle exception
+
+    /* At this point:
+     *   eax = resolved base method
+     *   ecx = scratch
+     */
+.L${opcode}_continue:
+    movzwl    8(rPC),%ecx               # ecx<- CCCC
+    GET_VREG_R  %ecx %ecx               # ecx<- "this"
+    movzwl    offMethod_methodIndex(%eax),%eax  # eax<- baseMethod->methodIndex
+    testl     %ecx,%ecx                 # null this?
+    je        common_errNullObject      # go if so
+    movl      offObject_clazz(%ecx),%ecx  # ecx<- thisPtr->clazz
+    movl      offClassObject_vtable(%ecx),%ecx # ecx<- thisPtr->clazz->vtable
+    movl      (%ecx,%eax,4),%eax        # eax<- vtable[methodIndex]
+    jmp       common_invokeMethodJumbo
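Virtual dispatch differs from the super case only in which class supplies the vtable; here it is the receiver's runtime class. Roughly, with the same assumptions:

    /* Sketch of the invoke-virtual/jumbo dispatch step. */
    static Method* virtualDispatchJumbo(const Method* baseMethod, Object* thisPtr)
    {
        if (thisPtr == NULL)
            return NULL;                                    /* common_errNullObject */
        return thisPtr->clazz->vtable[baseMethod->methodIndex];
    }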
diff --git a/vm/mterp/x86/OP_IPUT.S b/vm/mterp/x86/OP_IPUT.S
index 20d4a1a..2c718b2 100644
--- a/vm/mterp/x86/OP_IPUT.S
+++ b/vm/mterp/x86/OP_IPUT.S
@@ -11,29 +11,25 @@
      * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
      */
     /* op vA, vB, field@CCCC */
-    movl    rGLUE,%ecx
-    movzwl  2(rPC),%edx                         # %edx<- 0000CCCC
-    movl    offGlue_methodClassDex(%ecx),%eax   # eax<- DvmDex
+    movl    rSELF,%ecx
+    SPILL   (rIBASE)
+    movzwl  2(rPC),rIBASE                       # rIBASE<- 0000CCCC
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
     movzbl  rINSTbl,%ecx                        # ecx<- BA
     sarl    $$4,%ecx                            # ecx<- B
     movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
     andb    $$0xf,rINSTbl                       # rINST<- A
     GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
-    movl    (%eax,%edx,4),%eax                  # resolved entry
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
     testl   %eax,%eax                           # is resolved entry null?
     jne     .L${opcode}_finish                  # no, already resolved
-    movl    %edx,OUT_ARG1(%esp)
-    movl    rGLUE,%edx
-    jmp     .L${opcode}_resolve
-%break
-
-
-.L${opcode}_resolve:
+    movl    rIBASE,OUT_ARG1(%esp)
+    movl    rSELF,rIBASE
     EXPORT_PC
-    movl    offGlue_method(%edx),%edx           # edx<- current method
-    movl    offMethod_clazz(%edx),%edx          # edx<- method->clazz
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
     SPILL_TMP1(%ecx)                            # save obj pointer across call
-    movl    %edx,OUT_ARG0(%esp)                 # pass in method->clazz
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
     call    dvmResolveInstField                 #  ... to dvmResolveInstField
     UNSPILL_TMP1(%ecx)
     testl   %eax,%eax                           # returns InstrField ptr
@@ -51,7 +47,8 @@
     movl    offInstField_byteOffset(%eax),%eax   # eax<- byte offset of field
     testl   %ecx,%ecx                            # object null?
     je      common_errNullObject                 # object was null
-    FETCH_INST_OPCODE 2 %edx
     $store   $reg,(%ecx,%eax,1)            # obj.field <- v[A](8/16/32 bits)
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_IPUT_BOOLEAN_JUMBO.S b/vm/mterp/x86/OP_IPUT_BOOLEAN_JUMBO.S
new file mode 100644
index 0000000..eb156ec
--- /dev/null
+++ b/vm/mterp/x86/OP_IPUT_BOOLEAN_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "x86/OP_IPUT_JUMBO.S" { "store":"movb","reg":"rINSTbl", "sqnum":"1" }
diff --git a/vm/mterp/x86/OP_IPUT_BYTE_JUMBO.S b/vm/mterp/x86/OP_IPUT_BYTE_JUMBO.S
new file mode 100644
index 0000000..e2bdf05
--- /dev/null
+++ b/vm/mterp/x86/OP_IPUT_BYTE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "x86/OP_IPUT_JUMBO.S" { "store":"movb", "reg":"rINSTbl", "sqnum":"2" }
diff --git a/vm/mterp/x86/OP_IPUT_CHAR_JUMBO.S b/vm/mterp/x86/OP_IPUT_CHAR_JUMBO.S
new file mode 100644
index 0000000..f2e7592
--- /dev/null
+++ b/vm/mterp/x86/OP_IPUT_CHAR_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "x86/OP_IPUT_JUMBO.S" { "store":"movw", "reg":"rINSTw", "sqnum":"3" }
diff --git a/vm/mterp/x86/OP_IPUT_JUMBO.S b/vm/mterp/x86/OP_IPUT_JUMBO.S
new file mode 100644
index 0000000..474cac5
--- /dev/null
+++ b/vm/mterp/x86/OP_IPUT_JUMBO.S
@@ -0,0 +1,52 @@
+%default { "store":"movl", "reg":"rINST", "sqnum":"0" }
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-object/jumbo, iput-boolean/jumbo, iput-byte/jumbo,
+     *      iput-char/jumbo, iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)
+    movl    2(rPC),rIBASE                       # rIBASE<- AAAAAAAA
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzwl  8(rPC),%ecx                         # ecx<- CCCC
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[CCCC], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .L${opcode}_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)
+    movl    rSELF,rIBASE
+    EXPORT_PC
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
+    SPILL_TMP1(%ecx)                            # save obj pointer across call
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
+    call    dvmResolveInstField                 #  ... to dvmResolveInstField
+    UNSPILL_TMP1(%ecx)
+    testl   %eax,%eax                           # returns InstrField ptr
+    jne     .L${opcode}_finish
+    jmp     common_exceptionThrown
+
+.L${opcode}_finish:
+    /*
+     * Currently:
+     *   eax holds resolved field
+     *   ecx holds object
+     *   rINST holds BBBB
+     */
+    GET_VREG_R rINST rINST                       # rINST<- v[BBBB]
+    movl    offInstField_byteOffset(%eax),%eax   # eax<- byte offset of field
+    testl   %ecx,%ecx                            # object null?
+    je      common_errNullObject                 # object was null
+    $store   $reg,(%ecx,%eax,1)            # obj.field <- v[BBBB](8/16/32 bits)
+    FETCH_INST_OPCODE 5 %ecx
+    UNSPILL(rIBASE)
+    ADVANCE_PC 5
+    GOTO_NEXT_R %ecx
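The field put reduces to "resolve if needed, null-check, store at byteOffset". A sketch of the 32-bit (movl) case, with resFields standing in for pDvmDex->pResFields; the movb/movw template variants store 1 or 2 bytes at the same address:

    /* Sketch of iput/jumbo vBBBB, vCCCC, field@AAAAAAAA (32-bit variant). */
    static bool iputJumbo(InstField** resFields, const Method* curMethod,
                          u4 fieldIdx, Object* obj, u4 value)
    {
        InstField* field = resFields[fieldIdx];             /* cached entry */
        if (field == NULL) {
            field = dvmResolveInstField(curMethod->clazz, fieldIdx);
            if (field == NULL)
                return false;                               /* exception pending */
        }
        if (obj == NULL)
            return false;                                   /* common_errNullObject */
        *(u4*)((u1*) obj + field->byteOffset) = value;      /* obj.field <- v[BBBB] */
        return true;
    }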
diff --git a/vm/mterp/x86/OP_IPUT_OBJECT.S b/vm/mterp/x86/OP_IPUT_OBJECT.S
index 03dcb08..6c9fd6e 100644
--- a/vm/mterp/x86/OP_IPUT_OBJECT.S
+++ b/vm/mterp/x86/OP_IPUT_OBJECT.S
@@ -10,29 +10,25 @@
      * for: iput-object
      */
     /* op vA, vB, field@CCCC */
-    movl    rGLUE,%ecx
-    movzwl  2(rPC),%edx                         # edx<- 0000CCCC
-    movl    offGlue_methodClassDex(%ecx),%eax   # eax<- DvmDex
+    movl    rSELF,%ecx
+    SPILL(rIBASE)
+    movzwl  2(rPC),rIBASE                       # rIBASE<- 0000CCCC
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
     movzbl  rINSTbl,%ecx                        # ecx<- BA
     sarl    $$4,%ecx                            # ecx<- B
     movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
     andb    $$0xf,rINSTbl                       # rINST<- A
     GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
-    movl    (%eax,%edx,4),%eax                  # resolved entry
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
     testl   %eax,%eax                           # is resolved entry null?
     jne     .L${opcode}_finish                  # no, already resolved
-    movl    %edx,OUT_ARG1(%esp)
-    movl    rGLUE,%edx
-    jmp     .L${opcode}_resolve
-%break
-
-
-.L${opcode}_resolve:
+    movl    rIBASE,OUT_ARG1(%esp)
+    movl    rSELF,rIBASE
     EXPORT_PC
-    movl    offGlue_method(%edx),%edx           # edx<- current method
-    movl    offMethod_clazz(%edx),%edx          # edx<- method->clazz
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
     SPILL_TMP1(%ecx)                            # save obj pointer across call
-    movl    %edx,OUT_ARG0(%esp)                 # pass in method->clazz
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
     call    dvmResolveInstField                 #  ... to dvmResolveInstField
     UNSPILL_TMP1(%ecx)
     testl   %eax,%eax                           # returns InstrField ptr
@@ -44,7 +40,7 @@
      * Currently:
      *   eax holds resolved field
      *   ecx holds object
-     *   %edx is scratch, but needs to be unspilled
+     *   rIBASE is scratch, but needs to be unspilled
      *   rINST holds A
      */
     GET_VREG_R rINST rINST                      # rINST<- v[A]
@@ -52,13 +48,14 @@
     testl   %ecx,%ecx                           # object null?
     je      common_errNullObject                # object was null
     movl    rINST,(%ecx,%eax)      # obj.field <- v[A](8/16/32 bits)
-    movl    rGLUE,%eax
+    movl    rSELF,%eax
     testl   rINST,rINST                         # stored a NULL?
-    movl    offGlue_cardTable(%eax),%eax        # get card table base
-    FETCH_INST_OPCODE 2 %edx
+    movl    offThread_cardTable(%eax),%eax      # get card table base
     je      1f                                  # skip card mark if null store
     shrl    $$GC_CARD_SHIFT,%ecx                # object head to card number
     movb    %al,(%eax,%ecx)                     # mark card using object head
 1:
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_IPUT_OBJECT_JUMBO.S b/vm/mterp/x86/OP_IPUT_OBJECT_JUMBO.S
new file mode 100644
index 0000000..c699595
--- /dev/null
+++ b/vm/mterp/x86/OP_IPUT_OBJECT_JUMBO.S
@@ -0,0 +1,57 @@
+%default { "sqnum":"0" }
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Jumbo object field put.
+     */
+    /* iput-object/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)
+    movl    2(rPC),rIBASE                       # rIBASE<- AAAAAAAA
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzwl  8(rPC),%ecx                         # ecx<- CCCC
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[CCCC], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .L${opcode}_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)
+    movl    rSELF,rIBASE
+    EXPORT_PC
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
+    SPILL_TMP1(%ecx)                            # save obj pointer across call
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
+    call    dvmResolveInstField                 #  ... to dvmResolveInstField
+    UNSPILL_TMP1(%ecx)
+    testl   %eax,%eax                           # returns InstrField ptr
+    jne     .L${opcode}_finish
+    jmp     common_exceptionThrown
+
+.L${opcode}_finish:
+    /*
+     * Currently:
+     *   eax holds resolved field
+     *   ecx holds object
+     *   rIBASE is scratch, but needs to be unspilled
+     *   rINST holds BBBB
+     */
+    GET_VREG_R rINST rINST                      # rINST<- v[BBBB]
+    movl    offInstField_byteOffset(%eax),%eax  # eax<- byte offset of field
+    testl   %ecx,%ecx                           # object null?
+    je      common_errNullObject                # object was null
+    movl    rINST,(%ecx,%eax)      # obj.field <- v[BBBB](8/16/32 bits)
+    movl    rSELF,%eax
+    testl   rINST,rINST                         # stored a NULL?
+    movl    offThread_cardTable(%eax),%eax      # get card table base
+    je      1f                                  # skip card mark if null store
+    shrl    $$GC_CARD_SHIFT,%ecx                # object head to card number
+    movb    %al,(%eax,%ecx)                     # mark card using object head
+1:
+    FETCH_INST_OPCODE 5 %ecx
+    UNSPILL(rIBASE)
+    ADVANCE_PC 5
+    GOTO_NEXT_R %ecx
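The tail of the object variant adds the GC write barrier. A sketch of what the final movb does, assuming the usual Thread.cardTable field and that the card-table base is biased so its low byte is the "dirty" value (which is why %al can be stored directly):

    /* Sketch of the card mark performed after a non-null reference store. */
    static void markCardForStore(const Thread* self, const Object* obj,
                                 const Object* stored)
    {
        u1* cardTable = self->cardTable;
        if (stored != NULL)                                 /* skip mark on null store */
            cardTable[(u4) obj >> GC_CARD_SHIFT] = (u1)(u4) cardTable;
    }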
diff --git a/vm/mterp/x86/OP_IPUT_OBJECT_QUICK.S b/vm/mterp/x86/OP_IPUT_OBJECT_QUICK.S
index 7fa99a8..b628e57 100644
--- a/vm/mterp/x86/OP_IPUT_OBJECT_QUICK.S
+++ b/vm/mterp/x86/OP_IPUT_OBJECT_QUICK.S
@@ -11,17 +11,13 @@
     testl     %ecx,%ecx                 # is object null?
     je        common_errNullObject
     movl      rINST,(%ecx,%eax,1)
-    movl      rGLUE,%eax
-    jmp       .L${opcode}_finish
-%break
-
-.L${opcode}_finish:
+    movl      rSELF,%eax
     testl     rINST,rINST               # did we store null?
-    FETCH_INST_OPCODE 2 %edx
-    movl      offGlue_cardTable(%eax),%eax  # get card table base
+    movl      offThread_cardTable(%eax),%eax  # get card table base
     je        1f                            # skip card mark if null store
     shrl      $$GC_CARD_SHIFT,%ecx          # object head to card number
     movb      %al,(%eax,%ecx)               # mark card based on object head
 1:
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_IPUT_QUICK.S b/vm/mterp/x86/OP_IPUT_QUICK.S
index 6cec0d1..cd4fe3b 100644
--- a/vm/mterp/x86/OP_IPUT_QUICK.S
+++ b/vm/mterp/x86/OP_IPUT_QUICK.S
@@ -9,8 +9,8 @@
     GET_VREG_R  rINST,rINST             # rINST<- v[A]
     movzwl    2(rPC),%eax               # eax<- field byte offset
     testl     %ecx,%ecx                 # is object null?
-    FETCH_INST_OPCODE 2 %edx
     je        common_errNullObject
     movl      rINST,(%ecx,%eax,1)
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_IPUT_SHORT_JUMBO.S b/vm/mterp/x86/OP_IPUT_SHORT_JUMBO.S
new file mode 100644
index 0000000..c121b28
--- /dev/null
+++ b/vm/mterp/x86/OP_IPUT_SHORT_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "x86/OP_IPUT_JUMBO.S" { "store":"movw", "reg":"rINSTw", "sqnum":"4" }
diff --git a/vm/mterp/x86/OP_IPUT_WIDE.S b/vm/mterp/x86/OP_IPUT_WIDE.S
index 435d474..d481d02 100644
--- a/vm/mterp/x86/OP_IPUT_WIDE.S
+++ b/vm/mterp/x86/OP_IPUT_WIDE.S
@@ -8,29 +8,25 @@
      *
      */
     /* op vA, vB, field@CCCC */
-    movl    rGLUE,%ecx
-    movzwl  2(rPC),%edx                         # edx<- 0000CCCC
-    movl    offGlue_methodClassDex(%ecx),%eax   # eax<- DvmDex
+    movl    rSELF,%ecx
+    SPILL(rIBASE)
+    movzwl  2(rPC),rIBASE                       # rIBASE<- 0000CCCC
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
     movzbl  rINSTbl,%ecx                        # ecx<- BA
     sarl    $$4,%ecx                            # ecx<- B
     movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
     andb    $$0xf,rINSTbl                       # rINST<- A
     GET_VREG_R %ecx %ecx                        # ecx<- fp[B], the object ptr
-    movl    (%eax,%edx,4),%eax                  # resolved entry
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
     testl   %eax,%eax                           # is resolved entry null?
     jne     .L${opcode}_finish                  # no, already resolved
-    movl    %edx,OUT_ARG1(%esp)
-    movl    rGLUE,%edx
-    jmp     .L${opcode}_resolve
-%break
-
-
-.L${opcode}_resolve:
+    movl    rIBASE,OUT_ARG1(%esp)
+    movl    rSELF,rIBASE
     EXPORT_PC
-    movl    offGlue_method(%edx),%edx           # edx<- current method
-    movl    offMethod_clazz(%edx),%edx          # edx<- method->clazz
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
     SPILL_TMP1(%ecx)                            # save obj pointer across call
-    movl    %edx,OUT_ARG0(%esp)                 # pass in method->clazz
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
     call    dvmResolveInstField                 #  ... to dvmResolveInstField
     UNSPILL_TMP1(%ecx)
     testl   %eax,%eax                           #  ... which returns InstrField ptr
@@ -42,7 +38,7 @@
      * Currently:
      *   eax holds resolved field
      *   ecx holds object
-     *   %edx is scratch, but needs to be unspilled
+     *   rIBASE is scratch, but needs to be unspilled
      *   rINST holds A
      */
     movl    offInstField_byteOffset(%eax),%eax  # eax<- byte offset of field
@@ -51,8 +47,9 @@
     leal    (%ecx,%eax,1),%eax                  # eax<- address of field
     GET_VREG_WORD %ecx rINST 0                  # ecx<- lsw
     GET_VREG_WORD rINST rINST 1                 # rINST<- msw
-    FETCH_INST_OPCODE 2 %edx
     movl    rINST,4(%eax)
     movl    %ecx,(%eax)
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_IPUT_WIDE_JUMBO.S b/vm/mterp/x86/OP_IPUT_WIDE_JUMBO.S
new file mode 100644
index 0000000..38c40b1
--- /dev/null
+++ b/vm/mterp/x86/OP_IPUT_WIDE_JUMBO.S
@@ -0,0 +1,52 @@
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Jumbo 64-bit instance field put.
+     */
+    /* iput-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    movl    rSELF,%ecx
+    SPILL(rIBASE)
+    movl    2(rPC),rIBASE                       # rIBASE<- AAAAAAAA
+    movl    offThread_methodClassDex(%ecx),%eax # eax<- DvmDex
+    movzwl  8(rPC),%ecx                         # ecx<- CCCC
+    movl    offDvmDex_pResFields(%eax),%eax     # eax<- pDvmDex->pResFields
+    GET_VREG_R %ecx %ecx                        # ecx<- fp[CCCC], the object ptr
+    movl    (%eax,rIBASE,4),%eax                # resolved entry
+    testl   %eax,%eax                           # is resolved entry null?
+    jne     .L${opcode}_finish                  # no, already resolved
+    movl    rIBASE,OUT_ARG1(%esp)
+    movl    rSELF,rIBASE
+    EXPORT_PC
+    movl    offThread_method(rIBASE),rIBASE     # rIBASE<- current method
+    movl    offMethod_clazz(rIBASE),rIBASE      # rIBASE<- method->clazz
+    SPILL_TMP1(%ecx)                            # save obj pointer across call
+    movl    rIBASE,OUT_ARG0(%esp)               # pass in method->clazz
+    call    dvmResolveInstField                 #  ... to dvmResolveInstField
+    UNSPILL_TMP1(%ecx)
+    testl   %eax,%eax                           #  ... which returns InstrField ptr
+    jne     .L${opcode}_finish
+    jmp     common_exceptionThrown
+
+.L${opcode}_finish:
+    /*
+     * Currently:
+     *   eax holds resolved field
+     *   ecx holds object
+     *   rIBASE is scratch, but needs to be unspilled
+     *   rINST holds BBBB
+     */
+    movl    offInstField_byteOffset(%eax),%eax  # eax<- byte offset of field
+    testl   %ecx,%ecx                           # object null?
+    je      common_errNullObject                # object was null
+    leal    (%ecx,%eax,1),%eax                  # eax<- address of field
+    GET_VREG_WORD %ecx rINST 0                  # ecx<- lsw
+    GET_VREG_WORD rINST rINST 1                 # rINST<- msw
+    movl    rINST,4(%eax)
+    movl    %ecx,(%eax)
+    FETCH_INST_OPCODE 5 %ecx
+    UNSPILL(rIBASE)
+    ADVANCE_PC 5
+    GOTO_NEXT_R %ecx
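For the wide variant the only difference is that the value is written as two 32-bit halves, low word first, exactly as the pair of movl instructions above does (same assumptions as the earlier sketches):

    /* Sketch of the final store in iput-wide/jumbo. */
    static void iputWideStore(Object* obj, const InstField* field, const u4* vreg)
    {
        u4* addr = (u4*)((u1*) obj + field->byteOffset);    /* address of field */
        addr[0] = vreg[0];                                  /* lsw */
        addr[1] = vreg[1];                                  /* msw */
    }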
diff --git a/vm/mterp/x86/OP_IPUT_WIDE_QUICK.S b/vm/mterp/x86/OP_IPUT_WIDE_QUICK.S
index 63bf89a..12eeed6 100644
--- a/vm/mterp/x86/OP_IPUT_WIDE_QUICK.S
+++ b/vm/mterp/x86/OP_IPUT_WIDE_QUICK.S
@@ -12,8 +12,8 @@
     andb      $$0xf,rINSTbl             # rINST<- A
     GET_VREG_WORD %eax rINST 0          # eax<- lsw
     GET_VREG_WORD rINST rINST 1         # rINST<- msw
-    FETCH_INST_OPCODE 2 %edx
     movl      %eax,(%ecx)
     movl      rINST,4(%ecx)
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_MONITOR_ENTER.S b/vm/mterp/x86/OP_MONITOR_ENTER.S
index 848f0fd..a630db1 100644
--- a/vm/mterp/x86/OP_MONITOR_ENTER.S
+++ b/vm/mterp/x86/OP_MONITOR_ENTER.S
@@ -4,26 +4,17 @@
      * Synchronize on an object.
      */
     /* monitor-enter vAA */
-    movl    rGLUE,%ecx
+    movl    rSELF,%ecx
     GET_VREG_R %eax rINST               # eax<- vAA
-    movl    offGlue_self(%ecx),%ecx     # ecx<- glue->self
     FETCH_INST_WORD 1
     testl   %eax,%eax                   # null object?
-    EXPORT_PC                           # need for precise GC, MONITOR_TRACKING
-    jne     .L${opcode}_continue
-    jmp     common_errNullObject
-%break
-
-.L${opcode}_continue:
+    EXPORT_PC                           # need for precise GC
+    je     common_errNullObject
     movl    %ecx,OUT_ARG0(%esp)
     movl    %eax,OUT_ARG1(%esp)
+    SPILL(rIBASE)
     call    dvmLockObject               # dvmLockObject(self,object)
-#ifdef WITH_DEADLOCK_PREDICTION
-    movl    rGLUE,%ecx
-    movl    offGlueSelf(%ecx),%ecx      # ecx<- glue->self
-    movl    offThread_exception(%ecx),%eax
-    testl   %eax,%eax
-    jne     common_exceptionThrown
-#endif
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 1 %ecx
     ADVANCE_PC 1
-    GOTO_NEXT
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_MONITOR_EXIT.S b/vm/mterp/x86/OP_MONITOR_EXIT.S
index 7e4e3d0..98bc373 100644
--- a/vm/mterp/x86/OP_MONITOR_EXIT.S
+++ b/vm/mterp/x86/OP_MONITOR_EXIT.S
@@ -10,23 +10,20 @@
      */
     /* monitor-exit vAA */
     GET_VREG_R %eax rINST
-    movl    rGLUE,%ecx
+    movl    rSELF,%ecx
     EXPORT_PC
     testl   %eax,%eax                   # null object?
     je      .L${opcode}_errNullObject   # go if so
-    movl    offGlue_self(%ecx),%ecx     # ecx<- glue->self
     movl    %eax,OUT_ARG1(%esp)
     movl    %ecx,OUT_ARG0(%esp)
-    jmp     .L${opcode}_continue
-%break
-
-.L${opcode}_continue:
+    SPILL(rIBASE)
     call    dvmUnlockObject             # unlock(self,obj)
-    FETCH_INST_OPCODE 1 %edx
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 1 %ecx
     testl   %eax,%eax                   # success?
     ADVANCE_PC 1
     je      common_exceptionThrown      # no, exception pending
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 .L${opcode}_errNullObject:
     ADVANCE_PC 1                        # advance before throw
     jmp     common_errNullObject
diff --git a/vm/mterp/x86/OP_MOVE.S b/vm/mterp/x86/OP_MOVE.S
index 0953bec..ec05288 100644
--- a/vm/mterp/x86/OP_MOVE.S
+++ b/vm/mterp/x86/OP_MOVE.S
@@ -4,8 +4,8 @@
     movzbl rINSTbl,%eax          # eax<- BA
     andb   $$0xf,%al             # eax<- A
     shrl   $$4,rINST            # rINST<- B
-    GET_VREG_R %ecx rINST
-    FETCH_INST_OPCODE 1 %edx
+    GET_VREG_R rINST rINST
+    FETCH_INST_OPCODE 1 %ecx
     ADVANCE_PC 1
-    SET_VREG %ecx %eax           # fp[A]<-fp[B]
-    GOTO_NEXT_R %edx
+    SET_VREG rINST %eax           # fp[A]<-fp[B]
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_MOVE_16.S b/vm/mterp/x86/OP_MOVE_16.S
index 6554135..e25230a 100644
--- a/vm/mterp/x86/OP_MOVE_16.S
+++ b/vm/mterp/x86/OP_MOVE_16.S
@@ -3,8 +3,8 @@
     /* op vAAAA, vBBBB */
     movzwl    4(rPC),%ecx              # ecx<- BBBB
     movzwl    2(rPC),%eax              # eax<- AAAA
-    GET_VREG_R  %ecx %ecx
-    FETCH_INST_OPCODE 3 %edx
+    GET_VREG_R  rINST %ecx
+    FETCH_INST_OPCODE 3 %ecx
     ADVANCE_PC 3
-    SET_VREG  %ecx %eax
-    GOTO_NEXT_R %edx
+    SET_VREG  rINST %eax
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_MOVE_EXCEPTION.S b/vm/mterp/x86/OP_MOVE_EXCEPTION.S
index f9542ae..0853866 100644
--- a/vm/mterp/x86/OP_MOVE_EXCEPTION.S
+++ b/vm/mterp/x86/OP_MOVE_EXCEPTION.S
@@ -1,10 +1,9 @@
 %verify "executed"
     /* move-exception vAA */
-    movl    rGLUE,%ecx
-    movl    offGlue_self(%ecx),%ecx    # ecx<- glue->self
+    movl    rSELF,%ecx
     movl    offThread_exception(%ecx),%eax # eax<- dvmGetException bypass
     SET_VREG %eax rINST                # fp[AA]<- exception object
-    FETCH_INST_OPCODE 1 %edx
+    FETCH_INST_OPCODE 1 %eax
     ADVANCE_PC 1
     movl    $$0,offThread_exception(%ecx) # dvmClearException bypass
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %eax
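The handler open-codes the exception fetch and clear rather than calling the helpers, which is what the "bypass" comments refer to. Roughly, assuming the usual Thread.exception field:

    /* Sketch of move-exception vAA. */
    static void moveException(Thread* self, u4* fp, u4 vAA)
    {
        fp[vAA] = (u4) self->exception;     /* fp[AA]<- pending exception object */
        self->exception = NULL;             /* clear it, bypassing dvmClearException */
    }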
diff --git a/vm/mterp/x86/OP_MOVE_FROM16.S b/vm/mterp/x86/OP_MOVE_FROM16.S
index 3c99c55..120edb5 100644
--- a/vm/mterp/x86/OP_MOVE_FROM16.S
+++ b/vm/mterp/x86/OP_MOVE_FROM16.S
@@ -3,8 +3,8 @@
     /* op vAA, vBBBB */
     movzx    rINSTbl,%eax              # eax <= AA
     movw     2(rPC),rINSTw             # rINSTw <= BBBB
-    GET_VREG_R %ecx rINST              # ecx<- fp[BBBB]
-    FETCH_INST_OPCODE 2 %edx
+    GET_VREG_R rINST rINST             # rINST<- fp[BBBB]
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    SET_VREG %ecx %eax                # fp[AA]<- ecx]
-    GOTO_NEXT_R %edx
+    SET_VREG rINST %eax                # fp[AA]<- rINST
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_MOVE_RESULT.S b/vm/mterp/x86/OP_MOVE_RESULT.S
index 07770cb..4a6917b 100644
--- a/vm/mterp/x86/OP_MOVE_RESULT.S
+++ b/vm/mterp/x86/OP_MOVE_RESULT.S
@@ -1,10 +1,9 @@
 %verify "executed"
     /* for: move-result, move-result-object */
     /* op vAA */
-    movl     rGLUE,%eax                    # eax<- rGLUE
-    movzx    rINSTbl,%ecx                  # ecx<- AA
-    movl     offGlue_retval(%eax),%eax     # eax<- glue->retval.l
-    FETCH_INST_OPCODE 1 %edx
+    movl     rSELF,%eax                    # eax<- rSELF
+    movl     offThread_retval(%eax),%eax   # eax<- self->retval.l
+    FETCH_INST_OPCODE 1 %ecx
     ADVANCE_PC 1
-    SET_VREG  %eax %ecx                    # fp[AA]<- retval.l
-    GOTO_NEXT_R %edx
+    SET_VREG  %eax rINST                   # fp[AA]<- retval.l
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_MOVE_RESULT_WIDE.S b/vm/mterp/x86/OP_MOVE_RESULT_WIDE.S
index 9f8d315..022bdb3 100644
--- a/vm/mterp/x86/OP_MOVE_RESULT_WIDE.S
+++ b/vm/mterp/x86/OP_MOVE_RESULT_WIDE.S
@@ -1,10 +1,10 @@
 %verify "executed"
     /* move-result-wide vAA */
-    movl    rGLUE,%ecx
-    movl    offGlue_retval(%ecx),%eax
-    movl    4+offGlue_retval(%ecx),%ecx
-    FETCH_INST_OPCODE 1 %edx
+    movl    rSELF,%ecx
+    movl    offThread_retval(%ecx),%eax
+    movl    4+offThread_retval(%ecx),%ecx
     SET_VREG_WORD %eax rINST 0     # v[AA+0] <- eax
     SET_VREG_WORD %ecx rINST 1     # v[AA+1] <- ecx
+    FETCH_INST_OPCODE 1 %ecx
     ADVANCE_PC 1
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_MOVE_WIDE.S b/vm/mterp/x86/OP_MOVE_WIDE.S
index 2d89e3b..df59574 100644
--- a/vm/mterp/x86/OP_MOVE_WIDE.S
+++ b/vm/mterp/x86/OP_MOVE_WIDE.S
@@ -6,8 +6,8 @@
     GET_VREG_WORD %eax rINST 0            # eax<- v[B+0]
     GET_VREG_WORD rINST rINST 1           # rINST<- v[B+1]
     andb      $$0xf,%cl                   # ecx <- A
-    FETCH_INST_OPCODE 1 %edx
     SET_VREG_WORD rINST %ecx 1            # v[A+1]<- rINST
-    ADVANCE_PC 1
     SET_VREG_WORD %eax %ecx 0             # v[A+0]<- eax
-    GOTO_NEXT_R %edx
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_MOVE_WIDE_16.S b/vm/mterp/x86/OP_MOVE_WIDE_16.S
index 4cec42c..26ea8a4 100644
--- a/vm/mterp/x86/OP_MOVE_WIDE_16.S
+++ b/vm/mterp/x86/OP_MOVE_WIDE_16.S
@@ -5,8 +5,8 @@
     movzwl    2(rPC),%eax            # eax<- AAAA
     GET_VREG_WORD rINST %ecx 0       # rINSTw_WORD<- v[BBBB+0]
     GET_VREG_WORD %ecx %ecx 1        # ecx<- v[BBBB+1]
-    FETCH_INST_OPCODE 3 %edx
     SET_VREG_WORD rINST %eax 0       # v[AAAA+0]<- rINST
-    ADVANCE_PC 3
     SET_VREG_WORD %ecx %eax 1        # v[AAAA+1]<- ecx
-    GOTO_NEXT_R %edx
+    FETCH_INST_OPCODE 3 %ecx
+    ADVANCE_PC 3
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_MOVE_WIDE_FROM16.S b/vm/mterp/x86/OP_MOVE_WIDE_FROM16.S
index 6b59c68..3d820ad 100644
--- a/vm/mterp/x86/OP_MOVE_WIDE_FROM16.S
+++ b/vm/mterp/x86/OP_MOVE_WIDE_FROM16.S
@@ -5,8 +5,8 @@
     movzbl    rINSTbl,%eax             # eax<- AAAA
     GET_VREG_WORD rINST %ecx 0         # rINST<- v[BBBB+0]
     GET_VREG_WORD %ecx %ecx 1          # ecx<- v[BBBB+1]
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
     SET_VREG_WORD rINST %eax 0         # v[AAAA+0]<- rINST
     SET_VREG_WORD %ecx %eax 1          # v[AAAA+1]<- eax
-    GOTO_NEXT_R %edx
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_MUL_INT.S b/vm/mterp/x86/OP_MUL_INT.S
index df4be1d..a958114 100644
--- a/vm/mterp/x86/OP_MUL_INT.S
+++ b/vm/mterp/x86/OP_MUL_INT.S
@@ -6,8 +6,10 @@
     movzbl   2(rPC),%eax            # eax<- BB
     movzbl   3(rPC),%ecx            # ecx<- CC
     GET_VREG_R %eax %eax            # eax<- vBB
-    imull    (rFP,%ecx,4),%eax      # trashes edx
-    FETCH_INST_OPCODE 2 %edx
+    SPILL(rIBASE)
+    imull    (rFP,%ecx,4),%eax      # trashes rIBASE/edx
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
     SET_VREG %eax rINST
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_MUL_INT_2ADDR.S b/vm/mterp/x86/OP_MUL_INT_2ADDR.S
index 00d294f..ebd5160 100644
--- a/vm/mterp/x86/OP_MUL_INT_2ADDR.S
+++ b/vm/mterp/x86/OP_MUL_INT_2ADDR.S
@@ -1,11 +1,13 @@
 %verify "executed"
     /* mul vA, vB */
-    movzx   rINSTbl,%ecx               # ecx<- A+
+    movzx   rINSTbl,%ecx              # ecx<- A+
     sarl    $$4,rINST                 # rINST<- B
-    GET_VREG_R %eax rINST              # eax<- vB
-    andb    $$0xf,%cl                  # ecx<- A
-    imull   (rFP,%ecx,4),%eax
-    FETCH_INST_OPCODE 1 %edx
+    GET_VREG_R %eax rINST             # eax<- vB
+    andb    $$0xf,%cl                 # ecx<- A
+    SPILL(rIBASE)
+    imull   (rFP,%ecx,4),%eax         # trashes rIBASE/edx
+    UNSPILL(rIBASE)
     SET_VREG %eax %ecx
+    FETCH_INST_OPCODE 1 %ecx
     ADVANCE_PC 1
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_MUL_INT_LIT16.S b/vm/mterp/x86/OP_MUL_INT_LIT16.S
index 57c9d4c..a196c7e 100644
--- a/vm/mterp/x86/OP_MUL_INT_LIT16.S
+++ b/vm/mterp/x86/OP_MUL_INT_LIT16.S
@@ -6,8 +6,10 @@
     GET_VREG_R %eax %eax                # eax<- vB
     movswl   2(rPC),%ecx                # ecx<- ssssCCCC
     andb     $$0xf,rINSTbl              # rINST<- A
-    imull     %ecx,%eax                 # trashes edx
-    FETCH_INST_OPCODE 2 %edx
+    SPILL(rIBASE)
+    imull     %ecx,%eax                 # trashes rIBASE/edx
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
     SET_VREG %eax rINST
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_MUL_INT_LIT8.S b/vm/mterp/x86/OP_MUL_INT_LIT8.S
index f662d25..f36ffc7 100644
--- a/vm/mterp/x86/OP_MUL_INT_LIT8.S
+++ b/vm/mterp/x86/OP_MUL_INT_LIT8.S
@@ -3,8 +3,10 @@
     movzbl    2(rPC),%eax              # eax<- BB
     movsbl    3(rPC),%ecx              # ecx<- ssssssCC
     GET_VREG_R   %eax %eax             # eax<- rBB
-    imull     %ecx,%eax                # trashes edx
-    FETCH_INST_OPCODE 2 %edx
+    SPILL(rIBASE)
+    imull     %ecx,%eax                # trashes rIBASE/edx
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
     SET_VREG  %eax rINST
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_MUL_LONG.S b/vm/mterp/x86/OP_MUL_LONG.S
index 4213299..7cf6ccb 100644
--- a/vm/mterp/x86/OP_MUL_LONG.S
+++ b/vm/mterp/x86/OP_MUL_LONG.S
@@ -15,6 +15,7 @@
     SPILL_TMP2(%esi)                   # save Dalvik PC
     SPILL(rFP)
     SPILL(rINST)
+    SPILL(rIBASE)
     leal      (rFP,%eax,4),%esi        # esi<- &v[B]
     leal      (rFP,%ecx,4),rFP         # rFP<- &v[C]
     movl      4(%esi),%ecx             # ecx<- Bmsw
@@ -26,14 +27,11 @@
     mull      (%esi)                   # eax<- (Clsw*Alsw)
     UNSPILL(rINST)
     UNSPILL(rFP)
-    jmp       .L${opcode}_continue
-%break
-
-.L${opcode}_continue:
-    leal      (%ecx,%edx),%edx     # full result now in %edx:%eax
+    leal      (%ecx,rIBASE),rIBASE # full result now in rIBASE:%eax
     UNSPILL_TMP2(%esi)             # Restore Dalvik PC
     FETCH_INST_OPCODE 2 %ecx       # Fetch next instruction
-    movl      %edx,4(rFP,rINST,4)  # v[B+1]<- %edx
+    movl      rIBASE,4(rFP,rINST,4)# v[B+1]<- rIBASE
+    UNSPILL(rIBASE)
     movl      %eax,(rFP,rINST,4)   # v[B]<- %eax
     ADVANCE_PC 2
     GOTO_NEXT_R %ecx
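The multiply keeps only the low 64 bits of the 128-bit product, so the two cross-term imull results only affect the high word, and the single widening mull supplies the low word plus its carry into the high word. A sketch of the arithmetic, with v[B] and v[C] treated as lsw/msw pairs:

    /* Sketch of mul-long: dst = vB * vC, truncated to 64 bits. */
    static void mulLong(u4* dst, const u4* vB, const u4* vC)
    {
        u8 low = (u8) vB[0] * vC[0];                        /* mull: 32x32->64 */
        u4 msw = vB[1] * vC[0] + vC[1] * vB[0]              /* the two imull terms */
               + (u4)(low >> 32);                           /* carry from low product */
        dst[0] = (u4) low;                                  /* v[B+0]<- lsw */
        dst[1] = msw;                                       /* v[B+1]<- msw */
    }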
diff --git a/vm/mterp/x86/OP_MUL_LONG_2ADDR.S b/vm/mterp/x86/OP_MUL_LONG_2ADDR.S
index b8c41ab..9a0930c 100644
--- a/vm/mterp/x86/OP_MUL_LONG_2ADDR.S
+++ b/vm/mterp/x86/OP_MUL_LONG_2ADDR.S
@@ -3,9 +3,9 @@
      * Signed 64-bit integer multiply, 2-addr version
      *
      * We could definitely use more free registers for
-     * this code.  We must spill %edx (edx) because it
+     * this code.  We must spill %edx (rIBASE) because it
      * is used by imul.  We'll also spill rINST (ebx),
-     * giving us eax, ebc, ecx and edx as computational
+     * giving us eax, ebx, ecx and rIBASE as computational
      * temps.  On top of that, we'll spill %esi (edi)
      * for use as the vA pointer and rFP (esi) for use
      * as the vB pointer.  Yuck.
@@ -16,6 +16,7 @@
     sarl      $$4,rINST                # rINST<- B
     SPILL_TMP2(%esi)
     SPILL(rFP)
+    SPILL(rIBASE)
     leal      (rFP,%eax,4),%esi        # %esi<- &v[A]
     leal      (rFP,rINST,4),rFP        # rFP<- &v[B]
     movl      4(%esi),%ecx             # ecx<- Amsw
@@ -25,15 +26,12 @@
     addl      %eax,%ecx                # ecx<- (Amsw*Blsw)+(Bmsw*Alsw)
     movl      (rFP),%eax               # eax<- Blsw
     mull      (%esi)                   # eax<- (Blsw*Alsw)
-    jmp       .L${opcode}_continue
-%break
-
-.L${opcode}_continue:
-    leal      (%ecx,%edx),%edx         # full result now in %edx:%eax
-    movl      %edx,4(%esi)             # v[A+1]<- %edx
+    leal      (%ecx,rIBASE),rIBASE     # full result now in rIBASE:%eax
+    movl      rIBASE,4(%esi)           # v[A+1]<- rIBASE
     movl      %eax,(%esi)              # v[A]<- %eax
     UNSPILL_TMP2(%esi)
     FETCH_INST_OPCODE 1 %ecx
+    UNSPILL(rIBASE)
     UNSPILL(rFP)
     ADVANCE_PC 1
     GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_NEG_LONG.S b/vm/mterp/x86/OP_NEG_LONG.S
index a69afbc..9543351 100644
--- a/vm/mterp/x86/OP_NEG_LONG.S
+++ b/vm/mterp/x86/OP_NEG_LONG.S
@@ -8,8 +8,8 @@
     negl      %eax
     adcl      $$0,%ecx
     negl      %ecx
-    FETCH_INST_OPCODE 1 %edx
     SET_VREG_WORD %eax rINST 0    # v[A+0]<- eax
+    FETCH_INST_OPCODE 1 %eax
     SET_VREG_WORD %ecx rINST 1    # v[A+1]<- ecx
     ADVANCE_PC 1
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %eax
diff --git a/vm/mterp/x86/OP_NEW_ARRAY.S b/vm/mterp/x86/OP_NEW_ARRAY.S
index 4f36ac2..c9690d3 100644
--- a/vm/mterp/x86/OP_NEW_ARRAY.S
+++ b/vm/mterp/x86/OP_NEW_ARRAY.S
@@ -9,32 +9,29 @@
      * check for it here.
      */
     /* new-array vA, vB, class@CCCC */
-    movl    rGLUE,%ecx
+    movl    rSELF,%ecx
     EXPORT_PC
-    movl    offGlue_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
+    movl    offThread_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
     movzwl  2(rPC),%eax                       # eax<- CCCC
     movl    offDvmDex_pResClasses(%ecx),%ecx  # ecx<- pDvmDex->pResClasses
+    SPILL(rIBASE)
     movl    (%ecx,%eax,4),%ecx                # ecx<- resolved class
     movzbl  rINSTbl,%eax
     sarl    $$4,%eax                          # eax<- B
     GET_VREG_R %eax %eax                      # eax<- vB (array length)
     andb    $$0xf,rINSTbl                     # rINST<- A
     testl   %eax,%eax
-    js      common_errNegativeArraySize       # bail
+    js      common_errNegativeArraySize       # bail, passing len in eax
     testl   %ecx,%ecx                         # already resolved?
     jne     .L${opcode}_finish                # yes, fast path
-    jmp     .L${opcode}_resolve               # resolve now
-%break
-
     /*
      * Resolve class.  (This is an uncommon case.)
      *  ecx holds class (null here)
      *  eax holds array length (vB)
      */
-.L${opcode}_resolve:
-    movl    rGLUE,%ecx
+    movl    rSELF,%ecx
     SPILL_TMP1(%eax)                   # save array length
-    movl    offGlue_method(%ecx),%ecx  # ecx<- glue->method
+    movl    offThread_method(%ecx),%ecx  # ecx<- self->method
     movzwl  2(rPC),%eax                # eax<- CCCC
     movl    offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
     movl    %eax,OUT_ARG1(%esp)
@@ -58,9 +55,10 @@
     movl    %eax,OUT_ARG1(%esp)
     movl    $$ALLOC_DONT_TRACK,OUT_ARG2(%esp)
     call    dvmAllocArrayByClass    # eax<- call(clazz,length,flags)
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
     testl   %eax,%eax               # failed?
     je      common_exceptionThrown  # yup - go handle
     SET_VREG %eax rINST
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_NEW_ARRAY_JUMBO.S b/vm/mterp/x86/OP_NEW_ARRAY_JUMBO.S
new file mode 100644
index 0000000..5d72759
--- /dev/null
+++ b/vm/mterp/x86/OP_NEW_ARRAY_JUMBO.S
@@ -0,0 +1,62 @@
+%verify "executed"
+%verify "negative array length"
+%verify "allocation fails"
+    /*
+     * Allocate an array of objects, specified with the array class
+     * and a count.
+     *
+     * The verifier guarantees that this is an array class, so we don't
+     * check for it here.
+     */
+    /* new-array/jumbo vBBBB, vCCCC, class@AAAAAAAA */
+    movl    rSELF,%ecx
+    EXPORT_PC
+    movl    offThread_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
+    movl    2(rPC),%eax                       # eax<- AAAAAAAA
+    movl    offDvmDex_pResClasses(%ecx),%ecx  # ecx<- pDvmDex->pResClasses
+    SPILL(rIBASE)
+    movl    (%ecx,%eax,4),%ecx                # ecx<- resolved class
+    movzwl  8(rPC),%eax                       # eax<- CCCC
+    GET_VREG_R %eax %eax                      # eax<- vCCCC (array length)
+    testl   %eax,%eax
+    js      common_errNegativeArraySize       # bail, passing len in eax
+    testl   %ecx,%ecx                         # already resolved?
+    jne     .L${opcode}_finish                # yes, fast path
+    /*
+     * Resolve class.  (This is an uncommon case.)
+     *  ecx holds class (null here)
+     *  eax holds array length (vCCCC)
+     */
+    movl    rSELF,%ecx
+    SPILL_TMP1(%eax)                   # save array length
+    movl    offThread_method(%ecx),%ecx  # ecx<- self->method
+    movl    2(rPC),%eax                # eax<- AAAAAAAA
+    movl    offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+    movl    %eax,OUT_ARG1(%esp)
+    movl    $$0,OUT_ARG2(%esp)
+    movl    %ecx,OUT_ARG0(%esp)
+    call    dvmResolveClass            # eax<- call(clazz,ref,flag)
+    movl    %eax,%ecx
+    UNSPILL_TMP1(%eax)
+    testl   %ecx,%ecx                  # successful resolution?
+    je      common_exceptionThrown     # no, bail.
+# fall through to ${opcode}_finish
+
+    /*
+     * Finish allocation
+     *
+     * ecx holds class
+     * eax holds array length (vCCCC)
+     */
+.L${opcode}_finish:
+    movl    %ecx,OUT_ARG0(%esp)
+    movl    %eax,OUT_ARG1(%esp)
+    movl    $$ALLOC_DONT_TRACK,OUT_ARG2(%esp)
+    call    dvmAllocArrayByClass    # eax<- call(clazz,length,flags)
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 5 %ecx
+    testl   %eax,%eax               # failed?
+    je      common_exceptionThrown  # yup - go handle
+    SET_VREG %eax rINST
+    ADVANCE_PC 5
+    GOTO_NEXT_R %ecx
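
The jumbo handler keeps the same shape as OP_NEW_ARRAY: check the length,
hit the per-DEX resolved-class cache, fall into the resolve path only on a
miss, then allocate.  A rough C-level sketch of that flow; the struct and
field names are approximated from the offsets the assembly uses
(offThread_methodClassDex, offDvmDex_pResClasses, offMethod_clazz), and the
error paths are reduced to returning NULL with an exception pending:

static ArrayObject* newArrayJumboSketch(Thread* self, u4 classIdx, s4 length)
{
    if (length < 0)
        return NULL;                                /* common_errNegativeArraySize */

    ClassObject* clazz = self->methodClassDex->pResClasses[classIdx];
    if (clazz == NULL) {                            /* uncommon: resolve now */
        clazz = dvmResolveClass(self->method->clazz, classIdx, false);
        if (clazz == NULL)
            return NULL;                            /* resolution threw */
    }
    return dvmAllocArrayByClass(clazz, length, ALLOC_DONT_TRACK);
}
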
diff --git a/vm/mterp/x86/OP_NEW_INSTANCE.S b/vm/mterp/x86/OP_NEW_INSTANCE.S
index 6ceb933..3e268c4 100644
--- a/vm/mterp/x86/OP_NEW_INSTANCE.S
+++ b/vm/mterp/x86/OP_NEW_INSTANCE.S
@@ -10,9 +10,10 @@
      * Create a new instance of a class.
      */
     /* new-instance vAA, class@BBBB */
-    movl      rGLUE,%ecx
+    movl      rSELF,%ecx
     movzwl    2(rPC),%eax               # eax<- BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx  # ecx<- pDvmDex
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- pDvmDex
+    SPILL(rIBASE)
     movl      offDvmDex_pResClasses(%ecx),%ecx # ecx<- pDvmDex->pResClasses
     EXPORT_PC
     movl      (%ecx,%eax,4),%ecx        # ecx<- resolved class
@@ -20,24 +21,18 @@
     je        .L${opcode}_resolve       # no, go do it
 .L${opcode}_resolved:  # on entry, ecx<- class
     cmpb      $$CLASS_INITIALIZED,offClassObject_status(%ecx)
-    je        .L${opcode}_initialized
-    jmp       .L${opcode}_needinit
-%break
-
+    jne       .L${opcode}_needinit
 .L${opcode}_initialized:  # on entry, ecx<- class
-    /* TODO: remove test for interface/abstract, now done in verifier */
-    testl     $$(ACC_INTERFACE|ACC_ABSTRACT),offClassObject_accessFlags(%ecx)
     movl      $$ALLOC_DONT_TRACK,OUT_ARG1(%esp)
-    jne       .L${opcode}_abstract
-.L${opcode}_finish: # ecx=class
     movl     %ecx,OUT_ARG0(%esp)
     call     dvmAllocObject             # eax<- new object
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
     testl    %eax,%eax                  # success?
     je       common_exceptionThrown     # no, bail out
     SET_VREG %eax rINST
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
     /*
      * Class initialization required.
@@ -58,9 +53,9 @@
      *
      */
 .L${opcode}_resolve:
-    movl    rGLUE,%ecx
+    movl    rSELF,%ecx
     movzwl  2(rPC),%eax
-    movl    offGlue_method(%ecx),%ecx   # ecx<- glue->method
+    movl    offThread_method(%ecx),%ecx   # ecx<- self->method
     movl    %eax,OUT_ARG1(%esp)
     movl    offMethod_clazz(%ecx),%ecx  # ecx<- method->clazz
     movl    $$0,OUT_ARG2(%esp)
@@ -70,17 +65,3 @@
     testl   %ecx,%ecx                   # success?
     jne     .L${opcode}_resolved        # good to go
     jmp     common_exceptionThrown      # no, handle exception
-
-    /*
-     * TODO: remove this
-     * We can't instantiate an abstract class or interface, so throw an
-     * InstantiationError with the class descriptor as the message.
-     *
-     *  ecx holds class object
-     */
-.L${opcode}_abstract:
-    movl    offClassObject_descriptor(%ecx),%eax
-    movl    $$.LstrInstantiationError,OUT_ARG0(%esp)
-    movl    %eax,OUT_ARG1(%esp)
-    call    dvmThrowExceptionWithClassMessage
-    jmp     common_exceptionThrown
diff --git a/vm/mterp/x86/OP_NEW_INSTANCE_JUMBO.S b/vm/mterp/x86/OP_NEW_INSTANCE_JUMBO.S
new file mode 100644
index 0000000..aebb2d0
--- /dev/null
+++ b/vm/mterp/x86/OP_NEW_INSTANCE_JUMBO.S
@@ -0,0 +1,67 @@
+%verify "executed"
+%verify "class not resolved"
+%verify "class cannot be resolved"
+%verify "class not initialized"
+%verify "class fails to initialize"
+%verify "class already resolved/initialized"
+%verify "class is abstract or interface"
+%verify "allocation fails"
+    /*
+     * Create a new instance of a class.
+     */
+    /* new-instance/jumbo vBBBB, class@AAAAAAAA */
+    movl      rSELF,%ecx
+    movl      2(rPC),%eax               # eax<- AAAAAAAA
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- pDvmDex
+    movl      offDvmDex_pResClasses(%ecx),%ecx # ecx<- pDvmDex->pResClasses
+    EXPORT_PC
+    movl      (%ecx,%eax,4),%ecx        # ecx<- resolved class
+    SPILL(rIBASE)
+    testl     %ecx,%ecx                 # resolved?
+    je        .L${opcode}_resolve       # no, go do it
+.L${opcode}_resolved:  # on entry, ecx<- class
+    cmpb      $$CLASS_INITIALIZED,offClassObject_status(%ecx)
+    jne       .L${opcode}_needinit
+.L${opcode}_initialized:  # on entry, ecx<- class
+    movl      $$ALLOC_DONT_TRACK,OUT_ARG1(%esp)
+    movl     %ecx,OUT_ARG0(%esp)
+    call     dvmAllocObject             # eax<- new object
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 4 %ecx
+    testl    %eax,%eax                  # success?
+    je       common_exceptionThrown     # no, bail out
+    SET_VREG %eax rINST
+    ADVANCE_PC 4
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Class initialization required.
+     *
+     *  ecx holds class object
+     */
+.L${opcode}_needinit:
+    SPILL_TMP1(%ecx)                    # save object
+    movl    %ecx,OUT_ARG0(%esp)
+    call    dvmInitClass                # initialize class
+    UNSPILL_TMP1(%ecx)                  # restore object
+    testl   %eax,%eax                   # success?
+    jne     .L${opcode}_initialized     # success, continue
+    jmp     common_exceptionThrown      # go deal with init exception
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     */
+.L${opcode}_resolve:
+    movl    rSELF,%ecx
+    movl    2(rPC),%eax                 # eax<- AAAAAAAA
+    movl    offThread_method(%ecx),%ecx   # ecx<- self->method
+    movl    %eax,OUT_ARG1(%esp)
+    movl    offMethod_clazz(%ecx),%ecx  # ecx<- method->clazz
+    movl    $$0,OUT_ARG2(%esp)
+    movl    %ecx,OUT_ARG0(%esp)
+    call    dvmResolveClass             # call(clazz,off,flags)
+    movl    %eax,%ecx                   # ecx<- resolved ClassObject ptr
+    testl   %ecx,%ecx                   # success?
+    jne     .L${opcode}_resolved        # good to go
+    jmp     common_exceptionThrown      # no, handle exception
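
As with the non-jumbo handler, allocation only pays for class
initialization when the status byte says the class is not yet initialized.
A short sketch of the resolved-class path, using the helper names the
assembly calls (field names approximated):

static Object* newInstanceResolvedSketch(ClassObject* clazz)
{
    if (clazz->status != CLASS_INITIALIZED) {       /* uncommon path */
        if (!dvmInitClass(clazz))
            return NULL;                            /* <clinit> threw */
    }
    return dvmAllocObject(clazz, ALLOC_DONT_TRACK);
}
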
diff --git a/vm/mterp/x86/OP_NOP.S b/vm/mterp/x86/OP_NOP.S
index 167d842..99fa2b6 100644
--- a/vm/mterp/x86/OP_NOP.S
+++ b/vm/mterp/x86/OP_NOP.S
@@ -1,4 +1,4 @@
 %verify "executed"
-    FETCH_INST_OPCODE 1 %edx
+    FETCH_INST_OPCODE 1 %ecx
     ADVANCE_PC 1
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_NOT_LONG.S b/vm/mterp/x86/OP_NOT_LONG.S
index bdecf46..54e1c44 100644
--- a/vm/mterp/x86/OP_NOT_LONG.S
+++ b/vm/mterp/x86/OP_NOT_LONG.S
@@ -5,10 +5,10 @@
     andb      $$0xf,rINSTbl      # rINST<- A
     GET_VREG_WORD %eax %ecx 0    # eax<- v[B+0]
     GET_VREG_WORD %ecx %ecx 1    # ecx<- v[B+1]
-    FETCH_INST_OPCODE 1 %edx
     notl      %eax
     notl      %ecx
     SET_VREG_WORD %eax rINST 0   # v[A+0]<- eax
+    FETCH_INST_OPCODE 1 %eax
     SET_VREG_WORD %ecx rINST 1   # v[A+1]<- ecx
     ADVANCE_PC 1
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %eax
diff --git a/vm/mterp/x86/OP_OR_LONG.S b/vm/mterp/x86/OP_OR_LONG.S
index ebeb05a..90f0e3e 100644
--- a/vm/mterp/x86/OP_OR_LONG.S
+++ b/vm/mterp/x86/OP_OR_LONG.S
@@ -1,2 +1,2 @@
 %verify "executed"
-%include "x86/binopWide.S" {"instr1":"orl (rFP,%ecx,4),%edx", "instr2":"orl 4(rFP,%ecx,4),%eax"}
+%include "x86/binopWide.S" {"instr1":"orl (rFP,%ecx,4),rIBASE", "instr2":"orl 4(rFP,%ecx,4),%eax"}
diff --git a/vm/mterp/x86/OP_PACKED_SWITCH.S b/vm/mterp/x86/OP_PACKED_SWITCH.S
index 1b48635..bae1226 100644
--- a/vm/mterp/x86/OP_PACKED_SWITCH.S
+++ b/vm/mterp/x86/OP_PACKED_SWITCH.S
@@ -15,7 +15,9 @@
     leal    (rPC,%ecx,2),%ecx     # ecx<- PC + BBBBbbbb*2
     movl    %eax,OUT_ARG1(%esp)   # ARG1<- vAA
     movl    %ecx,OUT_ARG0(%esp)   # ARG0<- switchData
+    SPILL(rIBASE)
     call    $func
+    UNSPILL(rIBASE)
     testl   %eax,%eax
     movl    %eax,rINST            # set up word offset
     jle     common_backwardBranch # check on special actions
diff --git a/vm/mterp/x86/OP_REM_DOUBLE.S b/vm/mterp/x86/OP_REM_DOUBLE.S
index bad3e22..d670a31 100644
--- a/vm/mterp/x86/OP_REM_DOUBLE.S
+++ b/vm/mterp/x86/OP_REM_DOUBLE.S
@@ -4,8 +4,7 @@
     movzbl   2(rPC),%eax            # eax<- CC
     fldl     (rFP,%ecx,4)           # vCC to fp stack
     fldl     (rFP,%eax,4)           # vCC to fp stack
-    movzbl   rINSTbl,%ecx           # ecx<- AA
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
 1:
     fprem
     fstsw     %ax
@@ -13,5 +12,5 @@
     jp        1b
     fstp      %st(1)
     ADVANCE_PC 2
-    fstpl    (rFP,%ecx,4)           # %st to vAA
-    GOTO_NEXT_R %edx
+    fstpl    (rFP,rINST,4)           # %st to vAA
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_REM_DOUBLE_2ADDR.S b/vm/mterp/x86/OP_REM_DOUBLE_2ADDR.S
index aaee1d4..ee751d0 100644
--- a/vm/mterp/x86/OP_REM_DOUBLE_2ADDR.S
+++ b/vm/mterp/x86/OP_REM_DOUBLE_2ADDR.S
@@ -5,13 +5,13 @@
     fldl     (rFP,rINST,4)              # vBB to fp stack
     andb    $$0xf,%cl                   # ecx<- A
     fldl     (rFP,%ecx,4)               # vAA to fp stack
-    FETCH_INST_OPCODE 1 %edx
 1:
     fprem
     fstsw     %ax
     sahf
     jp        1b
     fstp      %st(1)
+    FETCH_INST_OPCODE 1 %eax
     ADVANCE_PC 1
     fstpl    (rFP,%ecx,4)               # %st to vA
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %eax
diff --git a/vm/mterp/x86/OP_REM_FLOAT.S b/vm/mterp/x86/OP_REM_FLOAT.S
index 12af510..342bf3e 100644
--- a/vm/mterp/x86/OP_REM_FLOAT.S
+++ b/vm/mterp/x86/OP_REM_FLOAT.S
@@ -5,13 +5,13 @@
     flds     (rFP,%ecx,4)           # vCC to fp stack
     flds     (rFP,%eax,4)           # vCC to fp stack
     movzbl   rINSTbl,%ecx           # ecx<- AA
-    FETCH_INST_OPCODE 2 %edx
 1:
     fprem
     fstsw     %ax
     sahf
     jp        1b
     fstp      %st(1)
+    FETCH_INST_OPCODE 2 %eax
     ADVANCE_PC 2
     fstps    (rFP,%ecx,4)           # %st to vAA
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %eax
diff --git a/vm/mterp/x86/OP_REM_FLOAT_2ADDR.S b/vm/mterp/x86/OP_REM_FLOAT_2ADDR.S
index 6a5f716..1f494bd 100644
--- a/vm/mterp/x86/OP_REM_FLOAT_2ADDR.S
+++ b/vm/mterp/x86/OP_REM_FLOAT_2ADDR.S
@@ -5,13 +5,13 @@
     flds     (rFP,rINST,4)              # vBB to fp stack
     andb    $$0xf,%cl                   # ecx<- A
     flds     (rFP,%ecx,4)               # vAA to fp stack
-    FETCH_INST_OPCODE 1 %edx
 1:
     fprem
     fstsw     %ax
     sahf
     jp        1b
     fstp      %st(1)
+    FETCH_INST_OPCODE 1 %eax
     ADVANCE_PC 1
     fstps    (rFP,%ecx,4)               # %st to vA
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %eax
diff --git a/vm/mterp/x86/OP_REM_INT.S b/vm/mterp/x86/OP_REM_INT.S
index 601b383..6e4fd04 100644
--- a/vm/mterp/x86/OP_REM_INT.S
+++ b/vm/mterp/x86/OP_REM_INT.S
@@ -1,2 +1,2 @@
 %verify "executed"
-%include "x86/bindiv.S" {"result":"%edx","special":"$0"}
+%include "x86/bindiv.S" {"result":"rIBASE","special":"$0"}
diff --git a/vm/mterp/x86/OP_REM_INT_2ADDR.S b/vm/mterp/x86/OP_REM_INT_2ADDR.S
index cfb60bd..4a11617 100644
--- a/vm/mterp/x86/OP_REM_INT_2ADDR.S
+++ b/vm/mterp/x86/OP_REM_INT_2ADDR.S
@@ -1,2 +1,2 @@
 %verify "executed"
-%include "x86/bindiv2addr.S" {"result":"%edx","special":"$0"}
+%include "x86/bindiv2addr.S" {"result":"rIBASE","special":"$0"}
diff --git a/vm/mterp/x86/OP_REM_INT_LIT16.S b/vm/mterp/x86/OP_REM_INT_LIT16.S
index a5fdb1e..5c4afd6 100644
--- a/vm/mterp/x86/OP_REM_INT_LIT16.S
+++ b/vm/mterp/x86/OP_REM_INT_LIT16.S
@@ -1,2 +1,2 @@
 %verify "executed"
-%include "x86/bindivLit16.S" {"result":"%edx","special":"$0"}
+%include "x86/bindivLit16.S" {"result":"rIBASE","special":"$0"}
diff --git a/vm/mterp/x86/OP_REM_INT_LIT8.S b/vm/mterp/x86/OP_REM_INT_LIT8.S
index 9d06fcd..53e12ee 100644
--- a/vm/mterp/x86/OP_REM_INT_LIT8.S
+++ b/vm/mterp/x86/OP_REM_INT_LIT8.S
@@ -1,2 +1,2 @@
 %verify "executed"
-%include "x86/bindivLit8.S" {"result":"%edx","special":"$0"}
+%include "x86/bindivLit8.S" {"result":"rIBASE","special":"$0"}
diff --git a/vm/mterp/x86/OP_RETURN.S b/vm/mterp/x86/OP_RETURN.S
index 657903d..082b82e 100644
--- a/vm/mterp/x86/OP_RETURN.S
+++ b/vm/mterp/x86/OP_RETURN.S
@@ -1,12 +1,12 @@
 %verify "executed"
     /*
-     * Return a 32-bit value.  Copies the return value into the "glue"
+     * Return a 32-bit value.  Copies the return value into the "self"
      * structure, then jumps to the return handler.
      *
      * for: return, return-object
      */
     /* op vAA */
-    movl    rGLUE,%ecx
+    movl    rSELF,%ecx
     GET_VREG_R %eax rINST               # eax<- vAA
-    movl    %eax,offGlue_retval(%ecx)   # retval.i <- AA
+    movl    %eax,offThread_retval(%ecx)   # retval.i <- AA
     jmp     common_returnFromMethod
diff --git a/vm/mterp/x86/OP_RETURN_WIDE.S b/vm/mterp/x86/OP_RETURN_WIDE.S
index 049030f..c8e7df5 100644
--- a/vm/mterp/x86/OP_RETURN_WIDE.S
+++ b/vm/mterp/x86/OP_RETURN_WIDE.S
@@ -1,12 +1,12 @@
 %verify "executed"
     /*
-     * Return a 64-bit value.  Copies the return value into the "glue"
+     * Return a 64-bit value.  Copies the return value into the "self"
      * structure, then jumps to the return handler.
      */
     /* return-wide vAA */
-    movl    rGLUE,%ecx
+    movl    rSELF,%ecx
     GET_VREG_WORD %eax rINST 0       # eax<- v[AA+0]
     GET_VREG_WORD rINST rINST 1      # rINST<- v[AA+1]
-    movl    %eax,offGlue_retval(%ecx)
-    movl    rINST,4+offGlue_retval(%ecx)
+    movl    %eax,offThread_retval(%ecx)
+    movl    rINST,4+offThread_retval(%ecx)
     jmp     common_returnFromMethod
diff --git a/vm/mterp/x86/OP_SGET.S b/vm/mterp/x86/OP_SGET.S
index 0be038a..8ff0632 100644
--- a/vm/mterp/x86/OP_SGET.S
+++ b/vm/mterp/x86/OP_SGET.S
@@ -8,33 +8,34 @@
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
     /* op vAA, field@BBBB */
-    movl      rGLUE,%ecx
+    movl      rSELF,%ecx
     movzwl    2(rPC),%eax                        # eax<- field ref BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
     movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
     movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
     testl     %eax,%eax                          # resolved entry null?
     je        .L${opcode}_resolve                # if not, make it so
 .L${opcode}_finish:     # field ptr in eax
     movl      offStaticField_value(%eax),%eax
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
     SET_VREG %eax rINST
-    GOTO_NEXT_R %edx
-%break
+    GOTO_NEXT_R %ecx
 
     /*
      * Go resolve the field
      */
 .L${opcode}_resolve:
-    movl     rGLUE,%ecx
+    movl     rSELF,%ecx
     movzwl   2(rPC),%eax                        # eax<- field ref BBBB
-    movl     offGlue_method(%ecx),%ecx          # ecx<- current method
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
     EXPORT_PC                                   # could throw, need to export
     movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
     movl     %eax,OUT_ARG1(%esp)
     movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
     call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
     testl    %eax,%eax
     jne      .L${opcode}_finish                 # success, continue
     jmp      common_exceptionThrown             # no, handle exception
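
The handler splits into a fast path that hits the per-DEX resolved-fields
cache and a slow path that calls into C; rIBASE only needs to be spilled
around the call because %edx is caller-saved in the cdecl ABI.  A hedged C
sketch of the same split (field names approximated from the offsets used
above):

static bool sgetSketch(Thread* self, u4 fieldIdx, s4* pValue)
{
    StaticField* sfield = self->methodClassDex->pResFields[fieldIdx];
    if (sfield == NULL) {
        /* Uncommon path; the PC was exported first because this can throw. */
        sfield = dvmResolveStaticField(self->method->clazz, fieldIdx);
        if (sfield == NULL)
            return false;                           /* exception pending */
    }
    *pValue = sfield->value.i;                      /* offStaticField_value */
    return true;
}
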
diff --git a/vm/mterp/x86/OP_SGET_BOOLEAN_JUMBO.S b/vm/mterp/x86/OP_SGET_BOOLEAN_JUMBO.S
new file mode 100644
index 0000000..dee15fc
--- /dev/null
+++ b/vm/mterp/x86/OP_SGET_BOOLEAN_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "x86/OP_SGET_JUMBO.S"
diff --git a/vm/mterp/x86/OP_SGET_BYTE_JUMBO.S b/vm/mterp/x86/OP_SGET_BYTE_JUMBO.S
new file mode 100644
index 0000000..dee15fc
--- /dev/null
+++ b/vm/mterp/x86/OP_SGET_BYTE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "x86/OP_SGET_JUMBO.S"
diff --git a/vm/mterp/x86/OP_SGET_CHAR_JUMBO.S b/vm/mterp/x86/OP_SGET_CHAR_JUMBO.S
new file mode 100644
index 0000000..dee15fc
--- /dev/null
+++ b/vm/mterp/x86/OP_SGET_CHAR_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "x86/OP_SGET_JUMBO.S"
diff --git a/vm/mterp/x86/OP_SGET_JUMBO.S b/vm/mterp/x86/OP_SGET_JUMBO.S
new file mode 100644
index 0000000..2b3e2a8
--- /dev/null
+++ b/vm/mterp/x86/OP_SGET_JUMBO.S
@@ -0,0 +1,42 @@
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    movl      rSELF,%ecx
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .L${opcode}_resolve                # if not, make it so
+.L${opcode}_finish:     # field ptr in eax
+    movl      offStaticField_value(%eax),%eax
+    FETCH_INST_OPCODE 4 %ecx
+    ADVANCE_PC 4
+    SET_VREG %eax rINST
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Go resolve the field
+     */
+.L${opcode}_resolve:
+    movl     rSELF,%ecx
+    movl     2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
+    EXPORT_PC                                   # could throw, need to export
+    movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
+    movl     %eax,OUT_ARG1(%esp)
+    movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
+    call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
+    testl    %eax,%eax
+    jne      .L${opcode}_finish                 # success, continue
+    jmp      common_exceptionThrown             # no, handle exception
diff --git a/vm/mterp/x86/OP_SGET_OBJECT_JUMBO.S b/vm/mterp/x86/OP_SGET_OBJECT_JUMBO.S
new file mode 100644
index 0000000..dee15fc
--- /dev/null
+++ b/vm/mterp/x86/OP_SGET_OBJECT_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "x86/OP_SGET_JUMBO.S"
diff --git a/vm/mterp/x86/OP_SGET_SHORT_JUMBO.S b/vm/mterp/x86/OP_SGET_SHORT_JUMBO.S
new file mode 100644
index 0000000..dee15fc
--- /dev/null
+++ b/vm/mterp/x86/OP_SGET_SHORT_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "x86/OP_SGET_JUMBO.S"
diff --git a/vm/mterp/x86/OP_SGET_WIDE.S b/vm/mterp/x86/OP_SGET_WIDE.S
index 54e7f10..432763d 100644
--- a/vm/mterp/x86/OP_SGET_WIDE.S
+++ b/vm/mterp/x86/OP_SGET_WIDE.S
@@ -7,9 +7,9 @@
      *
      */
     /* sget-wide vAA, field@BBBB */
-    movl      rGLUE,%ecx
+    movl      rSELF,%ecx
     movzwl    2(rPC),%eax                        # eax<- field ref BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
     movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
     movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
     testl     %eax,%eax                          # resolved entry null?
@@ -17,25 +17,26 @@
 .L${opcode}_finish:     # field ptr in eax
     movl      offStaticField_value(%eax),%ecx    # ecx<- lsw
     movl      4+offStaticField_value(%eax),%eax  # eax<- msw
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
     SET_VREG_WORD %ecx rINST 0
+    FETCH_INST_OPCODE 2 %ecx
     SET_VREG_WORD %eax rINST 1
-    GOTO_NEXT_R %edx
-%break
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
 
     /*
      * Go resolve the field
      */
 .L${opcode}_resolve:
-    movl     rGLUE,%ecx
+    movl     rSELF,%ecx
     movzwl   2(rPC),%eax                        # eax<- field ref BBBB
-    movl     offGlue_method(%ecx),%ecx          # ecx<- current method
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
     EXPORT_PC                                   # could throw, need to export
     movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
     movl     %eax,OUT_ARG1(%esp)
     movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
     call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
     testl    %eax,%eax
     jne      .L${opcode}_finish                 # success, continue
     jmp      common_exceptionThrown             # no, handle exception
diff --git a/vm/mterp/x86/OP_SGET_WIDE_JUMBO.S b/vm/mterp/x86/OP_SGET_WIDE_JUMBO.S
new file mode 100644
index 0000000..2d8fa8f
--- /dev/null
+++ b/vm/mterp/x86/OP_SGET_WIDE_JUMBO.S
@@ -0,0 +1,42 @@
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Jumbo 64-bit SGET handler.
+     *
+     */
+    /* sget-wide/jumbo vBBBB, field@AAAAAAAA */
+    movl      rSELF,%ecx
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .L${opcode}_resolve                # if not, make it so
+.L${opcode}_finish:     # field ptr in eax
+    movl      offStaticField_value(%eax),%ecx    # ecx<- lsw
+    movl      4+offStaticField_value(%eax),%eax  # eax<- msw
+    SET_VREG_WORD %ecx rINST 0
+    FETCH_INST_OPCODE 4 %ecx
+    SET_VREG_WORD %eax rINST 1
+    ADVANCE_PC 4
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Go resolve the field
+     */
+.L${opcode}_resolve:
+    movl     rSELF,%ecx
+    movl     2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
+    EXPORT_PC                                   # could throw, need to export
+    movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
+    movl     %eax,OUT_ARG1(%esp)
+    movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
+    call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
+    testl    %eax,%eax
+    jne      .L${opcode}_finish                 # success, continue
+    jmp      common_exceptionThrown             # no, handle exception
diff --git a/vm/mterp/x86/OP_SHL_LONG.S b/vm/mterp/x86/OP_SHL_LONG.S
index f181340..0750f80 100644
--- a/vm/mterp/x86/OP_SHL_LONG.S
+++ b/vm/mterp/x86/OP_SHL_LONG.S
@@ -9,26 +9,24 @@
      */
     /* shl-long vAA, vBB, vCC */
     /* ecx gets shift count */
-    /* Need to spill edx */
+    /* Need to spill rIBASE */
     /* rINSTw gets AA */
     movzbl    2(rPC),%eax               # eax<- BB
     movzbl    3(rPC),%ecx               # ecx<- CC
-    GET_VREG_WORD %edx %eax 1           # ecx<- v[BB+1]
+    SPILL(rIBASE)
+    GET_VREG_WORD rIBASE %eax 1         # rIBASE<- v[BB+1]
     GET_VREG_R   %ecx %ecx              # ecx<- vCC
     GET_VREG_WORD %eax %eax 0           # eax<- v[BB+0]
-    shldl     %eax,%edx
+    shldl     %eax,rIBASE
     sall      %cl,%eax
     testb     $$32,%cl
     je        2f
-    movl      %eax,%edx
+    movl      %eax,rIBASE
     xorl      %eax,%eax
 2:
-    SET_VREG_WORD %edx rINST 1          # v[AA+1]<- %edx
-    FETCH_INST_OPCODE 2 %edx
-    jmp       .L${opcode}_finish
-%break
-
-.L${opcode}_finish:
+    SET_VREG_WORD rIBASE rINST 1        # v[AA+1]<- rIBASE
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
     SET_VREG_WORD %eax rINST 0          # v[AA+0]<- %eax
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
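
The 64-bit left shift is built from shldl/sall plus an explicit fixup for
counts of 32 or more, because x86 masks 32-bit shift counts to five bits.
A C sketch of the same arithmetic under Dalvik's semantics (only the low
six bits of the count matter; the function name is illustrative):

#include <stdint.h>

static int64_t shlLongSketch(int64_t value, int32_t shiftCount)
{
    uint32_t lo = (uint32_t)value;
    uint32_t hi = (uint32_t)((uint64_t)value >> 32);
    unsigned cl = (unsigned)shiftCount & 63;

    if (cl & 32) {                             /* the testb $$32,%cl fixup */
        hi = lo << (cl & 31);
        lo = 0;
    } else if (cl != 0) {
        hi = (hi << cl) | (lo >> (32 - cl));   /* shldl */
        lo <<= cl;                             /* sall */
    }
    return (int64_t)(((uint64_t)hi << 32) | lo);
}
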
diff --git a/vm/mterp/x86/OP_SHL_LONG_2ADDR.S b/vm/mterp/x86/OP_SHL_LONG_2ADDR.S
index 30f3d1b..b8bbce9 100644
--- a/vm/mterp/x86/OP_SHL_LONG_2ADDR.S
+++ b/vm/mterp/x86/OP_SHL_LONG_2ADDR.S
@@ -5,28 +5,25 @@
      */
     /* shl-long/2addr vA, vB */
     /* ecx gets shift count */
-    /* Need to spill edx */
+    /* Need to spill rIBASE */
     /* rINSTw gets AA */
     movzbl    rINSTbl,%ecx             # ecx<- BA
     andb      $$0xf,rINSTbl            # rINST<- A
     GET_VREG_WORD %eax rINST 0         # eax<- v[AA+0]
     sarl      $$4,%ecx                 # ecx<- B
-    GET_VREG_WORD %edx rINST 1         # edx<- v[AA+1]
+    SPILL(rIBASE)
+    GET_VREG_WORD rIBASE rINST 1       # rIBASE<- v[AA+1]
     GET_VREG_R  %ecx %ecx              # ecx<- vBB
-    shldl     %eax,%edx
+    shldl     %eax,rIBASE
     sall      %cl,%eax
     testb     $$32,%cl
     je        2f
-    movl      %eax,%edx
+    movl      %eax,rIBASE
     xorl      %eax,%eax
 2:
-    SET_VREG_WORD %edx rINST 1         # v[AA+1]<- edx
-    jmp       .L${opcode}_finish
-%break
-
-
-.L${opcode}_finish:
-    FETCH_INST_OPCODE 1 %edx
+    SET_VREG_WORD rIBASE rINST 1       # v[AA+1]<- rIBASE
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 1 %ecx
     SET_VREG_WORD %eax rINST 0         # v[AA+0]<- eax
     ADVANCE_PC 1
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_SHR_LONG.S b/vm/mterp/x86/OP_SHR_LONG.S
index 45a07ae..d79d624 100644
--- a/vm/mterp/x86/OP_SHR_LONG.S
+++ b/vm/mterp/x86/OP_SHR_LONG.S
@@ -9,27 +9,24 @@
      */
     /* shr-long vAA, vBB, vCC */
     /* ecx gets shift count */
-    /* Need to spill edx */
+    /* Need to spill rIBASE */
     /* rINSTw gets AA */
     movzbl    2(rPC),%eax               # eax<- BB
     movzbl    3(rPC),%ecx               # ecx<- CC
-    GET_VREG_WORD %edx %eax 1           # edx<- v[BB+1]
+    SPILL(rIBASE)
+    GET_VREG_WORD rIBASE %eax 1         # rIBASE<- v[BB+1]
     GET_VREG_R   %ecx %ecx              # ecx<- vCC
     GET_VREG_WORD %eax %eax 0           # eax<- v[BB+0]
-    shrdl     %edx,%eax
-    sarl      %cl,%edx
+    shrdl     rIBASE,%eax
+    sarl      %cl,rIBASE
     testb     $$32,%cl
     je        2f
-    movl      %edx,%eax
-    sarl      $$31,%edx
+    movl      rIBASE,%eax
+    sarl      $$31,rIBASE
 2:
-    SET_VREG_WORD %edx rINST 1          # v[AA+1]<- edx
-    FETCH_INST_OPCODE 2 %edx
-    jmp       .L${opcode}_finish
-%break
-
-
-.L${opcode}_finish:
+    SET_VREG_WORD rIBASE rINST 1        # v[AA+1]<- rIBASE
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
     SET_VREG_WORD %eax rINST 0          # v[AA+0]<- eax
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
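
The arithmetic right shift follows the same pattern with shrdl/sarl, and
the fixup moves the sign-extended high word down while refilling the high
word with sign bits.  A sketch under the same assumptions (and assuming >>
on a negative int32_t is arithmetic, as it is for the compilers Dalvik
targets):

#include <stdint.h>

static int64_t shrLongSketch(int64_t value, int32_t shiftCount)
{
    int32_t  hi = (int32_t)((uint64_t)value >> 32);
    uint32_t lo = (uint32_t)value;
    unsigned cl = (unsigned)shiftCount & 63;

    if (cl & 32) {                             /* count is 32..63 */
        lo = (uint32_t)(hi >> (cl & 31));
        hi >>= 31;                             /* high word becomes all sign bits */
    } else if (cl != 0) {
        lo = (lo >> cl) | ((uint32_t)hi << (32 - cl));   /* shrdl */
        hi >>= cl;                                       /* sarl */
    }
    return (int64_t)(((uint64_t)(uint32_t)hi << 32) | lo);
}
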
diff --git a/vm/mterp/x86/OP_SHR_LONG_2ADDR.S b/vm/mterp/x86/OP_SHR_LONG_2ADDR.S
index 9987cf7..d380e12 100644
--- a/vm/mterp/x86/OP_SHR_LONG_2ADDR.S
+++ b/vm/mterp/x86/OP_SHR_LONG_2ADDR.S
@@ -5,28 +5,25 @@
      */
     /* shr-long/2addr vA, vB */
     /* ecx gets shift count */
-    /* Need to spill edx */
+    /* Need to spill rIBASE */
     /* rINSTw gets AA */
     movzbl    rINSTbl,%ecx         # ecx<- BA
     andb      $$0xf,rINSTbl        # rINST<- A
     GET_VREG_WORD %eax rINST 0     # eax<- v[AA+0]
     sarl      $$4,%ecx             # ecx<- B
-    GET_VREG_WORD %edx rINST 1     # edx<- v[AA+1]
+    SPILL(rIBASE)
+    GET_VREG_WORD rIBASE rINST 1   # rIBASE<- v[AA+1]
     GET_VREG_R %ecx %ecx           # ecx<- vBB
-    shrdl     %edx,%eax
-    sarl      %cl,%edx
+    shrdl     rIBASE,%eax
+    sarl      %cl,rIBASE
     testb     $$32,%cl
     je        2f
-    movl      %edx,%eax
-    sarl      $$31,%edx
+    movl      rIBASE,%eax
+    sarl      $$31,rIBASE
 2:
-    SET_VREG_WORD %edx rINST 1     # v[AA+1]<- edx
-    jmp       .L${opcode}_finish
-%break
-
-
-.L${opcode}_finish:
-    FETCH_INST_OPCODE 1 %edx
+    SET_VREG_WORD rIBASE rINST 1   # v[AA+1]<- rIBASE
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 1 %ecx
     SET_VREG_WORD %eax rINST 0    # v[AA+0]<- eax
     ADVANCE_PC 1
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_SPUT.S b/vm/mterp/x86/OP_SPUT.S
index e293838..bc76533 100644
--- a/vm/mterp/x86/OP_SPUT.S
+++ b/vm/mterp/x86/OP_SPUT.S
@@ -8,33 +8,34 @@
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
     /* op vAA, field@BBBB */
-    movl      rGLUE,%ecx
+    movl      rSELF,%ecx
     movzwl    2(rPC),%eax                        # eax<- field ref BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
     movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
     movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
     testl     %eax,%eax                          # resolved entry null?
     je        .L${opcode}_resolve                # if not, make it so
 .L${opcode}_finish:     # field ptr in eax
-    GET_VREG_R  %ecx rINST
-    FETCH_INST_OPCODE 2 %edx
+    GET_VREG_R  rINST rINST
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    movl      %ecx,offStaticField_value(%eax)
-    GOTO_NEXT_R %edx
-%break
+    movl      rINST,offStaticField_value(%eax)
+    GOTO_NEXT_R %ecx
 
     /*
      * Go resolve the field
      */
 .L${opcode}_resolve:
-    movl     rGLUE,%ecx
+    movl     rSELF,%ecx
     movzwl   2(rPC),%eax                        # eax<- field ref BBBB
-    movl     offGlue_method(%ecx),%ecx          # ecx<- current method
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
     EXPORT_PC                                   # could throw, need to export
     movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
     movl     %eax,OUT_ARG1(%esp)
     movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
     call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
     testl    %eax,%eax
     jne      .L${opcode}_finish                 # success, continue
     jmp      common_exceptionThrown             # no, handle exception
diff --git a/vm/mterp/x86/OP_SPUT_BOOLEAN_JUMBO.S b/vm/mterp/x86/OP_SPUT_BOOLEAN_JUMBO.S
new file mode 100644
index 0000000..c791c49
--- /dev/null
+++ b/vm/mterp/x86/OP_SPUT_BOOLEAN_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "x86/OP_SPUT_JUMBO.S"
diff --git a/vm/mterp/x86/OP_SPUT_BYTE_JUMBO.S b/vm/mterp/x86/OP_SPUT_BYTE_JUMBO.S
new file mode 100644
index 0000000..c791c49
--- /dev/null
+++ b/vm/mterp/x86/OP_SPUT_BYTE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "x86/OP_SPUT_JUMBO.S"
diff --git a/vm/mterp/x86/OP_SPUT_CHAR_JUMBO.S b/vm/mterp/x86/OP_SPUT_CHAR_JUMBO.S
new file mode 100644
index 0000000..c791c49
--- /dev/null
+++ b/vm/mterp/x86/OP_SPUT_CHAR_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "x86/OP_SPUT_JUMBO.S"
diff --git a/vm/mterp/x86/OP_SPUT_JUMBO.S b/vm/mterp/x86/OP_SPUT_JUMBO.S
new file mode 100644
index 0000000..3f3d42a
--- /dev/null
+++ b/vm/mterp/x86/OP_SPUT_JUMBO.S
@@ -0,0 +1,42 @@
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    movl      rSELF,%ecx
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .L${opcode}_resolve                # if not, make it so
+.L${opcode}_finish:     # field ptr in eax
+    GET_VREG_R  rINST rINST
+    FETCH_INST_OPCODE 4 %ecx
+    ADVANCE_PC 4
+    movl      rINST,offStaticField_value(%eax)
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Go resolve the field
+     */
+.L${opcode}_resolve:
+    movl     rSELF,%ecx
+    movl     2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
+    EXPORT_PC                                   # could throw, need to export
+    movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
+    movl     %eax,OUT_ARG1(%esp)
+    movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
+    call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
+    testl    %eax,%eax
+    jne      .L${opcode}_finish                 # success, continue
+    jmp      common_exceptionThrown             # no, handle exception
diff --git a/vm/mterp/x86/OP_SPUT_OBJECT.S b/vm/mterp/x86/OP_SPUT_OBJECT.S
index dbfcd02..45d84c7 100644
--- a/vm/mterp/x86/OP_SPUT_OBJECT.S
+++ b/vm/mterp/x86/OP_SPUT_OBJECT.S
@@ -6,9 +6,9 @@
      * SPUT object handler.
      */
     /* op vAA, field@BBBB */
-    movl      rGLUE,%ecx
+    movl      rSELF,%ecx
     movzwl    2(rPC),%eax                        # eax<- field ref BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
     movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
     movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField
     testl     %eax,%eax                          # resolved entry null?
@@ -16,33 +16,30 @@
 .L${opcode}_finish:                              # field ptr in eax
     movzbl    rINSTbl,%ecx                       # ecx<- AA
     GET_VREG_R  %ecx %ecx
-    jmp       .L${opcode}_continue
-%break
-
-
-.L${opcode}_continue:
     movl      %ecx,offStaticField_value(%eax)    # do the store
     testl     %ecx,%ecx                          # stored null object ptr?
-    FETCH_INST_OPCODE 2 %edx
     je        1f                                 # skip card mark if null
-    movl      rGLUE,%ecx
+    movl      rSELF,%ecx
     movl      offField_clazz(%eax),%eax          # eax<- method->clazz
-    movl      offGlue_cardTable(%ecx),%ecx       # get card table base
+    movl      offThread_cardTable(%ecx),%ecx       # get card table base
     shrl      $$GC_CARD_SHIFT,%eax               # head to card number
     movb      %cl,(%ecx,%eax)                    # mark card
 1:
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 .L${opcode}_resolve:
-    movl     rGLUE,%ecx
+    movl     rSELF,%ecx
     movzwl   2(rPC),%eax                        # eax<- field ref BBBB
-    movl     offGlue_method(%ecx),%ecx          # ecx<- current method
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
     EXPORT_PC                                   # could throw, need to export
     movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
     movl     %eax,OUT_ARG1(%esp)
     movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
     call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
     testl    %eax,%eax
     jne      .L${opcode}_finish                 # success, continue
     jmp      common_exceptionThrown             # no, handle exception
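
After a non-null reference store, the handler dirties the GC card covering
the object that now holds the reference (for sput-object that is the
field's declaring class object, offField_clazz).  The card table base is
biased so that its own low byte is the dirty value, which is why the
assembly can store %cl directly.  A minimal sketch; the GC_CARD_SHIFT
value here is an assumption, not taken from this change:

#include <stdint.h>

enum { GC_CARD_SHIFT = 7 };   /* assumed: 128-byte cards */

/* 'cardTable' is the biased base read from offThread_cardTable. */
static void markCardSketch(uint8_t* cardTable, const void* holder)
{
    uintptr_t addr = (uintptr_t)holder;
    cardTable[addr >> GC_CARD_SHIFT] = (uint8_t)(uintptr_t)cardTable;
}
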
diff --git a/vm/mterp/x86/OP_SPUT_OBJECT_JUMBO.S b/vm/mterp/x86/OP_SPUT_OBJECT_JUMBO.S
new file mode 100644
index 0000000..5aab782
--- /dev/null
+++ b/vm/mterp/x86/OP_SPUT_OBJECT_JUMBO.S
@@ -0,0 +1,44 @@
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Jumbo SPUT object handler.
+     */
+    /* sput-object/jumbo vBBBB, field@AAAAAAAA */
+    movl      rSELF,%ecx
+    movl      2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField
+    testl     %eax,%eax                          # resolved entry null?
+    je        .L${opcode}_resolve                # if not, make it so
+.L${opcode}_finish:                              # field ptr in eax
+    GET_VREG_R  %ecx rINST
+    movl      %ecx,offStaticField_value(%eax)    # do the store
+    testl     %ecx,%ecx                          # stored null object ptr?
+    je        1f                                 # skip card mark if null
+    movl      rSELF,%ecx
+    movl      offField_clazz(%eax),%eax          # eax<- method->clazz
+    movl      offThread_cardTable(%ecx),%ecx       # get card table base
+    shrl      $$GC_CARD_SHIFT,%eax               # head to card number
+    movb      %cl,(%ecx,%eax)                    # mark card
+1:
+    FETCH_INST_OPCODE 4 %ecx
+    ADVANCE_PC 4
+    GOTO_NEXT_R %ecx
+
+.L${opcode}_resolve:
+    movl     rSELF,%ecx
+    movl     2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
+    EXPORT_PC                                   # could throw, need to export
+    movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
+    movl     %eax,OUT_ARG1(%esp)
+    movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
+    call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
+    testl    %eax,%eax
+    jne      .L${opcode}_finish                 # success, continue
+    jmp      common_exceptionThrown             # no, handle exception
diff --git a/vm/mterp/x86/OP_SPUT_SHORT_JUMBO.S b/vm/mterp/x86/OP_SPUT_SHORT_JUMBO.S
new file mode 100644
index 0000000..c791c49
--- /dev/null
+++ b/vm/mterp/x86/OP_SPUT_SHORT_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "x86/OP_SPUT_JUMBO.S"
diff --git a/vm/mterp/x86/OP_SPUT_WIDE.S b/vm/mterp/x86/OP_SPUT_WIDE.S
index 43d5509..d4c5841 100644
--- a/vm/mterp/x86/OP_SPUT_WIDE.S
+++ b/vm/mterp/x86/OP_SPUT_WIDE.S
@@ -8,9 +8,9 @@
      * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
      */
     /* op vAA, field@BBBB */
-    movl      rGLUE,%ecx
+    movl      rSELF,%ecx
     movzwl    2(rPC),%eax                        # eax<- field ref BBBB
-    movl      offGlue_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
     movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
     movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
     testl     %eax,%eax                          # resolved entry null?
@@ -18,25 +18,26 @@
 .L${opcode}_finish:     # field ptr in eax
     GET_VREG_WORD %ecx rINST 0                  # ecx<- lsw
     GET_VREG_WORD rINST rINST 1                 # rINST<- msw
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
     movl      %ecx,offStaticField_value(%eax)
+    FETCH_INST_OPCODE 2 %ecx
     movl      rINST,4+offStaticField_value(%eax)
-    GOTO_NEXT_R %edx
-%break
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
 
     /*
      * Go resolve the field
      */
 .L${opcode}_resolve:
-    movl     rGLUE,%ecx
+    movl     rSELF,%ecx
     movzwl   2(rPC),%eax                        # eax<- field ref BBBB
-    movl     offGlue_method(%ecx),%ecx          # ecx<- current method
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
     EXPORT_PC                                   # could throw, need to export
     movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
     movl     %eax,OUT_ARG1(%esp)
     movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
     call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
     testl    %eax,%eax
     jne      .L${opcode}_finish                 # success, continue
     jmp      common_exceptionThrown             # no, handle exception
diff --git a/vm/mterp/x86/OP_SPUT_WIDE_JUMBO.S b/vm/mterp/x86/OP_SPUT_WIDE_JUMBO.S
new file mode 100644
index 0000000..1fe544c
--- /dev/null
+++ b/vm/mterp/x86/OP_SPUT_WIDE_JUMBO.S
@@ -0,0 +1,41 @@
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Jumbo 64-bit SPUT handler.
+     */
+    /* sput-wide/jumbo vBBBB, field@AAAAAAAA */
+    movl      rSELF,%ecx
+    movl      offThread_methodClassDex(%ecx),%ecx  # ecx<- DvmDex
+    movl      2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl      offDvmDex_pResFields(%ecx),%ecx    # ecx<- dvmDex->pResFields
+    movl      (%ecx,%eax,4),%eax                 # eax<- resolved StaticField ptr
+    testl     %eax,%eax                          # resolved entry null?
+    je        .L${opcode}_resolve                # if not, make it so
+.L${opcode}_finish:     # field ptr in eax
+    GET_VREG_WORD %ecx rINST 0                  # ecx<- lsw
+    GET_VREG_WORD rINST rINST 1                 # rINST<- msw
+    movl      %ecx,offStaticField_value(%eax)
+    FETCH_INST_OPCODE 4 %ecx
+    movl      rINST,4+offStaticField_value(%eax)
+    ADVANCE_PC 4
+    GOTO_NEXT_R %ecx
+
+    /*
+     * Go resolve the field
+     */
+.L${opcode}_resolve:
+    movl     rSELF,%ecx
+    movl     2(rPC),%eax                        # eax<- field ref AAAAAAAA
+    movl     offThread_method(%ecx),%ecx          # ecx<- current method
+    EXPORT_PC                                   # could throw, need to export
+    movl     offMethod_clazz(%ecx),%ecx         # ecx<- method->clazz
+    movl     %eax,OUT_ARG1(%esp)
+    movl     %ecx,OUT_ARG0(%esp)
+    SPILL(rIBASE)
+    call     dvmResolveStaticField              # eax<- resolved StaticField ptr
+    UNSPILL(rIBASE)
+    testl    %eax,%eax
+    jne      .L${opcode}_finish                 # success, continue
+    jmp      common_exceptionThrown             # no, handle exception
diff --git a/vm/mterp/x86/OP_SUB_LONG.S b/vm/mterp/x86/OP_SUB_LONG.S
index cd95435..485818c 100644
--- a/vm/mterp/x86/OP_SUB_LONG.S
+++ b/vm/mterp/x86/OP_SUB_LONG.S
@@ -1,2 +1,2 @@
 %verify "executed"
-%include "x86/binopWide.S" {"instr1":"subl (rFP,%ecx,4),%edx", "instr2":"sbbl 4(rFP,%ecx,4),%eax"}
+%include "x86/binopWide.S" {"instr1":"subl (rFP,%ecx,4),rIBASE", "instr2":"sbbl 4(rFP,%ecx,4),%eax"}
diff --git a/vm/mterp/x86/OP_THROW.S b/vm/mterp/x86/OP_THROW.S
index 6884f78..6559a29 100644
--- a/vm/mterp/x86/OP_THROW.S
+++ b/vm/mterp/x86/OP_THROW.S
@@ -4,10 +4,9 @@
      * Throw an exception object in the current thread.
      */
     /* throw vAA */
-    movl     rGLUE,%ecx
     EXPORT_PC
     GET_VREG_R %eax rINST              # eax<- exception object
-    movl     offGlue_self(%ecx),%ecx   # ecx<- glue->self
+    movl     rSELF,%ecx                # ecx<- self
     testl    %eax,%eax                 # null object?
     je       common_errNullObject
     movl     %eax,offThread_exception(%ecx) # thread->exception<- obj
diff --git a/vm/mterp/x86/OP_THROW_VERIFICATION_ERROR.S b/vm/mterp/x86/OP_THROW_VERIFICATION_ERROR.S
index c32e2d7..c934bdb 100644
--- a/vm/mterp/x86/OP_THROW_VERIFICATION_ERROR.S
+++ b/vm/mterp/x86/OP_THROW_VERIFICATION_ERROR.S
@@ -5,9 +5,9 @@
      * exception is indicated by AA, with some detail provided by BBBB.
      */
     /* op AA, ref@BBBB */
-    movl     rGLUE,%ecx
+    movl     rSELF,%ecx
     movzwl   2(rPC),%eax                     # eax<- BBBB
-    movl     offGlue_method(%ecx),%ecx       # ecx<- glue->method
+    movl     offThread_method(%ecx),%ecx       # ecx<- self->method
     EXPORT_PC
     movl     %eax,OUT_ARG2(%esp)             # arg2<- BBBB
     movl     rINST,OUT_ARG1(%esp)            # arg1<- AA
diff --git a/vm/mterp/x86/OP_THROW_VERIFICATION_ERROR_JUMBO.S b/vm/mterp/x86/OP_THROW_VERIFICATION_ERROR_JUMBO.S
new file mode 100644
index 0000000..a9e092d
--- /dev/null
+++ b/vm/mterp/x86/OP_THROW_VERIFICATION_ERROR_JUMBO.S
@@ -0,0 +1,16 @@
+%verify executed
+    /*
+     * Handle a jumbo throw-verification-error instruction.  This throws an
+     * exception for an error discovered during verification.  The
+     * exception is indicated by BBBB, with some detail provided by AAAAAAAA.
+     */
+    /* exop BBBB, ref@AAAAAAAA */
+    movl     rSELF,%ecx
+    movl     2(rPC),%eax                     # eax<- AAAAAAAA
+    movl     offThread_method(%ecx),%ecx       # ecx<- self->method
+    EXPORT_PC
+    movl     %eax,OUT_ARG2(%esp)             # arg2<- AAAAAAAA
+    movl     rINST,OUT_ARG1(%esp)            # arg1<- BBBB
+    movl     %ecx,OUT_ARG0(%esp)             # arg0<- method
+    call     dvmThrowVerificationError       # call(method, kind, ref)
+    jmp      common_exceptionThrown          # handle exception
diff --git a/vm/mterp/x86/OP_UNUSED_27FF.S b/vm/mterp/x86/OP_UNUSED_27FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_27FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_28FF.S b/vm/mterp/x86/OP_UNUSED_28FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_28FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_29FF.S b/vm/mterp/x86/OP_UNUSED_29FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_29FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_2AFF.S b/vm/mterp/x86/OP_UNUSED_2AFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_2AFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_2BFF.S b/vm/mterp/x86/OP_UNUSED_2BFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_2BFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_2CFF.S b/vm/mterp/x86/OP_UNUSED_2CFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_2CFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_2DFF.S b/vm/mterp/x86/OP_UNUSED_2DFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_2DFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_2EFF.S b/vm/mterp/x86/OP_UNUSED_2EFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_2EFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_2FFF.S b/vm/mterp/x86/OP_UNUSED_2FFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_2FFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_30FF.S b/vm/mterp/x86/OP_UNUSED_30FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_30FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_31FF.S b/vm/mterp/x86/OP_UNUSED_31FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_31FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_32FF.S b/vm/mterp/x86/OP_UNUSED_32FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_32FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_33FF.S b/vm/mterp/x86/OP_UNUSED_33FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_33FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_34FF.S b/vm/mterp/x86/OP_UNUSED_34FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_34FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_35FF.S b/vm/mterp/x86/OP_UNUSED_35FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_35FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_36FF.S b/vm/mterp/x86/OP_UNUSED_36FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_36FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_37FF.S b/vm/mterp/x86/OP_UNUSED_37FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_37FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_38FF.S b/vm/mterp/x86/OP_UNUSED_38FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_38FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_39FF.S b/vm/mterp/x86/OP_UNUSED_39FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_39FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_3AFF.S b/vm/mterp/x86/OP_UNUSED_3AFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_3AFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_3BFF.S b/vm/mterp/x86/OP_UNUSED_3BFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_3BFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_3CFF.S b/vm/mterp/x86/OP_UNUSED_3CFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_3CFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_3DFF.S b/vm/mterp/x86/OP_UNUSED_3DFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_3DFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_3EFF.S b/vm/mterp/x86/OP_UNUSED_3EFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_3EFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_3FFF.S b/vm/mterp/x86/OP_UNUSED_3FFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_3FFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_40FF.S b/vm/mterp/x86/OP_UNUSED_40FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_40FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_41FF.S b/vm/mterp/x86/OP_UNUSED_41FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_41FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_42FF.S b/vm/mterp/x86/OP_UNUSED_42FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_42FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_43FF.S b/vm/mterp/x86/OP_UNUSED_43FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_43FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_44FF.S b/vm/mterp/x86/OP_UNUSED_44FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_44FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_45FF.S b/vm/mterp/x86/OP_UNUSED_45FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_45FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_46FF.S b/vm/mterp/x86/OP_UNUSED_46FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_46FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_47FF.S b/vm/mterp/x86/OP_UNUSED_47FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_47FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_48FF.S b/vm/mterp/x86/OP_UNUSED_48FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_48FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_49FF.S b/vm/mterp/x86/OP_UNUSED_49FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_49FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_4AFF.S b/vm/mterp/x86/OP_UNUSED_4AFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_4AFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_4BFF.S b/vm/mterp/x86/OP_UNUSED_4BFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_4BFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_4CFF.S b/vm/mterp/x86/OP_UNUSED_4CFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_4CFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_4DFF.S b/vm/mterp/x86/OP_UNUSED_4DFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_4DFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_4EFF.S b/vm/mterp/x86/OP_UNUSED_4EFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_4EFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_4FFF.S b/vm/mterp/x86/OP_UNUSED_4FFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_4FFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_50FF.S b/vm/mterp/x86/OP_UNUSED_50FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_50FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_51FF.S b/vm/mterp/x86/OP_UNUSED_51FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_51FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_52FF.S b/vm/mterp/x86/OP_UNUSED_52FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_52FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_53FF.S b/vm/mterp/x86/OP_UNUSED_53FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_53FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_54FF.S b/vm/mterp/x86/OP_UNUSED_54FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_54FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_55FF.S b/vm/mterp/x86/OP_UNUSED_55FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_55FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_56FF.S b/vm/mterp/x86/OP_UNUSED_56FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_56FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_57FF.S b/vm/mterp/x86/OP_UNUSED_57FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_57FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_58FF.S b/vm/mterp/x86/OP_UNUSED_58FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_58FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_59FF.S b/vm/mterp/x86/OP_UNUSED_59FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_59FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_5AFF.S b/vm/mterp/x86/OP_UNUSED_5AFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_5AFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_5BFF.S b/vm/mterp/x86/OP_UNUSED_5BFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_5BFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_5CFF.S b/vm/mterp/x86/OP_UNUSED_5CFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_5CFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_5DFF.S b/vm/mterp/x86/OP_UNUSED_5DFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_5DFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_5EFF.S b/vm/mterp/x86/OP_UNUSED_5EFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_5EFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_5FFF.S b/vm/mterp/x86/OP_UNUSED_5FFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_5FFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_60FF.S b/vm/mterp/x86/OP_UNUSED_60FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_60FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_61FF.S b/vm/mterp/x86/OP_UNUSED_61FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_61FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_62FF.S b/vm/mterp/x86/OP_UNUSED_62FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_62FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_63FF.S b/vm/mterp/x86/OP_UNUSED_63FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_63FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_64FF.S b/vm/mterp/x86/OP_UNUSED_64FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_64FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_65FF.S b/vm/mterp/x86/OP_UNUSED_65FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_65FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_66FF.S b/vm/mterp/x86/OP_UNUSED_66FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_66FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_67FF.S b/vm/mterp/x86/OP_UNUSED_67FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_67FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_68FF.S b/vm/mterp/x86/OP_UNUSED_68FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_68FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_69FF.S b/vm/mterp/x86/OP_UNUSED_69FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_69FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_6AFF.S b/vm/mterp/x86/OP_UNUSED_6AFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_6AFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_6BFF.S b/vm/mterp/x86/OP_UNUSED_6BFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_6BFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_6CFF.S b/vm/mterp/x86/OP_UNUSED_6CFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_6CFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_6DFF.S b/vm/mterp/x86/OP_UNUSED_6DFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_6DFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_6EFF.S b/vm/mterp/x86/OP_UNUSED_6EFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_6EFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_6FFF.S b/vm/mterp/x86/OP_UNUSED_6FFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_6FFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_70FF.S b/vm/mterp/x86/OP_UNUSED_70FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_70FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_71FF.S b/vm/mterp/x86/OP_UNUSED_71FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_71FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_72FF.S b/vm/mterp/x86/OP_UNUSED_72FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_72FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_73FF.S b/vm/mterp/x86/OP_UNUSED_73FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_73FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_74FF.S b/vm/mterp/x86/OP_UNUSED_74FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_74FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_75FF.S b/vm/mterp/x86/OP_UNUSED_75FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_75FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_76FF.S b/vm/mterp/x86/OP_UNUSED_76FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_76FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_77FF.S b/vm/mterp/x86/OP_UNUSED_77FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_77FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_78FF.S b/vm/mterp/x86/OP_UNUSED_78FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_78FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_79FF.S b/vm/mterp/x86/OP_UNUSED_79FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_79FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_7AFF.S b/vm/mterp/x86/OP_UNUSED_7AFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_7AFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_7BFF.S b/vm/mterp/x86/OP_UNUSED_7BFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_7BFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_7CFF.S b/vm/mterp/x86/OP_UNUSED_7CFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_7CFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_7DFF.S b/vm/mterp/x86/OP_UNUSED_7DFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_7DFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_7EFF.S b/vm/mterp/x86/OP_UNUSED_7EFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_7EFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_7FFF.S b/vm/mterp/x86/OP_UNUSED_7FFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_7FFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_80FF.S b/vm/mterp/x86/OP_UNUSED_80FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_80FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_81FF.S b/vm/mterp/x86/OP_UNUSED_81FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_81FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_82FF.S b/vm/mterp/x86/OP_UNUSED_82FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_82FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_83FF.S b/vm/mterp/x86/OP_UNUSED_83FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_83FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_84FF.S b/vm/mterp/x86/OP_UNUSED_84FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_84FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_85FF.S b/vm/mterp/x86/OP_UNUSED_85FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_85FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_86FF.S b/vm/mterp/x86/OP_UNUSED_86FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_86FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_87FF.S b/vm/mterp/x86/OP_UNUSED_87FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_87FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_88FF.S b/vm/mterp/x86/OP_UNUSED_88FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_88FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_89FF.S b/vm/mterp/x86/OP_UNUSED_89FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_89FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_8AFF.S b/vm/mterp/x86/OP_UNUSED_8AFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_8AFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_8BFF.S b/vm/mterp/x86/OP_UNUSED_8BFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_8BFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_8CFF.S b/vm/mterp/x86/OP_UNUSED_8CFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_8CFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_8DFF.S b/vm/mterp/x86/OP_UNUSED_8DFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_8DFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_8EFF.S b/vm/mterp/x86/OP_UNUSED_8EFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_8EFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_8FFF.S b/vm/mterp/x86/OP_UNUSED_8FFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_8FFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_90FF.S b/vm/mterp/x86/OP_UNUSED_90FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_90FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_91FF.S b/vm/mterp/x86/OP_UNUSED_91FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_91FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_92FF.S b/vm/mterp/x86/OP_UNUSED_92FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_92FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_93FF.S b/vm/mterp/x86/OP_UNUSED_93FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_93FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_94FF.S b/vm/mterp/x86/OP_UNUSED_94FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_94FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_95FF.S b/vm/mterp/x86/OP_UNUSED_95FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_95FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_96FF.S b/vm/mterp/x86/OP_UNUSED_96FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_96FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_97FF.S b/vm/mterp/x86/OP_UNUSED_97FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_97FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_98FF.S b/vm/mterp/x86/OP_UNUSED_98FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_98FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_99FF.S b/vm/mterp/x86/OP_UNUSED_99FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_99FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_9AFF.S b/vm/mterp/x86/OP_UNUSED_9AFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_9AFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_9BFF.S b/vm/mterp/x86/OP_UNUSED_9BFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_9BFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_9CFF.S b/vm/mterp/x86/OP_UNUSED_9CFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_9CFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_9DFF.S b/vm/mterp/x86/OP_UNUSED_9DFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_9DFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_9EFF.S b/vm/mterp/x86/OP_UNUSED_9EFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_9EFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_9FFF.S b/vm/mterp/x86/OP_UNUSED_9FFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_9FFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_A0FF.S b/vm/mterp/x86/OP_UNUSED_A0FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_A0FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_A1FF.S b/vm/mterp/x86/OP_UNUSED_A1FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_A1FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_A2FF.S b/vm/mterp/x86/OP_UNUSED_A2FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_A2FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_A3FF.S b/vm/mterp/x86/OP_UNUSED_A3FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_A3FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_A4FF.S b/vm/mterp/x86/OP_UNUSED_A4FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_A4FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_A5FF.S b/vm/mterp/x86/OP_UNUSED_A5FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_A5FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_A6FF.S b/vm/mterp/x86/OP_UNUSED_A6FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_A6FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_A7FF.S b/vm/mterp/x86/OP_UNUSED_A7FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_A7FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_A8FF.S b/vm/mterp/x86/OP_UNUSED_A8FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_A8FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_A9FF.S b/vm/mterp/x86/OP_UNUSED_A9FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_A9FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_AAFF.S b/vm/mterp/x86/OP_UNUSED_AAFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_AAFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_ABFF.S b/vm/mterp/x86/OP_UNUSED_ABFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_ABFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_ACFF.S b/vm/mterp/x86/OP_UNUSED_ACFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_ACFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_ADFF.S b/vm/mterp/x86/OP_UNUSED_ADFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_ADFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_AEFF.S b/vm/mterp/x86/OP_UNUSED_AEFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_AEFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_AFFF.S b/vm/mterp/x86/OP_UNUSED_AFFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_AFFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_B0FF.S b/vm/mterp/x86/OP_UNUSED_B0FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_B0FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_B1FF.S b/vm/mterp/x86/OP_UNUSED_B1FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_B1FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_B2FF.S b/vm/mterp/x86/OP_UNUSED_B2FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_B2FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_B3FF.S b/vm/mterp/x86/OP_UNUSED_B3FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_B3FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_B4FF.S b/vm/mterp/x86/OP_UNUSED_B4FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_B4FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_B5FF.S b/vm/mterp/x86/OP_UNUSED_B5FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_B5FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_B6FF.S b/vm/mterp/x86/OP_UNUSED_B6FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_B6FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_B7FF.S b/vm/mterp/x86/OP_UNUSED_B7FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_B7FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_B8FF.S b/vm/mterp/x86/OP_UNUSED_B8FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_B8FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_B9FF.S b/vm/mterp/x86/OP_UNUSED_B9FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_B9FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_BAFF.S b/vm/mterp/x86/OP_UNUSED_BAFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_BAFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_BBFF.S b/vm/mterp/x86/OP_UNUSED_BBFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_BBFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_BCFF.S b/vm/mterp/x86/OP_UNUSED_BCFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_BCFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_BDFF.S b/vm/mterp/x86/OP_UNUSED_BDFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_BDFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_BEFF.S b/vm/mterp/x86/OP_UNUSED_BEFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_BEFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_BFFF.S b/vm/mterp/x86/OP_UNUSED_BFFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_BFFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_C0FF.S b/vm/mterp/x86/OP_UNUSED_C0FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_C0FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_C1FF.S b/vm/mterp/x86/OP_UNUSED_C1FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_C1FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_C2FF.S b/vm/mterp/x86/OP_UNUSED_C2FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_C2FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_C3FF.S b/vm/mterp/x86/OP_UNUSED_C3FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_C3FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_C4FF.S b/vm/mterp/x86/OP_UNUSED_C4FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_C4FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_C5FF.S b/vm/mterp/x86/OP_UNUSED_C5FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_C5FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_C6FF.S b/vm/mterp/x86/OP_UNUSED_C6FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_C6FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_C7FF.S b/vm/mterp/x86/OP_UNUSED_C7FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_C7FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_C8FF.S b/vm/mterp/x86/OP_UNUSED_C8FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_C8FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_C9FF.S b/vm/mterp/x86/OP_UNUSED_C9FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_C9FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_CAFF.S b/vm/mterp/x86/OP_UNUSED_CAFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_CAFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_CBFF.S b/vm/mterp/x86/OP_UNUSED_CBFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_CBFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_CCFF.S b/vm/mterp/x86/OP_UNUSED_CCFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_CCFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_CDFF.S b/vm/mterp/x86/OP_UNUSED_CDFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_CDFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_CEFF.S b/vm/mterp/x86/OP_UNUSED_CEFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_CEFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_CFFF.S b/vm/mterp/x86/OP_UNUSED_CFFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_CFFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_D0FF.S b/vm/mterp/x86/OP_UNUSED_D0FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_D0FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_D1FF.S b/vm/mterp/x86/OP_UNUSED_D1FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_D1FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_D2FF.S b/vm/mterp/x86/OP_UNUSED_D2FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_D2FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_D3FF.S b/vm/mterp/x86/OP_UNUSED_D3FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_D3FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_D4FF.S b/vm/mterp/x86/OP_UNUSED_D4FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_D4FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_D5FF.S b/vm/mterp/x86/OP_UNUSED_D5FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_D5FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_D6FF.S b/vm/mterp/x86/OP_UNUSED_D6FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_D6FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_D7FF.S b/vm/mterp/x86/OP_UNUSED_D7FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_D7FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_D8FF.S b/vm/mterp/x86/OP_UNUSED_D8FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_D8FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_D9FF.S b/vm/mterp/x86/OP_UNUSED_D9FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_D9FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_DAFF.S b/vm/mterp/x86/OP_UNUSED_DAFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_DAFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_DBFF.S b/vm/mterp/x86/OP_UNUSED_DBFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_DBFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_DCFF.S b/vm/mterp/x86/OP_UNUSED_DCFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_DCFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_DDFF.S b/vm/mterp/x86/OP_UNUSED_DDFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_DDFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_DEFF.S b/vm/mterp/x86/OP_UNUSED_DEFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_DEFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_DFFF.S b/vm/mterp/x86/OP_UNUSED_DFFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_DFFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_E0FF.S b/vm/mterp/x86/OP_UNUSED_E0FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_E0FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_E1FF.S b/vm/mterp/x86/OP_UNUSED_E1FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_E1FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_E2FF.S b/vm/mterp/x86/OP_UNUSED_E2FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_E2FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_E3FF.S b/vm/mterp/x86/OP_UNUSED_E3FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_E3FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_E4FF.S b/vm/mterp/x86/OP_UNUSED_E4FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_E4FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_E5FF.S b/vm/mterp/x86/OP_UNUSED_E5FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_E5FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_E6FF.S b/vm/mterp/x86/OP_UNUSED_E6FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_E6FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_E7FF.S b/vm/mterp/x86/OP_UNUSED_E7FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_E7FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_E8FF.S b/vm/mterp/x86/OP_UNUSED_E8FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_E8FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_E9FF.S b/vm/mterp/x86/OP_UNUSED_E9FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_E9FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_EAFF.S b/vm/mterp/x86/OP_UNUSED_EAFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_EAFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_EBFF.S b/vm/mterp/x86/OP_UNUSED_EBFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_EBFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_ECFF.S b/vm/mterp/x86/OP_UNUSED_ECFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_ECFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_EDFF.S b/vm/mterp/x86/OP_UNUSED_EDFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_EDFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_EEFF.S b/vm/mterp/x86/OP_UNUSED_EEFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_EEFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_EFFF.S b/vm/mterp/x86/OP_UNUSED_EFFF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_EFFF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_F0FF.S b/vm/mterp/x86/OP_UNUSED_F0FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_F0FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_F1FF.S b/vm/mterp/x86/OP_UNUSED_F1FF.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/vm/mterp/x86/OP_UNUSED_F1FF.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_USHR_LONG.S b/vm/mterp/x86/OP_USHR_LONG.S
index e23de7e..4f3647e 100644
--- a/vm/mterp/x86/OP_USHR_LONG.S
+++ b/vm/mterp/x86/OP_USHR_LONG.S
@@ -9,27 +9,24 @@
      */
     /* shr-long vAA, vBB, vCC */
     /* ecx gets shift count */
-    /* Need to spill edx */
+    /* Need to spill rIBASE */
     /* rINSTw gets AA */
     movzbl    2(rPC),%eax               # eax<- BB
     movzbl    3(rPC),%ecx               # ecx<- CC
-    GET_VREG_WORD %edx %eax 1           # edx<- v[BB+1]
+    SPILL(rIBASE)
+    GET_VREG_WORD rIBASE %eax 1         # rIBASE<- v[BB+1]
     GET_VREG_R  %ecx %ecx               # ecx<- vCC
     GET_VREG_WORD %eax %eax 0           # eax<- v[BB+0]
-    shrdl     %edx,%eax
-    shrl      %cl,%edx
+    shrdl     rIBASE,%eax
+    shrl      %cl,rIBASE
     testb     $$32,%cl
     je        2f
-    movl      %edx,%eax
-    xorl      %edx,%edx
+    movl      rIBASE,%eax
+    xorl      rIBASE,rIBASE
 2:
-    SET_VREG_WORD %edx rINST 1          # v[AA+1]<- edx
-    FETCH_INST_OPCODE 2 %edx
-    jmp       .L${opcode}_finish
-%break
-
-
-.L${opcode}_finish:
+    SET_VREG_WORD rIBASE rINST 1          # v[AA+1]<- rIBASE
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)
     SET_VREG_WORD %eax rINST 0         # v[BB+0]<- eax
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_USHR_LONG_2ADDR.S b/vm/mterp/x86/OP_USHR_LONG_2ADDR.S
index b00ec61..d4396b4 100644
--- a/vm/mterp/x86/OP_USHR_LONG_2ADDR.S
+++ b/vm/mterp/x86/OP_USHR_LONG_2ADDR.S
@@ -5,28 +5,25 @@
      */
     /* shl-long/2addr vA, vB */
     /* ecx gets shift count */
-    /* Need to spill edx */
+    /* Need to spill rIBASE */
     /* rINSTw gets AA */
     movzbl    rINSTbl,%ecx             # ecx<- BA
     andb      $$0xf,rINSTbl            # rINST<- A
     GET_VREG_WORD %eax rINST 0         # eax<- v[AA+0]
     sarl      $$4,%ecx                 # ecx<- B
-    GET_VREG_WORD %edx rINST 1         # edx<- v[AA+1]
+    SPILL(rIBASE)
+    GET_VREG_WORD rIBASE rINST 1       # rIBASE<- v[AA+1]
     GET_VREG_R %ecx %ecx               # ecx<- vBB
-    shrdl     %edx,%eax
-    shrl      %cl,%edx
+    shrdl     rIBASE,%eax
+    shrl      %cl,rIBASE
     testb     $$32,%cl
     je        2f
-    movl      %edx,%eax
-    xorl      %edx,%edx
+    movl      rIBASE,%eax
+    xorl      rIBASE,rIBASE
 2:
-    SET_VREG_WORD %edx rINST 1         # v[AA+1]<- edx
-    jmp       .L${opcode}_finish
-%break
-
-
-.L${opcode}_finish:
-    FETCH_INST_OPCODE 1 %edx
+    SET_VREG_WORD rIBASE rINST 1       # v[AA+1]<- rIBASE
+    FETCH_INST_OPCODE 1 %ecx
+    UNSPILL(rIBASE)
     SET_VREG_WORD %eax rINST 0         # v[AA+0]<- eax
     ADVANCE_PC 1
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/OP_XOR_LONG.S b/vm/mterp/x86/OP_XOR_LONG.S
index e9fa59a..3cd5ffb 100644
--- a/vm/mterp/x86/OP_XOR_LONG.S
+++ b/vm/mterp/x86/OP_XOR_LONG.S
@@ -1,2 +1,2 @@
 %verify "executed"
-%include "x86/binopWide.S" {"instr1":"xorl (rFP,%ecx,4),%edx", "instr2":"xorl 4(rFP,%ecx,4),%eax"}
+%include "x86/binopWide.S" {"instr1":"xorl (rFP,%ecx,4),rIBASE", "instr2":"xorl 4(rFP,%ecx,4),%eax"}
diff --git a/vm/mterp/x86/alt_stub.S b/vm/mterp/x86/alt_stub.S
new file mode 100644
index 0000000..c29dbc9
--- /dev/null
+++ b/vm/mterp/x86/alt_stub.S
@@ -0,0 +1,13 @@
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckInst to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller-save and we need to reload it.
+ */
+    movl   rSELF, %eax
+    movl   rPC, OUT_ARG0(%esp)
+    movl   %eax, OUT_ARG1(%esp)
+    call   dvmCheckInst                            # (dPC, self)
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx), rIBASE # reload rIBASE
+    jmp    *dvmAsmInstructionStart+(${opnum}*4)
diff --git a/vm/mterp/x86/bindiv.S b/vm/mterp/x86/bindiv.S
index b2aafc3..cbd2f3d 100644
--- a/vm/mterp/x86/bindiv.S
+++ b/vm/mterp/x86/bindiv.S
@@ -9,6 +9,7 @@
     movzbl   3(rPC),%ecx            # ecx<- CC
     GET_VREG_R %eax %eax            # eax<- vBB
     GET_VREG_R %ecx %ecx            # eax<- vBB
+    SPILL(rIBASE)
     cmpl     $$0,%ecx
     je       common_errDivideByZero
     cmpl     $$-1,%ecx
@@ -16,14 +17,17 @@
     cmpl     $$0x80000000,%eax
     jne      .L${opcode}_continue_div
     movl     $special,$result
-    jmp      .L${opcode}_finish_div
+    SET_VREG $result rINST
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
 
-%break
 .L${opcode}_continue_div:
     cltd
     idivl   %ecx
-.L${opcode}_finish_div:
     SET_VREG $result rINST
-    FETCH_INST_OPCODE 2 %edx
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/bindiv2addr.S b/vm/mterp/x86/bindiv2addr.S
index fdbc4ec..cc415be 100644
--- a/vm/mterp/x86/bindiv2addr.S
+++ b/vm/mterp/x86/bindiv2addr.S
@@ -5,6 +5,7 @@
      */
     /* div/rem/2addr vA, vB */
     movzx    rINSTbl,%ecx          # eax<- BA
+    SPILL(rIBASE)
     sarl     $$4,%ecx              # ecx<- B
     GET_VREG_R %ecx %ecx           # eax<- vBB
     andb     $$0xf,rINSTbl         # rINST<- A
@@ -16,14 +17,17 @@
     cmpl     $$0x80000000,%eax
     jne      .L${opcode}_continue_div2addr
     movl     $special,$result
-    jmp      .L${opcode}_finish_div2addr
+    SET_VREG $result rINST
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
 
-%break
 .L${opcode}_continue_div2addr:
     cltd
     idivl   %ecx
-.L${opcode}_finish_div2addr:
     SET_VREG $result rINST
-    FETCH_INST_OPCODE 1 %edx
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 1 %ecx
     ADVANCE_PC 1
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/bindivLit16.S b/vm/mterp/x86/bindivLit16.S
index 70bf183..c935367 100644
--- a/vm/mterp/x86/bindivLit16.S
+++ b/vm/mterp/x86/bindivLit16.S
@@ -6,6 +6,7 @@
     /* div/rem/lit16 vA, vB, #+CCCC */
     /* Need A in rINST, ssssCCCC in ecx, vB in eax */
     movzbl   rINSTbl,%eax         # eax<- 000000BA
+    SPILL(rIBASE)
     sarl     $$4,%eax             # eax<- B
     GET_VREG_R %eax %eax          # eax<- vB
     movswl   2(rPC),%ecx          # ecx<- ssssCCCC
@@ -17,14 +18,17 @@
     cmpl     $$0x80000000,%eax
     jne      .L${opcode}_continue_div
     movl     $special,$result
-    jmp      .L${opcode}_finish_div
+    SET_VREG $result rINST
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
 
-%break
 .L${opcode}_continue_div:
     cltd
     idivl   %ecx
-.L${opcode}_finish_div:
     SET_VREG $result rINST
-    FETCH_INST_OPCODE 2 %edx
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/bindivLit8.S b/vm/mterp/x86/bindivLit8.S
index 4bd61a6..94adef4 100644
--- a/vm/mterp/x86/bindivLit8.S
+++ b/vm/mterp/x86/bindivLit8.S
@@ -6,6 +6,7 @@
     /* div/rem/lit8 vAA, vBB, #+CC */
     movzbl    2(rPC),%eax        # eax<- BB
     movsbl    3(rPC),%ecx        # ecx<- ssssssCC
+    SPILL(rIBASE)
     GET_VREG_R  %eax %eax        # eax<- rBB
     cmpl     $$0,%ecx
     je       common_errDivideByZero
@@ -14,14 +15,17 @@
     cmpl     $$-1,%ecx
     jne      .L${opcode}_continue_div
     movl     $special,$result
-    jmp      .L${opcode}_finish_div
+    SET_VREG $result rINST
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
 
-%break
 .L${opcode}_continue_div:
     cltd
     idivl   %ecx
-.L${opcode}_finish_div:
     SET_VREG $result rINST
-    FETCH_INST_OPCODE 2 %edx
+    UNSPILL(rIBASE)
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/binflop.S b/vm/mterp/x86/binflop.S
index 0bcbb07..a432956 100644
--- a/vm/mterp/x86/binflop.S
+++ b/vm/mterp/x86/binflop.S
@@ -8,7 +8,7 @@
     movzbl   3(rPC),%ecx          # ecx<- BB
     $load    (rFP,%eax,4)         # vCC to fp stack
     $instr   (rFP,%ecx,4)         # ex: faddp
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
     $store   (rFP,rINST,4)         # %st to vAA
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/binflop2addr.S b/vm/mterp/x86/binflop2addr.S
index 54ca894..c616d8b 100644
--- a/vm/mterp/x86/binflop2addr.S
+++ b/vm/mterp/x86/binflop2addr.S
@@ -10,7 +10,7 @@
     $load    (rFP,%ecx,4)          # vAA to fp stack
     sarl    $$4,rINST             # rINST<- B
     $instr   (rFP,rINST,4)         # ex: faddp
-    FETCH_INST_OPCODE 1 %edx
+    FETCH_INST_OPCODE 1 %eax
     ADVANCE_PC 1
     $store    (rFP,%ecx,4)         # %st to vA
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %eax
diff --git a/vm/mterp/x86/binop.S b/vm/mterp/x86/binop.S
index c7511e0..af2e908 100644
--- a/vm/mterp/x86/binop.S
+++ b/vm/mterp/x86/binop.S
@@ -13,7 +13,7 @@
     movzbl   3(rPC),%ecx   # ecx<- CC
     GET_VREG_R %eax %eax   # eax<- vBB
     $instr                 # ex: addl    (rFP,%ecx,4),%eax
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
     SET_VREG $result rINST
-    GOTO_NEXT_R %edx
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/binop1.S b/vm/mterp/x86/binop1.S
index c4d4b1e..38c2daf 100644
--- a/vm/mterp/x86/binop1.S
+++ b/vm/mterp/x86/binop1.S
@@ -9,7 +9,7 @@
     GET_VREG_R %eax %eax            # eax<- vBB
     GET_VREG_R %ecx %ecx            # eax<- vBB
     $instr                          # ex: addl    %ecx,%eax
-    FETCH_INST_OPCODE 2 %edx
-    ADVANCE_PC 2
     SET_VREG $result rINST
-    GOTO_NEXT_R %edx
+    FETCH_INST_OPCODE 2 %ecx
+    ADVANCE_PC 2
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/binop2addr.S b/vm/mterp/x86/binop2addr.S
index 0b040c7..7cd8c5b 100644
--- a/vm/mterp/x86/binop2addr.S
+++ b/vm/mterp/x86/binop2addr.S
@@ -17,8 +17,8 @@
     movzx   rINSTbl,%ecx               # ecx<- A+
     sarl    $$4,rINST                 # rINST<- B
     GET_VREG_R %eax rINST              # eax<- vB
-    FETCH_INST_OPCODE 1 %edx
     andb    $$0xf,%cl                  # ecx<- A
     $instr                             # for ex: addl   %eax,(rFP,%ecx,4)
+    FETCH_INST_OPCODE 1 %ecx
     ADVANCE_PC 1
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/binopLit16.S b/vm/mterp/x86/binopLit16.S
index 3c985cc..4ca6c77 100644
--- a/vm/mterp/x86/binopLit16.S
+++ b/vm/mterp/x86/binopLit16.S
@@ -16,6 +16,6 @@
     andb     $$0xf,rINSTbl              # rINST<- A
     $instr                              # for example: addl %ecx, %eax
     SET_VREG $result rINST
-    FETCH_INST_OPCODE 2 %edx
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/binopLit8.S b/vm/mterp/x86/binopLit8.S
index d0bce9e..08e2efd 100644
--- a/vm/mterp/x86/binopLit8.S
+++ b/vm/mterp/x86/binopLit8.S
@@ -14,7 +14,7 @@
     movsbl    3(rPC),%ecx              # ecx<- ssssssCC
     GET_VREG_R   %eax %eax             # eax<- rBB
     $instr                             # ex: addl %ecx,%eax
-    FETCH_INST_OPCODE 2 %edx
     SET_VREG   $result rINST
+    FETCH_INST_OPCODE 2 %ecx
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/binopWide.S b/vm/mterp/x86/binopWide.S
index 4131785..6114aff 100644
--- a/vm/mterp/x86/binopWide.S
+++ b/vm/mterp/x86/binopWide.S
@@ -5,12 +5,14 @@
 
     movzbl    2(rPC),%eax               # eax<- BB
     movzbl    3(rPC),%ecx               # ecx<- CC
-    GET_VREG_WORD %edx %eax 0           # edx<- v[BB+0]
+    SPILL(rIBASE)                       # save rIBASE
+    GET_VREG_WORD rIBASE %eax 0         # rIBASE<- v[BB+0]
     GET_VREG_WORD %eax %eax 1           # eax<- v[BB+1]
-    $instr1         # ex: addl   (rFP,%ecx,4),%edx
+    $instr1         # ex: addl   (rFP,%ecx,4),rIBASE
     $instr2         # ex: adcl   4(rFP,%ecx,4),%eax
-    SET_VREG_WORD %edx rINST 0          # v[AA+0] <- edx
-    FETCH_INST_OPCODE 2 %edx
+    SET_VREG_WORD rIBASE rINST 0        # v[AA+0] <- rIBASE
+    FETCH_INST_OPCODE 2 %ecx
+    UNSPILL(rIBASE)                     # restore rIBASE
     SET_VREG_WORD %eax rINST 1          # v[AA+1] <- eax
     ADVANCE_PC 2
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/binopWide2addr.S b/vm/mterp/x86/binopWide2addr.S
index be9084b..fa42644 100644
--- a/vm/mterp/x86/binopWide2addr.S
+++ b/vm/mterp/x86/binopWide2addr.S
@@ -9,6 +9,6 @@
     andb      $$0xF,rINSTbl             # rINST<- A
     $instr1         # example: addl   %eax,(rFP,rINST,4)
     $instr2         # example: adcl   %ecx,4(rFP,rINST,4)
-    FETCH_INST_OPCODE 1 %edx
+    FETCH_INST_OPCODE 1 %ecx
     ADVANCE_PC 1
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/cvtfp_int.S b/vm/mterp/x86/cvtfp_int.S
index 13dc936..7590943 100644
--- a/vm/mterp/x86/cvtfp_int.S
+++ b/vm/mterp/x86/cvtfp_int.S
@@ -20,7 +20,6 @@
     movb     $$0xc,%ah
     movw     %ax,LOCAL0_OFFSET+2(%ebp)
     fldcw    LOCAL0_OFFSET+2(%ebp)    # set "to zero" rounding mode
-    FETCH_INST_OPCODE 1 %edx
     andb     $$0xf,%cl                # ecx<- A
     .if $tgtlong
     fistpll  (rFP,%ecx,4)             # convert and store
@@ -28,11 +27,6 @@
     fistpl   (rFP,%ecx,4)             # convert and store
     .endif
     fldcw    LOCAL0_OFFSET(%ebp)      # restore previous rounding mode
-    jmp      .L${opcode}_continue
-%break
-
-
-.L${opcode}_continue:
     .if $tgtlong
     movl     $$0x80000000,%eax
     xorl     4(rFP,%ecx,4),%eax
@@ -43,8 +37,9 @@
     je       .L${opcode}_special_case # fix up result
 
 .L${opcode}_finish:
+    FETCH_INST_OPCODE 1 %ecx
     ADVANCE_PC 1
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
 
 .L${opcode}_special_case:
     fnstsw   %ax
diff --git a/vm/mterp/x86/entry.S b/vm/mterp/x86/entry.S
index 27ef51c..b1b89a8 100644
--- a/vm/mterp/x86/entry.S
+++ b/vm/mterp/x86/entry.S
@@ -19,15 +19,15 @@
     .global dvmMterpStdRun
     .type   dvmMterpStdRun, %function
 /*
- * bool dvmMterpStdRun(MterpGlue* glue)
+ * bool dvmMterpStdRun(Thread* self)
  *
  * Interpreter entry point.  Returns changeInterp.
  *
  */
 dvmMterpStdRun:
-    movl    4(%esp), %ecx        # get incoming rGLUE
+    movl    4(%esp), %ecx        # get incoming rSELF
     push    %ebp                 # save caller base pointer
-    push    %ecx                 # save rGLUE at (%ebp)
+    push    %ecx                 # save rSELF at (%ebp)
     movl    %esp, %ebp           # set our %ebp
 /*
  * At this point we've allocated two slots on the stack
@@ -43,14 +43,15 @@
     movl    %ebx,EBX_SPILL(%ebp)
 
 /* Set up "named" registers */
-    movl    offGlue_pc(%ecx),rPC
-    movl    offGlue_fp(%ecx),rFP
+    movl    offThread_pc(%ecx),rPC
+    movl    offThread_fp(%ecx),rFP
+    movl    offThread_curHandlerTable(%ecx),rIBASE
 
 /* Remember %esp for future "longjmp" */
-    movl    %esp,offGlue_bailPtr(%ecx)
+    movl    %esp,offThread_bailPtr(%ecx)
 
 /* How to start? */
-    movb    offGlue_entryPoint(%ecx),%al
+    movb    offThread_entryPoint(%ecx),%al
 
 /* Normal start? */
     cmpb    $$kInterpEntryInstr,%al
@@ -62,7 +63,7 @@
 
 .Lnot_instr:
     /* Reset to normal case */
-    movb   $$kInterpEntryInstr,offGlue_entryPoint(%ecx)
+    movb   $$kInterpEntryInstr,offThread_entryPoint(%ecx)
     cmpb   $$kInterpEntryReturn,%al
     je     common_returnFromMethod
     cmpb   $$kInterpEntryThrow,%al
@@ -78,7 +79,7 @@
     .global dvmMterpStdBail
     .type   dvmMterpStdBail, %function
 /*
- * void dvmMterpStdBail(MterpGlue* glue, bool changeInterp)
+ * void dvmMterpStdBail(Thread* self, bool changeInterp)
  *
  * Restore the stack pointer and PC from the save point established on entry.
  * This is essentially the same as a longjmp, but should be cheaper.  The
@@ -88,13 +89,13 @@
  * look a little strange.
  *
  * On entry:
- *  esp+4 (arg0)  MterpGlue* glue
+ *  esp+4 (arg0)  Thread* self
  *  esp+8 (arg1)  bool changeInterp
  */
 dvmMterpStdBail:
-    movl    4(%esp),%ecx                 # grab glue
+    movl    4(%esp),%ecx                 # grab self
     movl    8(%esp),%eax                 # changeInterp to return reg
-    movl    offGlue_bailPtr(%ecx),%esp   # Restore "setjmp" esp
+    movl    offThread_bailPtr(%ecx),%esp   # Restore "setjmp" esp
     movl    %esp,%ebp
     addl    $$(FRAME_SIZE-8), %ebp       # Restore %ebp at point of setjmp
     movl    EDI_SPILL(%ebp),%edi
@@ -112,269 +113,3 @@
 .LstrBadEntryPoint:
     .asciz  "Bad entry point %d\n"
 
-
-/*
- * FIXME: Should have the config/rebuild mechanism generate this
- * for targets that need it.
- */
-
-/* Jump table */
-dvmAsmInstructionJmpTable = .LdvmAsmInstructionJmpTable
-.LdvmAsmInstructionJmpTable:
-.long .L_OP_NOP
-.long .L_OP_MOVE
-.long .L_OP_MOVE_FROM16
-.long .L_OP_MOVE_16
-.long .L_OP_MOVE_WIDE
-.long .L_OP_MOVE_WIDE_FROM16
-.long .L_OP_MOVE_WIDE_16
-.long .L_OP_MOVE_OBJECT
-.long .L_OP_MOVE_OBJECT_FROM16
-.long .L_OP_MOVE_OBJECT_16
-.long .L_OP_MOVE_RESULT
-.long .L_OP_MOVE_RESULT_WIDE
-.long .L_OP_MOVE_RESULT_OBJECT
-.long .L_OP_MOVE_EXCEPTION
-.long .L_OP_RETURN_VOID
-.long .L_OP_RETURN
-.long .L_OP_RETURN_WIDE
-.long .L_OP_RETURN_OBJECT
-.long .L_OP_CONST_4
-.long .L_OP_CONST_16
-.long .L_OP_CONST
-.long .L_OP_CONST_HIGH16
-.long .L_OP_CONST_WIDE_16
-.long .L_OP_CONST_WIDE_32
-.long .L_OP_CONST_WIDE
-.long .L_OP_CONST_WIDE_HIGH16
-.long .L_OP_CONST_STRING
-.long .L_OP_CONST_STRING_JUMBO
-.long .L_OP_CONST_CLASS
-.long .L_OP_MONITOR_ENTER
-.long .L_OP_MONITOR_EXIT
-.long .L_OP_CHECK_CAST
-.long .L_OP_INSTANCE_OF
-.long .L_OP_ARRAY_LENGTH
-.long .L_OP_NEW_INSTANCE
-.long .L_OP_NEW_ARRAY
-.long .L_OP_FILLED_NEW_ARRAY
-.long .L_OP_FILLED_NEW_ARRAY_RANGE
-.long .L_OP_FILL_ARRAY_DATA
-.long .L_OP_THROW
-.long .L_OP_GOTO
-.long .L_OP_GOTO_16
-.long .L_OP_GOTO_32
-.long .L_OP_PACKED_SWITCH
-.long .L_OP_SPARSE_SWITCH
-.long .L_OP_CMPL_FLOAT
-.long .L_OP_CMPG_FLOAT
-.long .L_OP_CMPL_DOUBLE
-.long .L_OP_CMPG_DOUBLE
-.long .L_OP_CMP_LONG
-.long .L_OP_IF_EQ
-.long .L_OP_IF_NE
-.long .L_OP_IF_LT
-.long .L_OP_IF_GE
-.long .L_OP_IF_GT
-.long .L_OP_IF_LE
-.long .L_OP_IF_EQZ
-.long .L_OP_IF_NEZ
-.long .L_OP_IF_LTZ
-.long .L_OP_IF_GEZ
-.long .L_OP_IF_GTZ
-.long .L_OP_IF_LEZ
-.long .L_OP_UNUSED_3E
-.long .L_OP_UNUSED_3F
-.long .L_OP_UNUSED_40
-.long .L_OP_UNUSED_41
-.long .L_OP_UNUSED_42
-.long .L_OP_UNUSED_43
-.long .L_OP_AGET
-.long .L_OP_AGET_WIDE
-.long .L_OP_AGET_OBJECT
-.long .L_OP_AGET_BOOLEAN
-.long .L_OP_AGET_BYTE
-.long .L_OP_AGET_CHAR
-.long .L_OP_AGET_SHORT
-.long .L_OP_APUT
-.long .L_OP_APUT_WIDE
-.long .L_OP_APUT_OBJECT
-.long .L_OP_APUT_BOOLEAN
-.long .L_OP_APUT_BYTE
-.long .L_OP_APUT_CHAR
-.long .L_OP_APUT_SHORT
-.long .L_OP_IGET
-.long .L_OP_IGET_WIDE
-.long .L_OP_IGET_OBJECT
-.long .L_OP_IGET_BOOLEAN
-.long .L_OP_IGET_BYTE
-.long .L_OP_IGET_CHAR
-.long .L_OP_IGET_SHORT
-.long .L_OP_IPUT
-.long .L_OP_IPUT_WIDE
-.long .L_OP_IPUT_OBJECT
-.long .L_OP_IPUT_BOOLEAN
-.long .L_OP_IPUT_BYTE
-.long .L_OP_IPUT_CHAR
-.long .L_OP_IPUT_SHORT
-.long .L_OP_SGET
-.long .L_OP_SGET_WIDE
-.long .L_OP_SGET_OBJECT
-.long .L_OP_SGET_BOOLEAN
-.long .L_OP_SGET_BYTE
-.long .L_OP_SGET_CHAR
-.long .L_OP_SGET_SHORT
-.long .L_OP_SPUT
-.long .L_OP_SPUT_WIDE
-.long .L_OP_SPUT_OBJECT
-.long .L_OP_SPUT_BOOLEAN
-.long .L_OP_SPUT_BYTE
-.long .L_OP_SPUT_CHAR
-.long .L_OP_SPUT_SHORT
-.long .L_OP_INVOKE_VIRTUAL
-.long .L_OP_INVOKE_SUPER
-.long .L_OP_INVOKE_DIRECT
-.long .L_OP_INVOKE_STATIC
-.long .L_OP_INVOKE_INTERFACE
-.long .L_OP_UNUSED_73
-.long .L_OP_INVOKE_VIRTUAL_RANGE
-.long .L_OP_INVOKE_SUPER_RANGE
-.long .L_OP_INVOKE_DIRECT_RANGE
-.long .L_OP_INVOKE_STATIC_RANGE
-.long .L_OP_INVOKE_INTERFACE_RANGE
-.long .L_OP_UNUSED_79
-.long .L_OP_UNUSED_7A
-.long .L_OP_NEG_INT
-.long .L_OP_NOT_INT
-.long .L_OP_NEG_LONG
-.long .L_OP_NOT_LONG
-.long .L_OP_NEG_FLOAT
-.long .L_OP_NEG_DOUBLE
-.long .L_OP_INT_TO_LONG
-.long .L_OP_INT_TO_FLOAT
-.long .L_OP_INT_TO_DOUBLE
-.long .L_OP_LONG_TO_INT
-.long .L_OP_LONG_TO_FLOAT
-.long .L_OP_LONG_TO_DOUBLE
-.long .L_OP_FLOAT_TO_INT
-.long .L_OP_FLOAT_TO_LONG
-.long .L_OP_FLOAT_TO_DOUBLE
-.long .L_OP_DOUBLE_TO_INT
-.long .L_OP_DOUBLE_TO_LONG
-.long .L_OP_DOUBLE_TO_FLOAT
-.long .L_OP_INT_TO_BYTE
-.long .L_OP_INT_TO_CHAR
-.long .L_OP_INT_TO_SHORT
-.long .L_OP_ADD_INT
-.long .L_OP_SUB_INT
-.long .L_OP_MUL_INT
-.long .L_OP_DIV_INT
-.long .L_OP_REM_INT
-.long .L_OP_AND_INT
-.long .L_OP_OR_INT
-.long .L_OP_XOR_INT
-.long .L_OP_SHL_INT
-.long .L_OP_SHR_INT
-.long .L_OP_USHR_INT
-.long .L_OP_ADD_LONG
-.long .L_OP_SUB_LONG
-.long .L_OP_MUL_LONG
-.long .L_OP_DIV_LONG
-.long .L_OP_REM_LONG
-.long .L_OP_AND_LONG
-.long .L_OP_OR_LONG
-.long .L_OP_XOR_LONG
-.long .L_OP_SHL_LONG
-.long .L_OP_SHR_LONG
-.long .L_OP_USHR_LONG
-.long .L_OP_ADD_FLOAT
-.long .L_OP_SUB_FLOAT
-.long .L_OP_MUL_FLOAT
-.long .L_OP_DIV_FLOAT
-.long .L_OP_REM_FLOAT
-.long .L_OP_ADD_DOUBLE
-.long .L_OP_SUB_DOUBLE
-.long .L_OP_MUL_DOUBLE
-.long .L_OP_DIV_DOUBLE
-.long .L_OP_REM_DOUBLE
-.long .L_OP_ADD_INT_2ADDR
-.long .L_OP_SUB_INT_2ADDR
-.long .L_OP_MUL_INT_2ADDR
-.long .L_OP_DIV_INT_2ADDR
-.long .L_OP_REM_INT_2ADDR
-.long .L_OP_AND_INT_2ADDR
-.long .L_OP_OR_INT_2ADDR
-.long .L_OP_XOR_INT_2ADDR
-.long .L_OP_SHL_INT_2ADDR
-.long .L_OP_SHR_INT_2ADDR
-.long .L_OP_USHR_INT_2ADDR
-.long .L_OP_ADD_LONG_2ADDR
-.long .L_OP_SUB_LONG_2ADDR
-.long .L_OP_MUL_LONG_2ADDR
-.long .L_OP_DIV_LONG_2ADDR
-.long .L_OP_REM_LONG_2ADDR
-.long .L_OP_AND_LONG_2ADDR
-.long .L_OP_OR_LONG_2ADDR
-.long .L_OP_XOR_LONG_2ADDR
-.long .L_OP_SHL_LONG_2ADDR
-.long .L_OP_SHR_LONG_2ADDR
-.long .L_OP_USHR_LONG_2ADDR
-.long .L_OP_ADD_FLOAT_2ADDR
-.long .L_OP_SUB_FLOAT_2ADDR
-.long .L_OP_MUL_FLOAT_2ADDR
-.long .L_OP_DIV_FLOAT_2ADDR
-.long .L_OP_REM_FLOAT_2ADDR
-.long .L_OP_ADD_DOUBLE_2ADDR
-.long .L_OP_SUB_DOUBLE_2ADDR
-.long .L_OP_MUL_DOUBLE_2ADDR
-.long .L_OP_DIV_DOUBLE_2ADDR
-.long .L_OP_REM_DOUBLE_2ADDR
-.long .L_OP_ADD_INT_LIT16
-.long .L_OP_RSUB_INT
-.long .L_OP_MUL_INT_LIT16
-.long .L_OP_DIV_INT_LIT16
-.long .L_OP_REM_INT_LIT16
-.long .L_OP_AND_INT_LIT16
-.long .L_OP_OR_INT_LIT16
-.long .L_OP_XOR_INT_LIT16
-.long .L_OP_ADD_INT_LIT8
-.long .L_OP_RSUB_INT_LIT8
-.long .L_OP_MUL_INT_LIT8
-.long .L_OP_DIV_INT_LIT8
-.long .L_OP_REM_INT_LIT8
-.long .L_OP_AND_INT_LIT8
-.long .L_OP_OR_INT_LIT8
-.long .L_OP_XOR_INT_LIT8
-.long .L_OP_SHL_INT_LIT8
-.long .L_OP_SHR_INT_LIT8
-.long .L_OP_USHR_INT_LIT8
-.long .L_OP_IGET_VOLATILE
-.long .L_OP_IPUT_VOLATILE
-.long .L_OP_SGET_VOLATILE
-.long .L_OP_SPUT_VOLATILE
-.long .L_OP_IGET_OBJECT_VOLATILE
-.long .L_OP_IGET_WIDE_VOLATILE
-.long .L_OP_IPUT_WIDE_VOLATILE
-.long .L_OP_SGET_WIDE_VOLATILE
-.long .L_OP_SPUT_WIDE_VOLATILE
-.long .L_OP_BREAKPOINT
-.long .L_OP_THROW_VERIFICATION_ERROR
-.long .L_OP_EXECUTE_INLINE
-.long .L_OP_EXECUTE_INLINE_RANGE
-.long .L_OP_INVOKE_DIRECT_EMPTY
-.long .L_OP_RETURN_VOID_BARRIER
-.long .L_OP_IGET_QUICK
-.long .L_OP_IGET_WIDE_QUICK
-.long .L_OP_IGET_OBJECT_QUICK
-.long .L_OP_IPUT_QUICK
-.long .L_OP_IPUT_WIDE_QUICK
-.long .L_OP_IPUT_OBJECT_QUICK
-.long .L_OP_INVOKE_VIRTUAL_QUICK
-.long .L_OP_INVOKE_VIRTUAL_QUICK_RANGE
-.long .L_OP_INVOKE_SUPER_QUICK
-.long .L_OP_INVOKE_SUPER_QUICK_RANGE
-.long .L_OP_IPUT_OBJECT_VOLATILE
-.long .L_OP_SGET_OBJECT_VOLATILE
-.long .L_OP_SPUT_OBJECT_VOLATILE
-.long .L_OP_DISPATCH_FF
-
diff --git a/vm/mterp/x86/footer.S b/vm/mterp/x86/footer.S
index 6e2c5bd..643deb8 100644
--- a/vm/mterp/x86/footer.S
+++ b/vm/mterp/x86/footer.S
@@ -19,24 +19,138 @@
 
 #if defined(WITH_JIT)
 /*
- * Placeholder entries for x86 JIT
+ * JIT-related re-entries into the interpreter.  In general, if the
+ * exit from a translation can at some point be chained, the entry
+ * here requires that control arrived via a call, and that the "rp"
+ * on TOS is actually a pointer to a 32-bit cell containing the Dalvik PC
+ * of the next insn to handle.  If no chaining will happen, the entry
+ * should be reached via a direct jump and rPC set beforehand.
  */
+
     .global dvmJitToInterpPunt
+/*
+ * The compiler will generate a jump to this entry point when it is
+ * having difficulty translating a Dalvik instruction.  We must skip
+ * the code cache lookup & prevent chaining to avoid bouncing between
+ * the interpreter and code cache. rPC must be set on entry.
+ */
 dvmJitToInterpPunt:
+#if defined(WITH_JIT_TUNING)
+    movl   rPC, OUT_ARG0(%esp)
+    call   dvmBumpPunt
+#endif
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx),rIBASE
+    FETCH_INST_R %ecx
+    GOTO_NEXT_R %ecx
+
     .global dvmJitToInterpSingleStep
+/*
+ * Return to the interpreter to handle a single instruction.
+ * Should be reached via a call.
+ * On entry:
+ *   0(%esp)          <= native return address within trace
+ *   rPC              <= Dalvik PC of this instruction
+ *   OUT_ARG0+4(%esp) <= Dalvik PC of next instruction
+ */
 dvmJitToInterpSingleStep:
+    pop    %eax
+    movl   rSELF, %ecx
+    movl   OUT_ARG0(%esp), %edx
+    movl   %eax,offThread_jitResumeNPC(%ecx)
+    movl   %edx,offThread_jitResumeDPC(%ecx)
+    movl   $$kInterpEntryInstr,offThread_entryPoint(%ecx)
+    movl   $$1,rINST     # changeInterp <= true
+    jmp    common_gotoBail
+
     .global dvmJitToInterpNoChainNoProfile
+/*
+ * Return from the translation cache to the interpreter to do method
+ * invocation.  Check if the translation exists for the callee, but don't
+ * chain to it. rPC must be set on entry.
+ */
 dvmJitToInterpNoChainNoProfile:
+#if defined(WITH_JIT_TUNING)
+    call   dvmBumpNoChain
+#endif
+    movl   rPC,OUT_ARG0(%esp)
+    call   dvmJitGetTraceAddr        # is there a translation?
+    movl   rSELF,%ecx                # ecx <- self
+    movl   %eax,offThread_inJitCodeCache(%ecx)  # set inJitCodeCache flag
+    cmpl   $$0, %eax
+    jz     1f
+    call   *%eax                     # exec translation if we've got one
+    # won't return
+1:
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx),rIBASE
+    FETCH_INST_R %ecx
+    GOTO_NEXT_R %ecx
+
+/*
+ * Return from the translation cache and immediately request a
+ * translation for the exit target, but don't attempt to chain.
+ * rPC set on entry.
+ */
     .global dvmJitToInterpTraceSelectNoChain
 dvmJitToInterpTraceSelectNoChain:
+#if defined(WITH_JIT_TUNING)
+    call   dvmBumpNoChain
+#endif
+    movl   rPC,OUT_ARG0(%esp)
+    call   dvmJitGetTraceAddr # is there a translation?
+    movl   rSELF,%ecx
+    cmpl   $$0,%eax
+    movl   %eax,offThread_inJitCodeCache(%ecx)  # set inJitCodeCache flag
+    jz     1f
+    call   *%eax              # jump to translation
+    # won't return
+
+/* No Translation - request one */
+1:
+    GET_JIT_PROF_TABLE %ecx %eax
+    cmpl   $$0, %eax          # JIT enabled?
+    jnz    2f                 # Request one if so
+    movl   rSELF, %ecx
+    movl   offThread_curHandlerTable(%ecx),rIBASE
+    FETCH_INST_R %ecx         # Continue interpreting if not
+    GOTO_NEXT_R %ecx
+2:
+    movl   $$kJitTSelectRequestHot,rINST  # ask for trace select
+    jmp    common_selectTrace
+
+/*
+ * Return from the translation cache and immediately request a
+ * translation for the exit target.  Reached via a call, and
+ * (TOS)->rPC.
+ */
     .global dvmJitToInterpTraceSelect
 dvmJitToInterpTraceSelect:
+    pop    rINST           # save chain cell address in callee save reg
+    movl   (rINST),rPC
+    movl   rPC,OUT_ARG0(%esp)
+    call   dvmJitGetTraceAddr # is there a translation?
+    cmpl   $$0,%eax
+    jz     1b                 # no - ask for one
+    movl   %eax,OUT_ARG0(%esp)
+# FIXME - need to adjust rINST to beginning of sequence
+    movl   rINST,OUT_ARG1(%esp)
+    call   dvmJitChain        # Attempt dvmJitChain(codeAddr,chainAddr)
+    cmpl   $$0,%eax           # Success?
+    jz     toInterpreter      # didn't chain - interpret
+    call   *%eax
+    # won't return
+
+/*
+ * Placeholder entries for x86 JIT
+ */
     .global dvmJitToInterpBackwardBranch
 dvmJitToInterpBackwardBranch:
     .global dvmJitToInterpNormal
 dvmJitToInterpNormal:
     .global dvmJitToInterpNoChain
 dvmJitToInterpNoChain:
+toInterpreter:
     jmp  common_abort
 #endif
 
@@ -47,20 +161,94 @@
  *   ebx (a.k.a. rINST) -> PC adjustment in 16-bit words
  */
 common_backwardBranch:
-    movl    rGLUE,%ecx
-    call   common_periodicChecks  # Note: expects rPC to be preserved
+    movl    rSELF,%ecx
+    call   common_periodicChecks  # rPC and ecx/rSELF preserved
+#if defined(WITH_JIT)
+    GET_JIT_PROF_TABLE %ecx rIBASE
+    ADVANCE_PC_INDEXED rINST
+    cmpl   $$0,rIBASE
+    movl   offThread_curHandlerTable(%ecx),rIBASE
+    FETCH_INST
+    jz    1f                    # Profiling off - continue
+    .global updateProfile
+updateProfile:
+common_updateProfile:
+    # quick & dirty hash
+    movl   rPC, %eax
+    shrl   $$12, %eax
+    xorl   rPC, %eax
+    andl   $$((1<<JIT_PROF_SIZE_LOG_2)-1),%eax
+    decb   (%edx,%eax)
+    jz     2f
+1:
+    GOTO_NEXT
+2:
+/*
+ * Here, we switch to the debug interpreter to request
+ * trace selection.  First, though, check to see if there
+ * is already a native translation in place (and, if so,
+ * jump to it now).
+ */
+    GET_JIT_THRESHOLD %ecx rINST  # leaves rSELF in %ecx
+    EXPORT_PC
+    movb   rINSTbl,(%edx,%eax)   # reset counter
+    movl   %ecx,rINST            # preserve rSELF
+    movl   rPC,OUT_ARG0(%esp)
+    call   dvmJitGetTraceAddr  # already have one?
+    movl   %eax,offThread_inJitCodeCache(rINST)   # set the inJitCodeCache flag
+    cmpl   $$0,%eax
+    jz     1f
+    call   *%eax        # FIXME: decide call vs. jmp.  No return either way
+1:
+    movl   $$kJitTSelectRequest,%eax
+    # On entry, eax<- jitState, rPC valid
+common_selectTrace:
+
+    movl   rSELF,%ecx
+    movl   %eax,offThread_jitState(%ecx)
+    movl   $$kInterpEntryInstr,offThread_entryPoint(%ecx)
+    movl   $$1,rINST
+    jmp    common_gotoBail
+#else
+    movl   offThread_curHandlerTable(%ecx),rIBASE
     ADVANCE_PC_INDEXED rINST
     FETCH_INST
     GOTO_NEXT
+#endif
 
 
 
 /*
+ * Common code for jumbo method invocation.
+ *
+ * On entry:
+ *   eax = Method* methodToCall
+ *   rINSTw trashed, must reload
+ *   rIBASE trashed, must reload before resuming interpreter
+ */
+
+common_invokeMethodJumbo:
+.LinvokeNewJumbo:
+
+   /*
+    * prepare to copy args to "outs" area of current frame
+    */
+    movzwl      6(rPC),rINST            # rINST<- BBBB
+    movzwl      8(rPC), %ecx            # %ecx<- CCCC
+    ADVANCE_PC 2                        # adjust pc to make return similar
+    SAVEAREA_FROM_FP %edx               # %edx<- &StackSaveArea
+    test        rINST, rINST
+    movl        rINST, LOCAL0_OFFSET(%ebp) # LOCAL0_OFFSET(%ebp)<- BBBB
+    jz          .LinvokeArgsDone        # no args; jump to args done
+    jmp         .LinvokeRangeArgs       # handle args like invoke range
+
+/*
  * Common code for method invocation with range.
  *
  * On entry:
  *   eax = Method* methodToCall
  *   rINSTw trashed, must reload
+ *   rIBASE trashed, must reload before resuming interpreter
  */
 
 common_invokeMethodRange:
@@ -83,6 +271,7 @@
     * (very few methods have > 10 args; could unroll for common cases)
     */
 
+.LinvokeRangeArgs:
     movl        %ebx, LOCAL1_OFFSET(%ebp)       # LOCAL1_OFFSET(%ebp)<- save %ebx
     lea         (rFP, %ecx, 4), %ecx    # %ecx<- &vCCCC
     shll        $$2, LOCAL0_OFFSET(%ebp)        # LOCAL0_OFFSET(%ebp)<- offset
@@ -101,6 +290,7 @@
    /*
     * %eax is "Method* methodToCall", the method we're trying to call
     * prepare to copy args to "outs" area of current frame
+    * rIBASE trashed, must reload before resuming interpreter
     */
 
 common_invokeMethodNoRange:
@@ -168,11 +358,11 @@
     shl         $$2, %edx               # %edx<- update offset
     SAVEAREA_FROM_FP %eax               # %eax<- &StackSaveArea
     subl        %edx, %eax              # %eax<- newFP; (old savearea - regsSize)
-    movl        rGLUE,%edx              # %edx<- pMterpGlue
+    movl        rSELF,%edx              # %edx<- pthread
     movl        %eax, LOCAL1_OFFSET(%ebp)       # LOCAL1_OFFSET(%ebp)<- &outs
     subl        $$sizeofStackSaveArea, %eax # %eax<- newSaveArea (stack save area using newFP)
-    movl        offGlue_interpStackEnd(%edx), %edx # %edx<- glue->interpStackEnd
-    movl        %edx, LOCAL2_OFFSET(%ebp)       # LOCAL2_OFFSET<- glue->interpStackEnd
+    movl        offThread_interpStackEnd(%edx), %edx # %edx<- self->interpStackEnd
+    movl        %edx, LOCAL2_OFFSET(%ebp)       # LOCAL2_OFFSET<- self->interpStackEnd
     shl         $$2, %ecx               # %ecx<- update offset for outsSize
     movl        %eax, %edx              # %edx<- newSaveArea
     sub         %ecx, %eax              # %eax<- bottom; (newSaveArea - outsSize)
@@ -195,19 +385,19 @@
     jne         .LinvokeNative          # handle native call
 
    /*
-    * Update "glue" values for the new method
+    * Update "self" values for the new method
     * %eax=methodToCall, LOCAL1_OFFSET(%ebp)=newFp
     */
 
     movl        offMethod_clazz(%eax), %edx # %edx<- method->clazz
-    movl        rGLUE,%ecx                  # %ecx<- pMterpGlue
+    movl        rSELF,%ecx                  # %ecx<- pthread
     movl        offClassObject_pDvmDex(%edx), %edx # %edx<- method->clazz->pDvmDex
-    movl        %eax, offGlue_method(%ecx) # glue->method<- methodToCall
-    movl        %edx, offGlue_methodClassDex(%ecx) # glue->methodClassDex<- method->clazz->pDvmDex
+    movl        %eax, offThread_method(%ecx) # self->method<- methodToCall
+    movl        %edx, offThread_methodClassDex(%ecx) # self->methodClassDex<- method->clazz->pDvmDex
     movl        offMethod_insns(%eax), rPC # rPC<- methodToCall->insns
-    movl        offGlue_self(%ecx), %eax # %eax<- glue->self
     movl        LOCAL1_OFFSET(%ebp), rFP # rFP<- newFP
-    movl        rFP, offThread_curFrame(%eax) # glue->self->curFrame<- newFP
+    movl        rFP, offThread_curFrame(%ecx) # self->curFrame<- newFP
+    movl        offThread_curHandlerTable(%ecx),rIBASE
     FETCH_INST
     GOTO_NEXT                           # jump to methodToCall->insns
 
@@ -217,39 +407,38 @@
     */
 
 .LinvokeNative:
-    movl        rGLUE,%ecx              # %ecx<- pMterpGlue
+    movl        rSELF,%ecx              # %ecx<- pthread
     movl        %eax, OUT_ARG1(%esp)    # push parameter methodToCall
-    movl        offGlue_self(%ecx), %ecx        # %ecx<- glue->self
     movl        offThread_jniLocal_topCookie(%ecx), %eax # %eax<- self->localRef->...
     movl        %eax, offStackSaveArea_localRefCookie(%edx) # newSaveArea->localRefCookie<- top
     movl        %edx, OUT_ARG4(%esp)    # save newSaveArea
     movl        LOCAL1_OFFSET(%ebp), %edx # %edx<- newFP
-    movl        %edx, offThread_curFrame(%ecx)  # glue->self->curFrame<- newFP
-    movl        %ecx, OUT_ARG3(%esp)    # save glue->self
-    movl        %ecx, OUT_ARG2(%esp)    # push parameter glue->self
-    movl        rGLUE,%ecx              # %ecx<- pMterpGlue
+    movl        %edx, offThread_curFrame(%ecx)  # self->curFrame<- newFP
+    movl        %ecx, OUT_ARG3(%esp)    # save self
+    movl        %ecx, OUT_ARG2(%esp)    # push parameter self
+    movl        rSELF,%ecx              # %ecx<- pthread
     movl        OUT_ARG1(%esp), %eax    # %eax<- methodToCall
-    lea         offGlue_retval(%ecx), %ecx # %ecx<- &retval
-    movl        %ecx, OUT_ARG0(%esp)    # push parameter pMterpGlue
+    lea         offThread_retval(%ecx), %ecx # %ecx<- &retval
+    movl        %ecx, OUT_ARG0(%esp)    # push parameter pthread
     push        %edx                    # push parameter newFP
 
     call        *offMethod_nativeFunc(%eax) # call methodToCall->nativeFunc
     lea         4(%esp), %esp
     movl        OUT_ARG4(%esp), %ecx    # %ecx<- newSaveArea
-    movl        OUT_ARG3(%esp), %eax    # %eax<- glue->self
+    movl        OUT_ARG3(%esp), %eax    # %eax<- self
     movl        offStackSaveArea_localRefCookie(%ecx), %edx # %edx<- old top
     cmp         $$0, offThread_exception(%eax) # check for exception
-    movl        rFP, offThread_curFrame(%eax) # glue->self->curFrame<- rFP
+    movl        rFP, offThread_curFrame(%eax) # self->curFrame<- rFP
     movl        %edx, offThread_jniLocal_topCookie(%eax) # new top <- old top
     jne         common_exceptionThrown  # handle exception
-    FETCH_INST_OPCODE 3 %edx
+    movl        offThread_curHandlerTable(%eax),rIBASE
+    FETCH_INST_OPCODE 3 %ecx
     ADVANCE_PC 3
-    GOTO_NEXT_R %edx                    # jump to next instruction
+    GOTO_NEXT_R %ecx                    # jump to next instruction
 
 .LstackOverflow:    # eax=methodToCall
     movl        %eax, OUT_ARG1(%esp)    # push parameter methodToCall
-    movl        rGLUE,%eax              # %eax<- pMterpGlue
-    movl        offGlue_self(%eax), %eax # %eax<- glue->self
+    movl        rSELF,%eax              # %eax<- self
     movl        %eax, OUT_ARG0(%esp)    # push parameter self
     call        dvmHandleStackOverflow  # call: (Thread* self, Method* meth)
     jmp         common_exceptionThrown  # handle exception
@@ -260,8 +449,8 @@
  *
  * On entry:
  *   ebx  -> PC adjustment in 16-bit words (must be preserved)
- *   ecx  -> GLUE pointer
- *   reentry type, e.g. kInterpEntryInstr stored in rGLUE->entryPoint
+ *   ecx  -> SELF pointer
+ *   reentry type, e.g. kInterpEntryInstr stored in rSELF->entryPoint
  *
  * Note: A call will normally kill %eax and %ecx.  To
  *       streamline the normal case, this routine will preserve
@@ -269,37 +458,29 @@
  *       is a bit ugly, but will happen in the relatively uncommon path.
  * TODO: Basic-block style Jit will need a hook here as well.  Fold it into
  *       the suspendCount check so we can get both in 1 shot.
+ * TUNING: Improve scheduling here & do initial single test for all.
  */
 common_periodicChecks:
-    movl    offGlue_pSelfSuspendCount(%ecx),%eax    # eax <- &suspendCount
-    cmpl    $$0,(%eax)
+    cmpl    $$0,offThread_suspendCount(%ecx)     # non-zero suspendCount?
     jne     1f
 
 6:
-    movl   offGlue_pDebuggerActive(%ecx),%eax      # eax <- &DebuggerActive
-    movl   offGlue_pActiveProfilers(%ecx),%ecx     # ecx <- &ActiveProfilers
-    testl  %eax,%eax               # debugger enabled?
-    je     2f
-    movzbl (%eax),%eax             # get active count
-2:
-    orl    (%ecx),%eax             # eax <- debuggerActive | activeProfilers
-    movl   rGLUE,%ecx              # restore rGLUE
-    jne    3f                      # one or both active - switch interp
-
-5:
+    movl   offThread_pInterpBreak(%ecx),%eax    # eax <- &interpBreak
+    cmpl   $$0,(%eax)              # something interesting happening?
+    jne    3f                      # yes - switch interpreters
     ret
 
     /* Check for suspend */
 1:
     /*  At this point, the return pointer to the caller of
      *  common_periodicChecks is on the top of stack.  We need to preserve
-     *  GLUE(ecx).
+     *  SELF(ecx).
      *  The outgoing profile is:
      *      bool dvmCheckSuspendPending(Thread* self)
      *  Because we reached here via a call, go ahead and build a new frame.
      */
     EXPORT_PC                         # need for precise GC
-    movl    offGlue_self(%ecx),%eax      # eax<- glue->self
+    movl    %ecx,%eax                 # eax<- self
     push    %ebp
     movl    %esp,%ebp
     subl    $$24,%esp
@@ -307,7 +488,7 @@
     call    dvmCheckSuspendPending
     addl    $$24,%esp
     pop     %ebp
-    movl    rGLUE,%ecx
+    movl    rSELF,%ecx
 
     /*
      * Need to check to see if debugger or profiler flags got set
@@ -327,7 +508,7 @@
      */
 3:
     leal    (rPC,%ebx,2),rPC       # adjust pc to show target
-    movl    rGLUE,%ecx             # bail expect GLUE already loaded
+    movl    rSELF,%ecx             # bail expects SELF already loaded
     movl    $$1,rINST              # set changeInterp to true
     jmp     common_gotoBail
 
@@ -336,9 +517,9 @@
  * Common code for handling a return instruction
  */
 common_returnFromMethod:
-    movl    rGLUE,%ecx
+    movl    rSELF,%ecx
     /* Set entry mode in case we bail */
-    movb    $$kInterpEntryReturn,offGlue_entryPoint(%ecx)
+    movb    $$kInterpEntryReturn,offThread_entryPoint(%ecx)
     xorl    rINST,rINST   # zero offset in case we switch interps
     call    common_periodicChecks   # Note: expects %ecx to be preserved
 
@@ -349,17 +530,17 @@
     je      common_gotoBail    # break frame, bail out completely
 
     movl    offStackSaveArea_savedPc(%eax),rPC    # pc<- saveArea->savedPC
-    movl    offGlue_self(%ecx),%eax               # eax<- self
-    movl    rINST,offGlue_method(%ecx)  # glue->method = newSave->meethod
-    movl    rFP,offThread_curFrame(%eax)     # self->curFrame = fp
-    movl    offMethod_clazz(rINST),%eax      # eax<- method->clazz
-    FETCH_INST_OPCODE 3 %edx
-    movl    offClassObject_pDvmDex(%eax),%eax # eax<- method->clazz->pDvmDex
+    movl    rINST,offThread_method(%ecx)          # self->method = newSave->method
+    movl    rFP,offThread_curFrame(%ecx)          # self->curFrame = fp
+    movl    offMethod_clazz(rINST),%eax           # eax<- method->clazz
+    movl    offThread_curHandlerTable(%ecx),rIBASE
+    movl    offClassObject_pDvmDex(%eax),rINST    # rINST<- method->clazz->pDvmDex
+    FETCH_INST_OPCODE 3 %eax
+    movl    rINST,offThread_methodClassDex(%ecx)
     ADVANCE_PC 3
-    movl    %eax,offGlue_methodClassDex(%ecx)
     /* not bailing - restore entry mode to default */
-    movb    $$kInterpEntryInstr,offGlue_entryPoint(%ecx)
-    GOTO_NEXT_R %edx
+    movb    $$kInterpEntryInstr,offThread_entryPoint(%ecx)
+    GOTO_NEXT_R %eax
 
 /*
  * Prepare to strip the current frame and "longjump" back to caller of
@@ -367,24 +548,27 @@
  *
  * on entry:
  *    rINST holds changeInterp
- *    ecx holds glue pointer
+ *    ecx holds self pointer
  *
- * expected profile: dvmMterpStdBail(MterpGlue *glue, bool changeInterp)
+ * expected profile: dvmMterpStdBail(Thread *self, bool changeInterp)
  */
 common_gotoBail:
-    movl   rPC,offGlue_pc(%ecx)     # export state to glue
-    movl   rFP,offGlue_fp(%ecx)
-    movl   %ecx,OUT_ARG0(%esp)      # glue in arg0
+    movl   rPC,offThread_pc(%ecx)     # export state to self
+    movl   rFP,offThread_fp(%ecx)
+    movl   %ecx,OUT_ARG0(%esp)      # self in arg0
     movl   rINST,OUT_ARG1(%esp)     # changeInterp in arg1
     call   dvmMterpStdBail          # bail out....
 
 
 /*
- * After returning from a "glued" function, pull out the updated values
+ * After returning from a C helper call, pull out the updated values
  * and start executing at the next instruction.
  */
  common_resumeAfterGlueCall:
-     LOAD_PC_FP_FROM_GLUE
+     movl  rSELF, %eax
+     movl  offThread_pc(%eax),rPC
+     movl  offThread_fp(%eax),rFP
+     movl  offThread_curHandlerTable(%eax),rIBASE
      FETCH_INST
      GOTO_NEXT
 
@@ -393,36 +577,30 @@
  */
 common_errDivideByZero:
     EXPORT_PC
-    movl    $$.LstrArithmeticException,%eax
-    movl    %eax,OUT_ARG0(%esp)
     movl    $$.LstrDivideByZero,%eax
-    movl    %eax,OUT_ARG1(%esp)
-    call    dvmThrowException
+    movl    %eax,OUT_ARG0(%esp)
+    call    dvmThrowArithmeticException
     jmp     common_exceptionThrown
 
 /*
  * Attempt to allocate an array with a negative size.
+ * On entry, len in eax
  */
 common_errNegativeArraySize:
     EXPORT_PC
-    movl    $$.LstrNegativeArraySizeException,%eax
-    movl    %eax,OUT_ARG0(%esp)
-    xorl    %eax,%eax
-    movl    %eax,OUT_ARG1(%esp)
-    call    dvmThrowException
+    movl    %eax,OUT_ARG0(%esp)                  # arg0<- len
+    call    dvmThrowNegativeArraySizeException   # (len)
     jmp     common_exceptionThrown
 
 /*
 * Attempt to call a method that does not exist.
+ * On entry, method name in eax
  */
 common_errNoSuchMethod:
 
     EXPORT_PC
-    movl    $$.LstrNoSuchMethodError,%eax
     movl    %eax,OUT_ARG0(%esp)
-    xorl    %eax,%eax
-    movl    %eax,OUT_ARG1(%esp)
-    call    dvmThrowException
+    call    dvmThrowNoSuchMethodError
     jmp     common_exceptionThrown
 
 /*
@@ -431,11 +609,9 @@
  */
 common_errNullObject:
     EXPORT_PC
-    movl    $$.LstrNullPointerException,%eax
-    movl    %eax,OUT_ARG0(%esp)
     xorl    %eax,%eax
-    movl    %eax,OUT_ARG1(%esp)
-    call    dvmThrowException
+    movl    %eax,OUT_ARG0(%esp)
+    call    dvmThrowNullPointerException
     jmp     common_exceptionThrown
 
 /*
@@ -447,9 +623,9 @@
 common_errArrayIndex:
     EXPORT_PC
     movl    offArrayObject_length(%eax), %eax
-    movl    %ecx,OUT_ARG0(%esp)
-    movl    %eax,OUT_ARG1(%esp)
-    call    dvmThrowAIOOBE        # dvmThrowAIOO(index, length)
+    movl    %eax,OUT_ARG0(%esp)
+    movl    %ecx,OUT_ARG1(%esp)
+    call    dvmThrowArrayIndexOutOfBoundsException   # args (length, index)
     jmp     common_exceptionThrown
 
 /*
@@ -462,9 +638,9 @@
  * This does not return.
  */
 common_exceptionThrown:
-    movl    rGLUE,%ecx
-    movl    rPC,offGlue_pc(%ecx)
-    movl    rFP,offGlue_fp(%ecx)
+    movl    rSELF,%ecx
+    movl    rPC,offThread_pc(%ecx)
+    movl    rFP,offThread_fp(%ecx)
     movl    %ecx,OUT_ARG0(%esp)
     call    dvmMterp_exceptionThrown
     jmp     common_resumeAfterGlueCall
@@ -479,19 +655,7 @@
  */
 
     .section     .rodata
-.LstrNullPointerException:
-    .asciz    "Ljava/lang/NullPointerException;"
-.LstrArithmeticException:
-    .asciz  "Ljava/lang/ArithmeticException;"
 .LstrDivideByZero:
     .asciz  "divide by zero"
-.LstrNegativeArraySizeException:
-    .asciz  "Ljava/lang/NegativeArraySizeException;"
-.LstrInstantiationError:
-    .asciz  "Ljava/lang/InstantiationError;"
-.LstrNoSuchMethodError:
-    .asciz  "Ljava/lang/NoSuchMethodError;"
-.LstrInternalErrorA:
-    .asciz  "Ljava/lang/InternalError;"
 .LstrFilledNewArrayNotImplA:
     .asciz  "filled-new-array only implemented for 'int'"
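
The common_updateProfile path above keys a table of byte counters with a cheap
hash of the Dalvik PC and requests trace selection when a counter reaches zero.
A minimal standalone C sketch of that counting scheme (table size and threshold
values are illustrative, not the VM's actual tuning):

    #include <stdint.h>
    #include <string.h>

    #define JIT_PROF_SIZE_LOG_2 11                 /* illustrative */
    #define JIT_PROF_SIZE (1 << JIT_PROF_SIZE_LOG_2)

    static uint8_t profTable[JIT_PROF_SIZE];

    /* The VM fills the table with the JIT threshold at startup. */
    static void profTableInit(uint8_t threshold)
    {
        memset(profTable, threshold, sizeof profTable);
    }

    /* Cheap hash of the Dalvik PC, mirroring "(pc >> 12) ^ pc" masked to
     * the table size, as done in common_updateProfile. */
    static unsigned profHash(uintptr_t dalvikPC)
    {
        return (unsigned)((dalvikPC >> 12) ^ dalvikPC) & (JIT_PROF_SIZE - 1);
    }

    /* Returns 1 when this branch target has become hot; the caller then
     * resets the counter (movb rINSTbl,(%edx,%eax)) and asks for a trace. */
    static int noteBackwardBranch(uintptr_t dalvikPC, uint8_t jitThreshold)
    {
        unsigned idx = profHash(dalvikPC);
        if (--profTable[idx] == 0) {
            profTable[idx] = jitThreshold;
            return 1;                              /* request trace selection */
        }
        return 0;                                  /* keep interpreting */
    }
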
diff --git a/vm/mterp/x86/fpcvt.S b/vm/mterp/x86/fpcvt.S
index 22d09bd..983b9eb 100644
--- a/vm/mterp/x86/fpcvt.S
+++ b/vm/mterp/x86/fpcvt.S
@@ -7,8 +7,8 @@
     sarl     $$4,rINST         # rINST<- B
     $load    (rFP,rINST,4)      # %st0<- vB
     andb     $$0xf,%cl          # ecx<- A
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
     $instr
     $store  (rFP,%ecx,4)        # vA<- %st0
-    GOTO_NEXT_R %edx
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/header.S b/vm/mterp/x86/header.S
index cb2ddf8..0dd35b5 100644
--- a/vm/mterp/x86/header.S
+++ b/vm/mterp/x86/header.S
@@ -48,7 +48,7 @@
 Mterp notes:
 
 Some key interpreter variables will be assigned to registers.  Note that each
-will also have an associated spill location (mostly used useful for those assigned
+will also have an associated spill location (mostly useful for those assigned
 to callee save registers).
 
   nick     reg   purpose
@@ -57,30 +57,30 @@
   rINSTw   bx    first 16-bit code of current instruction
   rINSTbl  bl    opcode portion of instruction word
   rINSTbh  bh    high byte of inst word, usually contains src/tgt reg names
+  rIBASE   edx   base of instruction handler table
 
 Notes:
    o High order 16 bits of ebx must be zero on entry to handler
    o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
-   o eax, edx and ecx are scratch, rINSTw/ebx sometimes scratch
-   o rPC is in the caller save set, and will be killed across external calls. Don't
-     forget to SPILL/UNSPILL it around call points
+   o eax and ecx are scratch, rINSTw/ebx sometimes scratch
 
 */
 
-#define rGLUE    (%ebp)
+#define rSELF    (%ebp)
 #define rPC      %esi
 #define rFP      %edi
 #define rINST    %ebx
 #define rINSTw   %bx
 #define rINSTbh  %bh
 #define rINSTbl  %bl
+#define rIBASE   %edx
 
 
 /* Frame diagram while executing dvmMterpStdRun, high to low addresses */
 #define IN_ARG0        ( 12)
 #define CALLER_RP      (  8)
 #define PREV_FP        (  4)
-#define rGLUE_SPILL    (  0) /* <- dvmMterpStdRun ebp */
+#define rSELF_SPILL    (  0) /* <- dvmMterpStdRun ebp */
 /* Spill offsets relative to %ebp */
 #define EDI_SPILL      ( -4)
 #define ESI_SPILL      ( -8)
@@ -88,13 +88,13 @@
 #define rPC_SPILL      (-16)
 #define rFP_SPILL      (-20)
 #define rINST_SPILL    (-24)
-#define TMP_SPILL1     (-28)
-#define TMP_SPILL2     (-32)
-#define TMP_SPILL3     (-36)
-#define LOCAL0_OFFSET  (-40)
-#define LOCAL1_OFFSET  (-44)
-#define LOCAL2_OFFSET  (-48)
-#define LOCAL3_OFFSET  (-52)
+#define rIBASE_SPILL   (-28)
+#define TMP_SPILL1     (-32)
+#define TMP_SPILL2     (-36)
+#define TMP_SPILL3     (-40)
+#define LOCAL0_OFFSET  (-44)
+#define LOCAL1_OFFSET  (-48)
+#define LOCAL2_OFFSET  (-52)
 /* Out Arg offsets, relative to %sp */
 #define OUT_ARG4       ( 16)
 #define OUT_ARG3       ( 12)
@@ -112,17 +112,26 @@
 #define SPILL_TMP3(reg) movl reg,TMP_SPILL3(%ebp)
 #define UNSPILL_TMP3(reg) movl TMP_SPILL3(%ebp),reg
 
-/* save/restore the PC and/or FP from the glue struct */
-.macro SAVE_PC_FP_TO_GLUE _reg
-    movl     rGLUE,\_reg
-    movl     rPC,offGlue_pc(\_reg)
-    movl     rFP,offGlue_fp(\_reg)
+#if defined(WITH_JIT)
+.macro GET_JIT_PROF_TABLE _self _reg
+    movl    offThread_pJitProfTable(\_self),\_reg
+.endm
+.macro GET_JIT_THRESHOLD _self _reg
+    movl    offThread_jitThreshold(\_self),\_reg
+.endm
+#endif
+
+/* save/restore the PC and/or FP from the self struct */
+.macro SAVE_PC_FP_TO_SELF _reg
+    movl     rSELF,\_reg
+    movl     rPC,offThread_pc(\_reg)
+    movl     rFP,offThread_fp(\_reg)
 .endm
 
-.macro LOAD_PC_FP_FROM_GLUE
-    movl    rGLUE,rFP
-    movl    offGlue_pc(rFP),rPC
-    movl    offGlue_fp(rFP),rFP
+.macro LOAD_PC_FP_FROM_SELF
+    movl    rSELF,rFP
+    movl    offThread_pc(rFP),rPC
+    movl    offThread_fp(rFP),rFP
 .endm
 
 /* The interpreter assumes a properly aligned stack on entry, and
@@ -131,7 +140,7 @@
 
 /*
  * "export" the PC to the interpreted stack frame, f/b/o future exception
- * objects.  Must * be done *before* something calls dvmThrowException.
+ * objects.  Must be done *before* something throws.
  *
  * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e.
  * fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc)
@@ -207,7 +216,7 @@
 .macro GOTO_NEXT
      movzx   rINSTbl,%eax
      movzbl  rINSTbh,rINST
-     jmp     *dvmAsmInstructionJmpTable(,%eax,4)
+     jmp     *(rIBASE,%eax,4)
 .endm
 
    /*
@@ -216,7 +225,17 @@
     */
 .macro GOTO_NEXT_R _reg
      movzbl  1(rPC),rINST
-     jmp     *dvmAsmInstructionJmpTable(,\_reg,4)
+     jmp     *(rIBASE,\_reg,4)
+.endm
+
+   /*
+    * Jumbo version of GOTO_NEXT that assumes _reg preloaded with table
+    * offset of the jumbo instruction, which is the top half of the extended
+    * opcode + 0x100.  Loads rINST with BBBB field, similar to GOTO_NEXT_R
+    */
+.macro GOTO_NEXT_JUMBO_R _reg
+     movzwl  6(rPC),rINST
+     jmp     *(rIBASE,\_reg,4)
 .endm
 
 /*
@@ -238,310 +257,9 @@
     movl     \_reg,4*(\_offset)(rFP,\_vreg,4)
 .endm
 
-#if 1
-
-#define rFinish %edx
-
-/* Macros for x86-atom handlers */
-    /*
-    * Get the 32-bit value from a dalvik register.
-    */
-
-    .macro      GET_VREG _vreg
-    movl        (rFP,\_vreg, 4), \_vreg
-    .endm
-
-   /*
-    * Fetch the next instruction from the specified offset. Advances rPC
-    * to point to the next instruction. "_count" is in 16-bit code units.
-    *
-    * This must come AFTER anything that can throw an exception, or the
-    * exception catch may miss. (This also implies that it must come after
-    * EXPORT_PC())
-    */
-
-    .macro      FETCH_ADVANCE_INST _count
-    add         $$(\_count*2), rPC
-    movzwl      (rPC), rINST
-    .endm
-
-   /*
-    * Fetch the next instruction from an offset specified by _reg. Updates
-    * rPC to point to the next instruction. "_reg" must specify the distance
-    * in bytes, *not* 16-bit code units, and may be a signed value.
-    */
-
-    .macro      FETCH_ADVANCE_INST_RB _reg
-    addl        \_reg, rPC
-    movzwl      (rPC), rINST
-    .endm
-
-   /*
-    * Fetch a half-word code unit from an offset past the current PC. The
-    * "_count" value is in 16-bit code units. Does not advance rPC.
-    * For example, given instruction of format: AA|op BBBB, it
-    * fetches BBBB.
-    */
-
-    .macro      FETCH _count _reg
-    movzwl      (\_count*2)(rPC), \_reg
-    .endm
-
-   /*
-    * Fetch a half-word code unit from an offset past the current PC. The
-    * "_count" value is in 16-bit code units. Does not advance rPC.
-    * This variant treats the value as signed.
-    */
-
-    .macro      FETCHs _count _reg
-    movswl      (\_count*2)(rPC), \_reg
-    .endm
-
-   /*
-    * Fetch the first byte from an offset past the current PC. The
-    * "_count" value is in 16-bit code units. Does not advance rPC.
-    * For example, given instruction of format: AA|op CC|BB, it
-    * fetches BB.
-    */
-
-    .macro      FETCH_BB _count _reg
-    movzbl      (\_count*2)(rPC), \_reg
-    .endm
-
-    /*
-    * Fetch the second byte from an offset past the current PC. The
-    * "_count" value is in 16-bit code units. Does not advance rPC.
-    * For example, given instruction of format: AA|op CC|BB, it
-    * fetches CC.
-    */
-
-    .macro      FETCH_CC _count _reg
-    movzbl      (\_count*2 + 1)(rPC), \_reg
-    .endm
-
-   /*
-    * Fetch the second byte from an offset past the current PC. The
-    * "_count" value is in 16-bit code units. Does not advance rPC.
-    * This variant treats the value as signed.
-    */
-
-    .macro      FETCH_CCs _count _reg
-    movsbl      (\_count*2 + 1)(rPC), \_reg
-    .endm
-
-
-   /*
-    * Fetch one byte from an offset past the current PC.  Pass in the same
-    * "_count" as you would for FETCH, and an additional 0/1 indicating which
-    * byte of the halfword you want (lo/hi).
-    */
-
-    .macro      FETCH_B _reg  _count  _byte
-    movzbl      (\_count*2+\_byte)(rPC), \_reg
-    .endm
-
-   /*
-    * Put the instruction's opcode field into the specified register.
-    */
-
-    .macro      GET_INST_OPCODE _reg
-    movzbl      rINSTbl, \_reg
-    .endm
-
-   /*
-    * Begin executing the opcode in _reg.
-    */
-
-    .macro      GOTO_OPCODE _reg
-    shl         $$${handler_size_bits}, \_reg
-    addl        $$dvmAsmInstructionStart,\_reg
-    jmp         *\_reg
-    .endm
-
-
-
-   /*
-    * Macros pair attempts to speed up FETCH_INST, GET_INST_OPCODE and GOTO_OPCODE
-    * by using a jump table. _rFinish should must be the same register for
-    * both macros.
-    */
-
-    .macro      FFETCH _rFinish
-    movzbl      (rPC), \_rFinish
-    .endm
-
-    .macro      FGETOP_JMPa _rFinish
-    movzbl      1(rPC), rINST
-    jmp         *dvmAsmInstructionJmpTable(,\_rFinish, 4)
-    .endm
-
-   /*
-    * Macro pair attempts to speed up FETCH_INST, GET_INST_OPCODE and GOTO_OPCODE
-    * by using a jump table. _rFinish and _count should must be the same register for
-    * both macros.
-    */
-
-    .macro      FFETCH_ADV _count _rFinish
-    movzbl      (\_count*2)(rPC), \_rFinish
-    .endm
-
-    .macro      FGETOP_JMP _count _rFinish
-    movzbl      (\_count*2 + 1)(rPC), rINST
-    addl        $$(\_count*2), rPC
-    jmp         *dvmAsmInstructionJmpTable(,\_rFinish, 4)
-    .endm
-
-    .macro      FGETOP_JMP2 _rFinish
-    movzbl      1(rPC), rINST
-    jmp         *dvmAsmInstructionJmpTable(,\_rFinish, 4)
-    .endm
-
-    .macro      OLD_JMP_1 _count _rFinish
-    movzbl      (\_count*2)(rPC), \_rFinish
-    shl         $$${handler_size_bits}, \_rFinish
-    .endm
-
-    .macro      OLD_JMP_2 _rFinish
-    addl        $$dvmAsmInstructionStart,\_rFinish
-    .endm
-
-    .macro      OLD_JMP_3 _count
-    addl        $$(\_count*2), rPC
-    .endm
-
-    .macro      OLD_JMP_4 _rFinish
-    movzbl      1(rPC), rINST
-    jmp         *\_rFinish
-    .endm
-
-    .macro      OLD_JMP_A_1 _reg _rFinish
-    movzbl      (rPC, \_reg), \_rFinish
-    shl         $$${handler_size_bits}, \_rFinish
-    .endm
-
-    .macro      OLD_JMP_A_2 _rFinish
-    addl        $$dvmAsmInstructionStart,\_rFinish
-    .endm
-
-    .macro      OLD_JMP_A_3 _reg _rFinish
-    addl        \_reg, rPC
-    movzbl      1(rPC, \_reg), rINST
-    jmp         *\_rFinish
-    .endm
-
-   /*
-    * Macro pair attempts to speed up FETCH_INST, GET_INST_OPCODE and GOTO_OPCODE
-    * by using a jump table. _rFinish and _reg should must be the same register for
-    * both macros.
-    */
-
-    .macro      FFETCH_ADV_RB _reg _rFinish
-    movzbl      (\_reg, rPC), \_rFinish
-    .endm
-
-    .macro      FGETOP_RB_JMP _reg _rFinish
-    movzbl      1(\_reg, rPC), rINST
-    addl        \_reg, rPC
-    jmp         *dvmAsmInstructionJmpTable(,\_rFinish, 4)
-    .endm
-
-   /*
-    * Attempts to speed up FETCH_INST, GET_INST_OPCODE using
-    * a jump table. This macro should be called before FINISH_JMP where
-    * rFinish should be the same register containing the opcode value.
-    * This is an attempt to split up FINISH in order to reduce or remove
-    * potential stalls due to the wait for rFINISH.
-    */
-
-    .macro      FINISH_FETCH _rFinish
-    movzbl      (rPC), \_rFinish
-    movzbl      1(rPC), rINST
-    .endm
-
-
-   /*
-    * Attempts to speed up FETCH_ADVANCE_INST, GET_INST_OPCODE using
-    * a jump table. This macro should be called before FINISH_JMP where
-    * rFinish should be the same register containing the opcode value.
-    * This is an attempt to split up FINISH in order to reduce or remove
-    * potential stalls due to the wait for rFINISH.
-    */
-
-    .macro      FINISH_FETCH_ADVANCE _count _rFinish
-    movzbl      (\_count*2)(rPC), \_rFinish
-    movzbl      (\_count*2 + 1)(rPC), rINST
-    addl        $$(\_count*2), rPC
-    .endm
-
-   /*
-    * Attempts to speed up FETCH_ADVANCE_INST_RB, GET_INST_OPCODE using
-    * a jump table. This macro should be called before FINISH_JMP where
-    * rFinish should be the same register containing the opcode value.
-    * This is an attempt to split up FINISH in order to reduce or remove
-    * potential stalls due to the wait for rFINISH.
-    */
-
-    .macro      FINISH_FETCH_ADVANCE_RB _reg _rFinish
-    movzbl      (\_reg, rPC), \_rFinish
-    movzbl      1(\_reg, rPC), rINST
-    addl        \_reg, rPC
-    .endm
-
-   /*
-    * Attempts to speed up GOTO_OPCODE using a jump table. This macro should
-    * be called after a FINISH_FETCH* instruction where rFinish should be the
-    * same register containing the opcode value. This is an attempt to split up
-    * FINISH in order to reduce or remove potential stalls due to the wait for rFINISH.
-    */
-
-    .macro      FINISH_JMP _rFinish
-    jmp         *dvmAsmInstructionJmpTable(,\_rFinish, 4)
-    .endm
-
-   /*
-    * Attempts to speed up FETCH_INST, GET_INST_OPCODE, GOTO_OPCODE by using
-    * a jump table. Uses a single macro - but it should be faster if we
-    * split up the fetch for rFinish and the jump using rFinish.
-    */
-
-    .macro      FINISH_A
-    movzbl      (rPC), rFinish
-    movzbl      1(rPC), rINST
-    jmp         *dvmAsmInstructionJmpTable(,rFinish, 4)
-    .endm
-
-   /*
-    * Attempts to speed up FETCH_ADVANCE_INST, GET_INST_OPCODE,
-    * GOTO_OPCODE by using a jump table. Uses a single macro -
-    * but it should be faster if we split up the fetch for rFinish
-    * and the jump using rFinish.
-    */
-
-    .macro      FINISH _count
-    movzbl      (\_count*2)(rPC), rFinish
-    movzbl      (\_count*2 + 1)(rPC), rINST
-    addl        $$(\_count*2), rPC
-    jmp         *dvmAsmInstructionJmpTable(,rFinish, 4)
-    .endm
-
-   /*
-    * Attempts to speed up FETCH_ADVANCE_INST_RB, GET_INST_OPCODE,
-    * GOTO_OPCODE by using a jump table. Uses a single macro -
-    * but it should be faster if we split up the fetch for rFinish
-    * and the jump using rFinish.
-    */
-
-    .macro      FINISH_RB _reg _rFinish
-    movzbl      (\_reg, rPC), \_rFinish
-    movzbl      1(\_reg, rPC), rINST
-    addl        \_reg, rPC
-    jmp         *dvmAsmInstructionJmpTable(,\_rFinish, 4)
-    .endm
-
 #define sReg0 LOCAL0_OFFSET(%ebp)
 #define sReg1 LOCAL1_OFFSET(%ebp)
 #define sReg2 LOCAL2_OFFSET(%ebp)
-#define sReg3 LOCAL3_OFFSET(%ebp)
 
    /*
     * Hard coded helper values.
@@ -577,7 +295,6 @@
 
 .LintMax:
 .long   0x7FFFFFFF
-#endif
 
 
 /*
@@ -585,3 +302,7 @@
  * to expand the macros into assembler assignment statements.
  */
 #include "../common/asm-constants.h"
+
+#if defined(WITH_JIT)
+#include "../common/jit-config.h"
+#endif
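
The key structural change in this header is rIBASE: the dispatch table base now
lives in %edx and is reloaded from the per-thread curHandlerTable field, so
GOTO_NEXT is a single indexed indirect jump and switching handler sets (e.g.
for debugging or trace building) only needs a store to the thread. A
self-contained C model of that dispatch scheme (types, opcode, and handler are
illustrative):

    #include <stdio.h>
    #include <stdint.h>

    typedef struct Thread Thread;
    typedef void (*Handler)(Thread* self, const uint16_t* pc);

    struct Thread {
        const Handler* curHandlerTable;   /* analogue of offThread_curHandlerTable */
    };

    static void handleNop(Thread* self, const uint16_t* pc)
    {
        (void)self;
        printf("nop at %p\n", (const void*)pc);
    }

    /* Mirrors GOTO_NEXT: index the current table by the opcode byte. */
    static void dispatch(Thread* self, const uint16_t* pc)
    {
        unsigned opcode = *pc & 0xff;              /* low byte, like rINSTbl */
        self->curHandlerTable[opcode](self, pc);   /* jmp *(rIBASE,%eax,4) */
    }

    int main(void)
    {
        static Handler table[256];
        for (int i = 0; i < 256; i++) table[i] = handleNop;

        Thread self = { table };
        uint16_t insns[] = { 0x0000 };
        dispatch(&self, insns);
        return 0;
    }
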
diff --git a/vm/mterp/x86/shop2addr.S b/vm/mterp/x86/shop2addr.S
index c1c0e17..c891259 100644
--- a/vm/mterp/x86/shop2addr.S
+++ b/vm/mterp/x86/shop2addr.S
@@ -9,7 +9,7 @@
     andb     $$0xf,rINSTbl          # rINST<- A
     GET_VREG_R %eax rINST           # eax<- vAA
     $instr                          # ex: sarl %cl,%eax
-    FETCH_INST_OPCODE 1 %edx
+    FETCH_INST_OPCODE 1 %ecx
     SET_VREG $result rINST
     ADVANCE_PC 1
-    GOTO_NEXT_R %edx
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/stub.S b/vm/mterp/x86/stub.S
index 886bdf7..fb5c977 100644
--- a/vm/mterp/x86/stub.S
+++ b/vm/mterp/x86/stub.S
@@ -1,8 +1,9 @@
     /* (stub) */
-    SAVE_PC_FP_TO_GLUE %ecx          # leaves rGLUE in %ecx
-    movl %ecx,OUT_ARG0(%esp)         # glue is first arg to function
+    SAVE_PC_FP_TO_SELF %ecx          # leaves rSELF in %ecx
+    movl %ecx,OUT_ARG0(%esp)         # self is first arg to function
     call      dvmMterp_${opcode}     # do the real work
-    mov       rGLUE,%ecx
-    LOAD_PC_FP_FROM_GLUE             # retrieve updated values
+    movl      rSELF,%ecx
+    LOAD_PC_FP_FROM_SELF             # retrieve updated values
+    movl      offThread_curHandlerTable(%ecx),rIBASE  # set up rIBASE
     FETCH_INST
     GOTO_NEXT
diff --git a/vm/mterp/x86/unop.S b/vm/mterp/x86/unop.S
index faa41f4..ad9c79b 100644
--- a/vm/mterp/x86/unop.S
+++ b/vm/mterp/x86/unop.S
@@ -8,10 +8,10 @@
     sarl     $$4,rINST             # rINST<- B
     GET_VREG_R %eax rINST           # eax<- vB
     andb     $$0xf,%cl              # ecx<- A
-    FETCH_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
     $pre0
     $pre1
     $instr
     SET_VREG %eax %ecx
-    GOTO_NEXT_R %edx
+    FETCH_INST_OPCODE 1 %ecx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %ecx
diff --git a/vm/mterp/x86/unopWide.S b/vm/mterp/x86/unopWide.S
index 385b4ba..41b5d10 100644
--- a/vm/mterp/x86/unopWide.S
+++ b/vm/mterp/x86/unopWide.S
@@ -14,8 +14,8 @@
     $instr1   # ex: negl %eax
     $instr2   # ex: adcl $$0,%ecx
     $instr3   # ex: negl %ecx
-    GET_INST_OPCODE 1 %edx
-    ADVANCE_PC 1
     SET_VREG_WORD %eax rINST 0        # v[A+0] <- eax
+    GET_INST_OPCODE 1 %eax
     SET_VREG_WORD %ecx rINST 1        # v[A+1] <- ecx
-    GOTO_NEXT_R %edx
+    ADVANCE_PC 1
+    GOTO_NEXT_R %eax
diff --git a/vm/native/InternalNative.c b/vm/native/InternalNative.c
index 9dc61d8..447131f 100644
--- a/vm/native/InternalNative.c
+++ b/vm/native/InternalNative.c
@@ -28,10 +28,12 @@
 static DalvikNativeClass gDvmNativeMethodSet[] = {
     { "Ljava/lang/Object;",               dvm_java_lang_Object, 0 },
     { "Ljava/lang/Class;",                dvm_java_lang_Class, 0 },
+    { "Ljava/lang/Double;",               dvm_java_lang_Double, 0 },
+    { "Ljava/lang/Float;",                dvm_java_lang_Float, 0 },
+    { "Ljava/lang/Math;",                 dvm_java_lang_Math, 0 },
     { "Ljava/lang/Runtime;",              dvm_java_lang_Runtime, 0 },
     { "Ljava/lang/String;",               dvm_java_lang_String, 0 },
     { "Ljava/lang/System;",               dvm_java_lang_System, 0 },
-    { "Ljava/lang/SystemProperties;",     dvm_java_lang_SystemProperties, 0 },
     { "Ljava/lang/Throwable;",            dvm_java_lang_Throwable, 0 },
     { "Ljava/lang/VMClassLoader;",        dvm_java_lang_VMClassLoader, 0 },
     { "Ljava/lang/VMThread;",             dvm_java_lang_VMThread, 0 },
@@ -142,8 +144,7 @@
 void dvmAbstractMethodStub(const u4* args, JValue* pResult)
 {
     LOGD("--- called into dvmAbstractMethodStub\n");
-    dvmThrowException("Ljava/lang/AbstractMethodError;",
-        "abstract method not implemented");
+    dvmThrowAbstractMethodError("abstract method not implemented");
 }
 
 
@@ -155,19 +156,21 @@
  */
 bool dvmVerifyObjectInClass(Object* obj, ClassObject* clazz)
 {
-    const char* exceptionClass = NULL;
+    ClassObject* exceptionClass = NULL;
+
     if (obj == NULL) {
-        exceptionClass = "Ljava/lang/NullPointerException;";
+        exceptionClass = gDvm.exNullPointerException;
     } else if (!dvmInstanceof(obj->clazz, clazz)) {
-        exceptionClass = "Ljava/lang/IllegalArgumentException;";
+        exceptionClass = gDvm.exIllegalArgumentException;
     }
+
     if (exceptionClass != NULL) {
         char* expectedClassName = dvmHumanReadableDescriptor(clazz->descriptor);
         char* actualClassName = (obj != NULL)
             ? dvmHumanReadableDescriptor(obj->clazz->descriptor)
             : strdup("null");
         dvmThrowExceptionFmt(exceptionClass,
-            "expected receiver of type %s, not %s",
+            "expected receiver of type %s, but got %s",
             expectedClassName, actualClassName);
         free(expectedClassName);
         free(actualClassName);
@@ -177,40 +180,6 @@
 }
 
 /*
- * Validate a "binary" class name, e.g. "java.lang.String" or "[I".
- */
-static bool validateClassName(const char* name)
-{
-    int len = strlen(name);
-    int i = 0;
-
-    /* check for reasonable array types */
-    if (name[0] == '[') {
-        while (name[i] == '[')
-            i++;
-
-        if (name[i] == 'L') {
-            /* array of objects, make sure it ends well */
-            if (name[len-1] != ';')
-                return false;
-        } else if (strchr(PRIM_TYPE_TO_LETTER, name[i]) != NULL) {
-            if (i != len-1)
-                return false;
-        } else {
-            return false;
-        }
-    }
-
-    /* quick check for illegal chars */
-    for ( ; i < len; i++) {
-        if (name[i] == '/')
-            return false;
-    }
-
-    return true;
-}
-
-/*
  * Find a class by name, initializing it if requested.
  */
 ClassObject* dvmFindClassByName(StringObject* nameObj, Object* loader,
@@ -221,7 +190,7 @@
     char* descriptor = NULL;
 
     if (nameObj == NULL) {
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         goto bail;
     }
     name = dvmCreateCstrFromString(nameObj);
@@ -231,9 +200,9 @@
      * is especially handy for array types, since we want to avoid
      * auto-generating bogus array classes.
      */
-    if (!validateClassName(name)) {
+    if (!dexIsValidClassName(name, true)) {
         LOGW("dvmFindClassByName rejecting '%s'\n", name);
-        dvmThrowException("Ljava/lang/ClassNotFoundException;", name);
+        dvmThrowClassNotFoundException(name);
         goto bail;
     }
 
@@ -253,8 +222,7 @@
         Object* oldExcep = dvmGetException(self);
         dvmAddTrackedAlloc(oldExcep, self);     /* don't let this be GCed */
         dvmClearException(self);
-        dvmThrowChainedException("Ljava/lang/ClassNotFoundException;",
-            name, oldExcep);
+        dvmThrowChainedClassNotFoundException(name, oldExcep);
         dvmReleaseTrackedAlloc(oldExcep, self);
     } else {
         LOGVV("GOOD: load %s (%d) --> %p ldr=%p\n",
@@ -291,67 +259,3 @@
 
     return flags & JAVA_FLAGS_MASK;
 }
-
-
-#define NUM_DOPRIV_FUNCS    4
-
-/*
- * Determine if "method" is a "privileged" invocation, i.e. is it one
- * of the variations of AccessController.doPrivileged().
- *
- * Because the security stuff pulls in a pile of stuff that we may not
- * want or need, we don't do the class/method lookups at init time, but
- * instead on first use.
- */
-bool dvmIsPrivilegedMethod(const Method* method)
-{
-    int i;
-
-    assert(method != NULL);
-
-    if (!gDvm.javaSecurityAccessControllerReady) {
-        /*
-         * Populate on first use.  No concurrency risk since we're just
-         * finding pointers to fixed structures.
-         */
-        static const char* kSignatures[NUM_DOPRIV_FUNCS] = {
-            "(Ljava/security/PrivilegedAction;)Ljava/lang/Object;",
-            "(Ljava/security/PrivilegedExceptionAction;)Ljava/lang/Object;",
-            "(Ljava/security/PrivilegedAction;Ljava/security/AccessControlContext;)Ljava/lang/Object;",
-            "(Ljava/security/PrivilegedExceptionAction;Ljava/security/AccessControlContext;)Ljava/lang/Object;",
-        };
-        ClassObject* clazz;
-
-        clazz = dvmFindClassNoInit("Ljava/security/AccessController;", NULL);
-        if (clazz == NULL) {
-            LOGW("Couldn't find java/security/AccessController\n");
-            return false;
-        }
-
-        assert(NELEM(gDvm.methJavaSecurityAccessController_doPrivileged) ==
-               NELEM(kSignatures));
-
-        /* verify init */
-        for (i = 0; i < NUM_DOPRIV_FUNCS; i++) {
-            gDvm.methJavaSecurityAccessController_doPrivileged[i] =
-                dvmFindDirectMethodByDescriptor(clazz, "doPrivileged", kSignatures[i]);
-            if (gDvm.methJavaSecurityAccessController_doPrivileged[i] == NULL) {
-                LOGW("Warning: couldn't find java/security/AccessController"
-                    ".doPrivileged %s\n", kSignatures[i]);
-                return false;
-            }
-        }
-
-        /* all good, raise volatile readiness flag */
-        android_atomic_release_store(true,
-            &gDvm.javaSecurityAccessControllerReady);
-    }
-
-    for (i = 0; i < NUM_DOPRIV_FUNCS; i++) {
-        if (gDvm.methJavaSecurityAccessController_doPrivileged[i] == method) {
-            //LOGI("+++ doPriv match\n");
-            return true;
-        }
-    }
-    return false;
-}
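
The InternalNative.c changes follow the patch-wide conversion from
descriptor-string throws to typed helpers and to ClassObject pointers cached in
gDvm. A hedged before/after sketch of the pattern, assuming Dalvik's internal
declarations are in scope (expectedClassName and actualClassName are the locals
from dvmVerifyObjectInClass above):

    /* Old style: resolve the exception class by descriptor at throw time. */
    dvmThrowException("Ljava/lang/NullPointerException;", "name is null");

    /* New style: one dedicated helper per common exception type. */
    dvmThrowNullPointerException("name is null");

    /* Formatted variant, using a class resolved once at VM startup: */
    dvmThrowExceptionFmt(gDvm.exIllegalArgumentException,
        "expected receiver of type %s, but got %s",
        expectedClassName, actualClassName);
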
diff --git a/vm/native/InternalNativePriv.h b/vm/native/InternalNativePriv.h
index 0e54081..0cbb763 100644
--- a/vm/native/InternalNativePriv.h
+++ b/vm/native/InternalNativePriv.h
@@ -36,6 +36,18 @@
 #define RETURN_DOUBLE(_val)     do { pResult->d = (_val); return; } while(0)
 #define RETURN_PTR(_val)        do { pResult->l = (_val); return; } while(0)
 
+/*
+ * Normally a method that has an "inline native" will be invoked using
+ * execute-inline. If the method is invoked via reflection, JNI, or by
+ * virtual dispatch (in the case of String.equals, which we may arrive
+ * at via Object.equals), we need a non-"inline native" implementation.
+ *
+ * This macro is used to implement the native methods that bridge this gap.
+ */
+#define MAKE_INTRINSIC_TRAMPOLINE(INTRINSIC_FN) \
+    extern bool INTRINSIC_FN(u4 arg0, u4 arg1, u4 arg2, u4 arg3, \
+            JValue* pResult); \
+    INTRINSIC_FN(args[0], args[1], args[2], args[3], pResult);
 
 /*
  * Verify that "obj" is non-null and is an instance of "clazz".
@@ -68,25 +80,16 @@
 void dvmFreeDexOrJar(void* vptr);
 
 /*
- * Determine if "method" is a "privileged" invocation, i.e. is it one
- * of the variations of AccessController.doPrivileged().
- *
- * Because the security stuff pulls in a pile of stuff that we may not
- * want or need, we don't do the class/method lookups at init time, but
- * instead on first use.
- */
-bool dvmIsPrivilegedMethod(const Method* method);
-
-
-/*
  * Tables of methods.
  */
 extern const DalvikNativeMethod dvm_java_lang_Object[];
 extern const DalvikNativeMethod dvm_java_lang_Class[];
+extern const DalvikNativeMethod dvm_java_lang_Double[];
+extern const DalvikNativeMethod dvm_java_lang_Float[];
+extern const DalvikNativeMethod dvm_java_lang_Math[];
 extern const DalvikNativeMethod dvm_java_lang_Runtime[];
 extern const DalvikNativeMethod dvm_java_lang_String[];
 extern const DalvikNativeMethod dvm_java_lang_System[];
-extern const DalvikNativeMethod dvm_java_lang_SystemProperties[];
 extern const DalvikNativeMethod dvm_java_lang_Throwable[];
 extern const DalvikNativeMethod dvm_java_lang_VMClassLoader[];
 extern const DalvikNativeMethod dvm_java_lang_VMThread[];
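
A hypothetical use of MAKE_INTRINSIC_TRAMPOLINE (the method and intrinsic names
below are illustrative, not taken from this patch), assuming
InternalNativePriv.h is included and the standard internal-native signature is
used:

    /* Bridges an inline-native implementation so it can also be reached
     * via reflection, JNI, or virtual dispatch. */
    static void Dalvik_java_lang_String_fastIndexOf(const u4* args,
        JValue* pResult)
    {
        MAKE_INTRINSIC_TRAMPOLINE(javaLangString_fastIndexOf)
    }
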
diff --git a/vm/native/dalvik_system_DexFile.c b/vm/native/dalvik_system_DexFile.c
index e15b432..9c9a6e1 100644
--- a/vm/native/dalvik_system_DexFile.c
+++ b/vm/native/dalvik_system_DexFile.c
@@ -93,8 +93,7 @@
                 hashcmpDexOrJar, false);
     dvmHashTableUnlock(gDvm.userDexFiles);
     if (result == NULL) {
-        dvmThrowException("Ljava/lang/RuntimeException;",
-            "invalid DexFile cookie");
+        dvmThrowRuntimeException("invalid DexFile cookie");
         return false;
     }
 
@@ -161,7 +160,7 @@
     char* outputName;
 
     if (sourceNameObj == NULL) {
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         RETURN_VOID();
     }
 
@@ -194,7 +193,7 @@
      */
     if (dvmClassPathContains(gDvm.bootClassPath, sourceName)) {
         LOGW("Refusing to reopen boot DEX '%s'\n", sourceName);
-        dvmThrowException("Ljava/io/IOException;",
+        dvmThrowIOException(
             "Re-opening BOOTCLASSPATH DEX files is not allowed");
         free(sourceName);
         free(outputName);
@@ -223,7 +222,7 @@
         pDexOrJar->pDexMemory = NULL;
     } else {
         LOGV("Unable to open DEX file '%s'\n", sourceName);
-        dvmThrowException("Ljava/io/IOException;", "unable to open DEX file");
+        dvmThrowIOException("unable to open DEX file");
     }
 
     if (pDexOrJar != NULL) {
@@ -256,17 +255,16 @@
     DexOrJar* pDexOrJar = NULL;
 
     if (fileContentsObj == NULL) {
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         RETURN_VOID();
     }
 
-    /* TODO: Avoid making a copy of the array. */
+    /* TODO: Avoid making a copy of the array. (note array *is* modified) */
     length = fileContentsObj->length;
     pBytes = (u1*) malloc(length);
 
     if (pBytes == NULL) {
-        dvmThrowException("Ljava/lang/RuntimeException;",
-                "unable to allocate DEX memory");
+        dvmThrowRuntimeException("unable to allocate DEX memory");
         RETURN_VOID();
     }
 
@@ -275,8 +273,7 @@
     if (dvmRawDexFileOpenArray(pBytes, length, &pRawDexFile) != 0) {
         LOGV("Unable to open in-memory DEX file\n");
         free(pBytes);
-        dvmThrowException("Ljava/io/RuntimeException;",
-                "unable to open in-memory DEX file");
+        dvmThrowRuntimeException("unable to open in-memory DEX file");
         RETURN_VOID();
     }
 
@@ -363,7 +360,8 @@
 
     name = dvmCreateCstrFromString(nameObj);
     descriptor = dvmDotToDescriptor(name);
-    LOGV("--- Explicit class load '%s' 0x%08x\n", descriptor, cookie);
+    LOGV("--- Explicit class load '%s' l=%p c=0x%08x\n",
+        descriptor, loader, cookie);
     free(name);
 
     if (!validateCookie(cookie))
@@ -491,11 +489,11 @@
 
     name = dvmCreateCstrFromString(nameObj);
     if (name == NULL) {
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         RETURN_VOID();
     }
     if (access(name, R_OK) != 0) {
-        dvmThrowException("Ljava/io/FileNotFoundException;", name);
+        dvmThrowFileNotFoundException(name);
         free(name);
         RETURN_VOID();
     }
@@ -506,7 +504,7 @@
     switch (status) {
     default: //FALLTHROUGH
     case DEX_CACHE_BAD_ARCHIVE:
-        dvmThrowException("Ljava/io/IOException;", name);
+        dvmThrowIOException(name);
         result = -1;
         break;
     case DEX_CACHE_OK:
@@ -516,7 +514,7 @@
         result = true;
         break;
     case DEX_CACHE_STALE_ODEX:
-        dvmThrowException("Ldalvik/system/StaleDexCacheError;", name);
+        dvmThrowStaleDexCacheError(name);
         result = -1;
         break;
     }
diff --git a/vm/native/dalvik_system_VMDebug.c b/vm/native/dalvik_system_VMDebug.c
index 55fb684..d511a8f 100644
--- a/vm/native/dalvik_system_VMDebug.c
+++ b/vm/native/dalvik_system_VMDebug.c
@@ -21,6 +21,7 @@
 #include "native/InternalNativePriv.h"
 #include "hprof/Hprof.h"
 
+#include <cutils/array.h>
 #include <string.h>
 #include <unistd.h>
 #include <errno.h>
@@ -39,15 +40,13 @@
 
     InstField* field = dvmFindInstanceField(obj->clazz, "descriptor", "I");
     if (field == NULL) {
-        dvmThrowException("Ljava/lang/NoSuchFieldException;",
-            "No FileDescriptor.descriptor field");
+        dvmThrowNoSuchFieldException("No FileDescriptor.descriptor field");
         return -1;
     }
 
     int fd = dvmGetFieldInt(obj, field->byteOffset);
     if (fd < 0) {
-        dvmThrowExceptionFmt("Ljava/lang/RuntimeException;",
-            "Invalid file descriptor");
+        dvmThrowRuntimeException("Invalid file descriptor");
         return -1;
     }
 
@@ -55,57 +54,6 @@
 }
 
 /*
- * Convert an array of char* into a String[].
- *
- * Returns NULL on failure, with an exception raised.
- */
-static ArrayObject* convertStringArray(char** strings, size_t count)
-{
-    Thread* self = dvmThreadSelf();
-
-    /*
-     * Allocate an array to hold the String objects.
-     */
-    ClassObject* stringArrayClass =
-        dvmFindArrayClass("[Ljava/lang/String;", NULL);
-    if (stringArrayClass == NULL) {
-        /* shouldn't happen */
-        LOGE("Unable to find [Ljava/lang/String;\n");
-        dvmAbort();
-    }
-
-    ArrayObject* stringArray =
-        dvmAllocArrayByClass(stringArrayClass, count, ALLOC_DEFAULT);
-    if (stringArray == NULL) {
-        /* probably OOM */
-        LOGD("Failed allocating array of %d strings\n", count);
-        assert(dvmCheckException(self));
-        return NULL;
-    }
-
-    /*
-     * Create the individual String objects and add them to the array.
-     */
-    size_t i;
-    for (i = 0; i < count; i++) {
-        Object *str =
-            (Object *)dvmCreateStringFromCstr(strings[i]);
-        if (str == NULL) {
-            /* probably OOM; drop out now */
-            assert(dvmCheckException(self));
-            dvmReleaseTrackedAlloc((Object*)stringArray, self);
-            return NULL;
-        }
-        dvmSetObjectArrayElement(stringArray, i, str);
-        /* stored in tracked array, okay to release */
-        dvmReleaseTrackedAlloc(str, self);
-    }
-
-    dvmReleaseTrackedAlloc((Object*)stringArray, self);
-    return stringArray;
-}
-
-/*
  * static String[] getVmFeatureList()
  *
  * Return a set of strings describing available VM features (this is chiefly
@@ -115,22 +63,21 @@
 static void Dalvik_dalvik_system_VMDebug_getVmFeatureList(const u4* args,
     JValue* pResult)
 {
-    static const int MAX_FEATURE_COUNT = 10;
-    char* features[MAX_FEATURE_COUNT];
-    int idx = 0;
+    Array* features = arrayCreate();
 
     /* VM responds to DDMS method profiling requests */
-    features[idx++] = "method-trace-profiling";
-    features[idx++] = "method-trace-profiling-streaming";
+    arrayAdd(features, "method-trace-profiling");
+    arrayAdd(features, "method-trace-profiling-streaming");
     /* VM responds to DDMS heap dump requests */
-    features[idx++] = "hprof-heap-dump";
-    features[idx++] = "hprof-heap-dump-streaming";
+    arrayAdd(features, "hprof-heap-dump");
+    arrayAdd(features, "hprof-heap-dump-streaming");
 
-    assert(idx <= MAX_FEATURE_COUNT);
-
-    LOGV("+++ sending up %d features\n", idx);
-    ArrayObject* arrayObj = convertStringArray(features, idx);
-    RETURN_PTR(arrayObj);       /* will be null on OOM */
+    char** strings = (char**) arrayUnwrap(features);
+    int count = arraySize(features);
+    ArrayObject* result = dvmCreateStringArray(strings, count);
+    dvmReleaseTrackedAlloc((Object*) result, dvmThreadSelf());
+    arrayFree(features);
+    RETURN_PTR(result);
 }
 
 
@@ -317,7 +264,7 @@
     }
 
     if (bufferSize < 1024) {
-        dvmThrowException("Ljava/lang/IllegalArgumentException;", NULL);
+        dvmThrowIllegalArgumentException(NULL);
         RETURN_VOID();
     }
 
@@ -333,7 +280,7 @@
 
         fd = dup(origFd);
         if (fd < 0) {
-            dvmThrowExceptionFmt("Ljava/lang/RuntimeException;",
+            dvmThrowExceptionFmt(gDvm.exRuntimeException,
                 "dup(%d) failed: %s", origFd, strerror(errno));
             RETURN_VOID();
         }
@@ -576,7 +523,7 @@
      * Only one of these may be NULL.
      */
     if (fileNameStr == NULL && fileDescriptor == NULL) {
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         RETURN_VOID();
     }
 
@@ -584,7 +531,7 @@
         fileName = dvmCreateCstrFromString(fileNameStr);
         if (fileName == NULL) {
             /* unexpected -- malloc failure? */
-            dvmThrowException("Ljava/lang/RuntimeException;", "malloc failure?");
+            dvmThrowRuntimeException("malloc failure?");
             RETURN_VOID();
         }
     } else {
@@ -605,8 +552,8 @@
 
     if (result != 0) {
         /* ideally we'd throw something more specific based on actual failure */
-        dvmThrowException("Ljava/lang/RuntimeException;",
-            "Failure during heap dump -- check log output for details");
+        dvmThrowRuntimeException(
+            "Failure during heap dump; check log output for details");
         RETURN_VOID();
     }
 
@@ -627,8 +574,8 @@
 
     if (result != 0) {
         /* ideally we'd throw something more specific based on actual failure */
-        dvmThrowException("Ljava/lang/RuntimeException;",
-            "Failure during heap dump -- check log output for details");
+        dvmThrowRuntimeException(
+            "Failure during heap dump; check log output for details");
         RETURN_VOID();
     }
 
@@ -659,7 +606,7 @@
     bool result = false;
 
     if (classAndMethodDescStr == NULL) {
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         RETURN_VOID();
     }
 
@@ -673,16 +620,14 @@
 
     char* methodName = strchr(classAndMethodDesc, '.');
     if (methodName == NULL) {
-        dvmThrowException("Ljava/lang/RuntimeException;",
-            "method name not found in string");
+        dvmThrowRuntimeException("method name not found in string");
         RETURN_VOID();
     }
     *methodName++ = '\0';
 
     char* methodDescr = strchr(methodName, ':');
     if (methodDescr == NULL) {
-        dvmThrowException("Ljava/lang/RuntimeException;",
-            "method descriptor not found in string");
+        dvmThrowRuntimeException("method descriptor not found in string");
         RETURN_VOID();
     }
     *methodDescr++ = '\0';
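
The recurring change in this file (and in the files below) swaps descriptor-string throws such as dvmThrowException("Ljava/lang/RuntimeException;", ...) for typed helpers like dvmThrowRuntimeException() and dvmThrowNullPointerException(), with dvmThrowExceptionFmt() now taking a cached class handle (gDvm.exRuntimeException). A standalone toy sketch of that pattern follows; it uses simplified stand-in types, not the real VM internals:

    /*
     * Toy illustration only -- ToyClass and the helpers below are
     * stand-ins, not the Dalvik implementation.
     */
    #include <stdio.h>

    typedef struct {
        const char* descriptor;   /* e.g. "Ljava/lang/RuntimeException;" */
    } ToyClass;

    /* Stand-in for gDvm.exRuntimeException: resolved once, reused on every throw. */
    static ToyClass gToyRuntimeException = { "Ljava/lang/RuntimeException;" };

    static void toyThrowByClass(const ToyClass* clazz, const char* msg)
    {
        printf("throwing %s: %s\n", clazz->descriptor,
            msg != NULL ? msg : "(no message)");
    }

    /* Typed helper, analogous in shape to dvmThrowRuntimeException(msg). */
    static void toyThrowRuntimeException(const char* msg)
    {
        toyThrowByClass(&gToyRuntimeException, msg);
    }

    int main(void)
    {
        toyThrowRuntimeException("Failure during heap dump; check log output");
        return 0;
    }

Compared with passing descriptor strings at each call site, typed helpers make a misspelled exception name a compile-time error and keep the class lookup in one place.
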
diff --git a/vm/native/dalvik_system_VMRuntime.c b/vm/native/dalvik_system_VMRuntime.c
index fec24be..af0df7a 100644
--- a/vm/native/dalvik_system_VMRuntime.c
+++ b/vm/native/dalvik_system_VMRuntime.c
@@ -20,6 +20,7 @@
 #include "Dalvik.h"
 #include "native/InternalNativePriv.h"
 
+#include <cutils/array.h>
 #include <limits.h>
 
 
@@ -54,20 +55,6 @@
 }
 
 /*
- * public native void gcSoftReferences()
- *
- * Does a GC and forces collection of SoftReferences that are
- * not strongly-reachable.
- */
-static void Dalvik_dalvik_system_VMRuntime_gcSoftReferences(const u4* args,
-    JValue* pResult)
-{
-    dvmCollectGarbage(true);
-
-    RETURN_VOID();
-}
-
-/*
  * public native void runFinalizationSync()
  *
  * Does not return until any pending finalizers have been called.
@@ -131,11 +118,11 @@
     ArrayObject* newArray;
 
     if (elementClass == NULL) {
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         RETURN_VOID();
     }
     if (length < 0) {
-        dvmThrowException("Ljava/lang/NegativeArraySizeException;", NULL);
+        dvmThrowNegativeArraySizeException(length);
         RETURN_VOID();
     }
 
@@ -157,7 +144,7 @@
 {
     ArrayObject* array = (ArrayObject*) args[1];
     if (!dvmIsArray(array)) {
-        dvmThrowException("Ljava/lang/IllegalArgumentException;", NULL);
+        dvmThrowIllegalArgumentException(NULL);
         RETURN_VOID();
     }
     // TODO: we should also check that this is a non-movable array.
@@ -172,24 +159,68 @@
     RETURN_VOID();
 }
 
+static void Dalvik_dalvik_system_VMRuntime_properties(const u4* args,
+    JValue* pResult)
+{
+    char** strings = (char**) arrayUnwrap(gDvm.properties);
+    int count = arraySize(gDvm.properties);
+    ArrayObject* result = dvmCreateStringArray(strings, count);
+    dvmReleaseTrackedAlloc((Object*) result, dvmThreadSelf());
+    RETURN_PTR(result);
+}
+
+static void returnCString(JValue* pResult, const char* s)
+{
+    Object* result = (Object*) dvmCreateStringFromCstr(s);
+    dvmReleaseTrackedAlloc(result, dvmThreadSelf());
+    RETURN_PTR(result);
+}
+
+static void Dalvik_dalvik_system_VMRuntime_bootClassPath(const u4* args,
+    JValue* pResult)
+{
+    returnCString(pResult, gDvm.bootClassPathStr);
+}
+
+static void Dalvik_dalvik_system_VMRuntime_classPath(const u4* args,
+    JValue* pResult)
+{
+    returnCString(pResult, gDvm.classPathStr);
+}
+
+static void Dalvik_dalvik_system_VMRuntime_vmVersion(const u4* args,
+    JValue* pResult)
+{
+    char buf[64];
+    sprintf(buf, "%d.%d.%d",
+            DALVIK_MAJOR_VERSION, DALVIK_MINOR_VERSION, DALVIK_BUG_VERSION);
+    returnCString(pResult, buf);
+}
+
 const DalvikNativeMethod dvm_dalvik_system_VMRuntime[] = {
+    { "addressOf", "(Ljava/lang/Object;)J",
+        Dalvik_dalvik_system_VMRuntime_addressOf },
+    { "bootClassPath", "()Ljava/lang/String;",
+        Dalvik_dalvik_system_VMRuntime_bootClassPath },
+    { "classPath", "()Ljava/lang/String;",
+        Dalvik_dalvik_system_VMRuntime_classPath },
+    { "clearGrowthLimit", "()V",
+        Dalvik_dalvik_system_VMRuntime_clearGrowthLimit },
+    { "disableJitCompilation", "()V",
+        Dalvik_dalvik_system_VMRuntime_disableJitCompilation },
     { "getTargetHeapUtilization", "()F",
         Dalvik_dalvik_system_VMRuntime_getTargetHeapUtilization },
     { "nativeSetTargetHeapUtilization", "(F)V",
         Dalvik_dalvik_system_VMRuntime_nativeSetTargetHeapUtilization },
-    { "gcSoftReferences", "()V",
-        Dalvik_dalvik_system_VMRuntime_gcSoftReferences },
+    { "newNonMovableArray", "(Ljava/lang/Class;I)Ljava/lang/Object;",
+        Dalvik_dalvik_system_VMRuntime_newNonMovableArray },
+    { "properties", "()[Ljava/lang/String;",
+        Dalvik_dalvik_system_VMRuntime_properties },
     { "runFinalizationSync", "()V",
         Dalvik_dalvik_system_VMRuntime_runFinalizationSync },
     { "startJitCompilation", "()V",
         Dalvik_dalvik_system_VMRuntime_startJitCompilation },
-    { "disableJitCompilation", "()V",
-        Dalvik_dalvik_system_VMRuntime_disableJitCompilation },
-    { "newNonMovableArray", "(Ljava/lang/Class;I)Ljava/lang/Object;",
-        Dalvik_dalvik_system_VMRuntime_newNonMovableArray },
-    { "addressOf", "(Ljava/lang/Object;)J",
-        Dalvik_dalvik_system_VMRuntime_addressOf },
-    { "clearGrowthLimit", "()V",
-        Dalvik_dalvik_system_VMRuntime_clearGrowthLimit },
+    { "vmVersion", "()Ljava/lang/String;",
+        Dalvik_dalvik_system_VMRuntime_vmVersion },
     { NULL, NULL, NULL },
 };
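
vmVersion() above formats the version triple with sprintf() into a 64-byte stack buffer, which is ample for three small integers. For reference, a self-contained sketch of the same formatting with an explicit bound via snprintf(); the version values here are placeholders, not the real DALVIK_*_VERSION definitions:

    #include <stdio.h>

    /* Placeholder values for illustration only. */
    #define DALVIK_MAJOR_VERSION 1
    #define DALVIK_MINOR_VERSION 4
    #define DALVIK_BUG_VERSION   0

    int main(void)
    {
        char buf[64];
        /* snprintf makes the buffer bound explicit even though 64 bytes
         * comfortably holds "major.minor.bug" for small integers. */
        snprintf(buf, sizeof(buf), "%d.%d.%d",
            DALVIK_MAJOR_VERSION, DALVIK_MINOR_VERSION, DALVIK_BUG_VERSION);
        printf("%s\n", buf);   /* e.g. "1.4.0" */
        return 0;
    }
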
diff --git a/vm/native/dalvik_system_VMStack.c b/vm/native/dalvik_system_VMStack.c
index 8db4a6b..8891933 100644
--- a/vm/native/dalvik_system_VMStack.c
+++ b/vm/native/dalvik_system_VMStack.c
@@ -71,7 +71,7 @@
 }
 
 /*
- * public static Class<?>[] getClasses(int maxDepth, boolean stopAtPrivileged)
+ * public static Class<?>[] getClasses(int maxDepth)
  *
  * Create an array of classes for the methods on the stack, skipping the
  * first two and all reflection methods.  If "stopAtPrivileged" is set,
@@ -82,7 +82,6 @@
 {
     /* note "maxSize" is unsigned, so -1 turns into a very large value */
     unsigned int maxSize = args[0];
-    bool stopAtPrivileged = args[1];
     unsigned int size = 0;
     const unsigned int kSkip = 2;
     const Method** methods = NULL;
@@ -95,7 +94,7 @@
             &methodCount))
     {
         LOGE("Failed to create stack trace array\n");
-        dvmThrowException("Ljava/lang/InternalError;", NULL);
+        dvmThrowInternalError(NULL);
         RETURN_VOID();
     }
 
@@ -116,16 +115,6 @@
         if (dvmIsReflectionMethod(meth))
             continue;
 
-        if (stopAtPrivileged && dvmIsPrivilegedMethod(meth)) {
-            /*
-             * We want the last element of the array to be the caller of
-             * the privileged method, so we want to include the privileged
-             * method and the next one.
-             */
-            if (maxSize > size + 2)
-                maxSize = size + 2;
-        }
-
         size++;
     }
 
@@ -227,7 +216,7 @@
         Dalvik_dalvik_system_VMStack_getCallingClassLoader2 },
     { "getStackClass2", "()Ljava/lang/Class;",
         Dalvik_dalvik_system_VMStack_getStackClass2 },
-    { "getClasses",             "(IZ)[Ljava/lang/Class;",
+    { "getClasses",             "(I)[Ljava/lang/Class;",
         Dalvik_dalvik_system_VMStack_getClasses },
     { "getThreadStackTrace",    "(Ljava/lang/Thread;)[Ljava/lang/StackTraceElement;",
         Dalvik_dalvik_system_VMStack_getThreadStackTrace },
diff --git a/vm/native/dalvik_system_Zygote.c b/vm/native/dalvik_system_Zygote.c
index c568005..905eb9a 100644
--- a/vm/native/dalvik_system_Zygote.c
+++ b/vm/native/dalvik_system_Zygote.c
@@ -167,7 +167,7 @@
     }
 
     /* just in case gid_t and u4 are different... */
-    gids = alloca(sizeof(gid_t) * gidArray->length);
+    gids = (gid_t *)alloca(sizeof(gid_t) * gidArray->length);
     contents = (s4 *)gidArray->contents;
 
     for (i = 0 ; i < gidArray->length ; i++) {
@@ -227,7 +227,7 @@
     pid_t pid;
 
     if (!gDvm.zygote) {
-        dvmThrowException("Ljava/lang/IllegalStateException;",
+        dvmThrowIllegalStateException(
             "VM instance not started with -Xzygote");
 
         RETURN_VOID();
@@ -380,7 +380,7 @@
     }
 
     if (!gDvm.zygote) {
-        dvmThrowException("Ljava/lang/IllegalStateException;",
+        dvmThrowIllegalStateException(
             "VM instance not started with -Xzygote");
 
         return -1;
diff --git a/vm/native/java_lang_Class.c b/vm/native/java_lang_Class.c
index 1cd31df..4bda347 100644
--- a/vm/native/java_lang_Class.c
+++ b/vm/native/java_lang_Class.c
@@ -428,7 +428,7 @@
     ClassObject* testClass = (ClassObject*) args[1];
 
     if (testClass == NULL) {
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         RETURN_INT(false);
     }
     RETURN_INT(dvmInstanceof(testClass, thisPtr));
@@ -491,8 +491,7 @@
         LOGD("newInstance failed: p%d i%d [%d a%d\n",
             dvmIsPrimitiveClass(clazz), dvmIsInterfaceClass(clazz),
             dvmIsArrayClass(clazz), dvmIsAbstractClass(clazz));
-        dvmThrowExceptionWithClassMessage("Ljava/lang/InstantiationException;",
-            clazz->descriptor);
+        dvmThrowInstantiationException(clazz, NULL);
         RETURN_VOID();
     }
 
@@ -511,8 +510,7 @@
     if (init == NULL) {
         /* common cause: secret "this" arg on non-static inner class ctor */
         LOGD("newInstance failed: no <init>()\n");
-        dvmThrowExceptionWithClassMessage("Ljava/lang/InstantiationException;",
-            clazz->descriptor);
+        dvmThrowInstantiationException(clazz, "no empty constructor");
         RETURN_VOID();
     }
 
@@ -531,15 +529,13 @@
     if (!dvmCheckClassAccess(callerClass, clazz)) {
         LOGD("newInstance failed: %s not accessible to %s\n",
             clazz->descriptor, callerClass->descriptor);
-        dvmThrowException("Ljava/lang/IllegalAccessException;",
-            "access to class not allowed");
+        dvmThrowIllegalAccessException("access to class not allowed");
         RETURN_VOID();
     }
     if (!dvmCheckMethodAccess(callerClass, init)) {
         LOGD("newInstance failed: %s.<init>() not accessible to %s\n",
             clazz->descriptor, callerClass->descriptor);
-        dvmThrowException("Ljava/lang/IllegalAccessException;",
-            "access to constructor not allowed");
+        dvmThrowIllegalAccessException("access to constructor not allowed");
         RETURN_VOID();
     }
 
@@ -644,8 +640,7 @@
 static void Dalvik_java_lang_Class_getGenericInterfaces(const u4* args,
     JValue* pResult)
 {
-    dvmThrowException("Ljava/lang/UnsupportedOperationException;",
-        "native method not implemented");
+    dvmThrowUnsupportedOperationException("native method not implemented");
 
     RETURN_PTR(NULL);
 }
@@ -653,8 +648,7 @@
 static void Dalvik_java_lang_Class_getGenericSuperclass(const u4* args,
     JValue* pResult)
 {
-    dvmThrowException("Ljava/lang/UnsupportedOperationException;",
-        "native method not implemented");
+    dvmThrowUnsupportedOperationException("native method not implemented");
 
     RETURN_PTR(NULL);
 }
@@ -662,8 +656,7 @@
 static void Dalvik_java_lang_Class_getTypeParameters(const u4* args,
     JValue* pResult)
 {
-    dvmThrowException("Ljava/lang/UnsupportedOperationException;",
-        "native method not implemented");
+    dvmThrowUnsupportedOperationException("native method not implemented");
 
     RETURN_PTR(NULL);
 }
diff --git a/vm/native/java_lang_Double.c b/vm/native/java_lang_Double.c
new file mode 100644
index 0000000..b019c8c
--- /dev/null
+++ b/vm/native/java_lang_Double.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Dalvik.h"
+#include "native/InternalNativePriv.h"
+
+static void Double_doubleToLongBits(const u4* args, JValue* pResult)
+{
+    MAKE_INTRINSIC_TRAMPOLINE(javaLangDouble_doubleToLongBits);
+}
+
+static void Double_doubleToRawLongBits(const u4* args, JValue* pResult)
+{
+    MAKE_INTRINSIC_TRAMPOLINE(javaLangDouble_doubleToRawLongBits);
+}
+
+static void Double_longBitsToDouble(const u4* args, JValue* pResult)
+{
+    MAKE_INTRINSIC_TRAMPOLINE(javaLangDouble_longBitsToDouble);
+}
+
+const DalvikNativeMethod dvm_java_lang_Double[] = {
+    { "doubleToLongBits",    "(D)J", Double_doubleToLongBits },
+    { "doubleToRawLongBits", "(D)J", Double_doubleToRawLongBits },
+    { "longBitsToDouble",    "(J)D", Double_longBitsToDouble },
+    { NULL, NULL, NULL },
+};
diff --git a/vm/native/java_lang_Float.c b/vm/native/java_lang_Float.c
new file mode 100644
index 0000000..e99e4aa
--- /dev/null
+++ b/vm/native/java_lang_Float.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Dalvik.h"
+#include "native/InternalNativePriv.h"
+
+static void Float_floatToIntBits(const u4* args, JValue* pResult)
+{
+    MAKE_INTRINSIC_TRAMPOLINE(javaLangFloat_floatToIntBits);
+}
+
+static void Float_floatToRawIntBits(const u4* args, JValue* pResult)
+{
+    MAKE_INTRINSIC_TRAMPOLINE(javaLangFloat_floatToRawIntBits);
+}
+
+static void Float_intBitsToFloat(const u4* args, JValue* pResult)
+{
+    MAKE_INTRINSIC_TRAMPOLINE(javaLangFloat_intBitsToFloat);
+}
+
+const DalvikNativeMethod dvm_java_lang_Float[] = {
+    { "floatToIntBits",    "(F)I", Float_floatToIntBits },
+    { "floatToRawIntBits", "(F)I", Float_floatToRawIntBits },
+    { "intBitsToFloat",    "(I)F", Float_intBitsToFloat },
+    { NULL, NULL, NULL },
+};
diff --git a/vm/native/java_lang_Math.c b/vm/native/java_lang_Math.c
new file mode 100644
index 0000000..7c17242
--- /dev/null
+++ b/vm/native/java_lang_Math.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Dalvik.h"
+#include "native/InternalNativePriv.h"
+
+static void Math_absD(const u4* args, JValue* pResult)
+{
+    MAKE_INTRINSIC_TRAMPOLINE(javaLangMath_abs_double);
+}
+
+static void Math_absF(const u4* args, JValue* pResult)
+{
+    MAKE_INTRINSIC_TRAMPOLINE(javaLangMath_abs_float);
+}
+
+static void Math_absI(const u4* args, JValue* pResult)
+{
+    MAKE_INTRINSIC_TRAMPOLINE(javaLangMath_abs_int);
+}
+
+static void Math_absJ(const u4* args, JValue* pResult)
+{
+    MAKE_INTRINSIC_TRAMPOLINE(javaLangMath_abs_long);
+}
+
+static void Math_cos(const u4* args, JValue* pResult)
+{
+    MAKE_INTRINSIC_TRAMPOLINE(javaLangMath_cos);
+}
+
+static void Math_maxI(const u4* args, JValue* pResult)
+{
+    MAKE_INTRINSIC_TRAMPOLINE(javaLangMath_max_int);
+}
+
+static void Math_minI(const u4* args, JValue* pResult)
+{
+    MAKE_INTRINSIC_TRAMPOLINE(javaLangMath_min_int);
+}
+
+static void Math_sin(const u4* args, JValue* pResult)
+{
+    MAKE_INTRINSIC_TRAMPOLINE(javaLangMath_sin);
+}
+
+static void Math_sqrt(const u4* args, JValue* pResult)
+{
+    MAKE_INTRINSIC_TRAMPOLINE(javaLangMath_sqrt);
+}
+
+const DalvikNativeMethod dvm_java_lang_Math[] = {
+    { "abs",  "(D)D",  Math_absD },
+    { "abs",  "(F)F",  Math_absF },
+    { "abs",  "(I)I",  Math_absI },
+    { "abs",  "(J)J",  Math_absJ },
+    { "cos",  "(D)D",  Math_cos },
+    { "max",  "(II)I", Math_maxI },
+    { "min",  "(II)I", Math_minI },
+    { "sin",  "(D)D",  Math_sin },
+    { "sqrt", "(D)D",  Math_sqrt },
+    { NULL, NULL, NULL },
+};
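
The new dvm_java_lang_Double, dvm_java_lang_Float, and dvm_java_lang_Math tables above are NULL-terminated lists of (name, signature, function) triples; the signature string is what lets overloads such as the four "abs" entries coexist. A simplified, self-contained sketch of how such a table can be searched (toy types, not the real DalvikNativeMethod machinery):

    #include <stdio.h>
    #include <string.h>

    typedef void (*ToyNativeFunc)(void);

    typedef struct {
        const char* name;
        const char* signature;
        ToyNativeFunc fnPtr;
    } ToyNativeMethod;

    static void absD(void) { puts("abs(D)D"); }
    static void absI(void) { puts("abs(I)I"); }

    /* Toy table in the same NULL-terminated shape as the tables above. */
    static const ToyNativeMethod gToyMathMethods[] = {
        { "abs", "(D)D", absD },
        { "abs", "(I)I", absI },
        { NULL, NULL, NULL },
    };

    /* A method matches only when both name and signature match. */
    static ToyNativeFunc lookup(const ToyNativeMethod* table,
        const char* name, const char* signature)
    {
        for (; table->name != NULL; table++) {
            if (strcmp(table->name, name) == 0 &&
                strcmp(table->signature, signature) == 0)
                return table->fnPtr;
        }
        return NULL;
    }

    int main(void)
    {
        lookup(gToyMathMethods, "abs", "(I)I")();   /* prints "abs(I)I" */
        return 0;
    }

In the toy lookup, the (name, signature) pair is the key, which is what makes the duplicate "abs" names unambiguous.
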
diff --git a/vm/native/java_lang_Object.c b/vm/native/java_lang_Object.c
index f2adf52..12f701f 100644
--- a/vm/native/java_lang_Object.c
+++ b/vm/native/java_lang_Object.c
@@ -30,9 +30,8 @@
     JValue* pResult)
 {
     Object* thisPtr = (Object*) args[0];
-    Object* clone = dvmCloneObject(thisPtr);
+    Object* clone = dvmCloneObject(thisPtr, ALLOC_DONT_TRACK);
 
-    dvmReleaseTrackedAlloc(clone, NULL);
     RETURN_PTR(clone);
 }
 
diff --git a/vm/native/java_lang_Runtime.c b/vm/native/java_lang_Runtime.c
index 90df259..2ac50ee 100644
--- a/vm/native/java_lang_Runtime.c
+++ b/vm/native/java_lang_Runtime.c
@@ -31,7 +31,7 @@
 {
     UNUSED_PARAMETER(args);
 
-    dvmCollectGarbage(false);
+    dvmCollectGarbage();
     RETURN_VOID();
 }
 
diff --git a/vm/native/java_lang_String.c b/vm/native/java_lang_String.c
index b3cb7ec..38f9e31 100644
--- a/vm/native/java_lang_String.c
+++ b/vm/native/java_lang_String.c
@@ -20,23 +20,50 @@
 #include "Dalvik.h"
 #include "native/InternalNativePriv.h"
 
+static void String_charAt(const u4* args, JValue* pResult)
+{
+    MAKE_INTRINSIC_TRAMPOLINE(javaLangString_charAt);
+}
 
-/*
- * public String intern()
- *
- * Intern a string in the VM string table.
- */
-static void Dalvik_java_lang_String_intern(const u4* args, JValue* pResult)
+static void String_compareTo(const u4* args, JValue* pResult)
+{
+    MAKE_INTRINSIC_TRAMPOLINE(javaLangString_compareTo);
+}
+
+static void String_equals(const u4* args, JValue* pResult)
+{
+    MAKE_INTRINSIC_TRAMPOLINE(javaLangString_equals);
+}
+
+static void String_fastIndexOf(const u4* args, JValue* pResult)
+{
+    MAKE_INTRINSIC_TRAMPOLINE(javaLangString_fastIndexOf_II);
+}
+
+static void String_intern(const u4* args, JValue* pResult)
 {
     StringObject* str = (StringObject*) args[0];
-    StringObject* interned;
-
-    interned = dvmLookupInternedString(str);
+    StringObject* interned = dvmLookupInternedString(str);
     RETURN_PTR(interned);
 }
 
+static void String_isEmpty(const u4* args, JValue* pResult)
+{
+    MAKE_INTRINSIC_TRAMPOLINE(javaLangString_isEmpty);
+}
+
+static void String_length(const u4* args, JValue* pResult)
+{
+    MAKE_INTRINSIC_TRAMPOLINE(javaLangString_length);
+}
+
 const DalvikNativeMethod dvm_java_lang_String[] = {
-    { "intern",             "()Ljava/lang/String;",
-        Dalvik_java_lang_String_intern },
+    { "charAt",      "(I)C",                  String_charAt },
+    { "compareTo",   "(Ljava/lang/String;)I", String_compareTo },
+    { "equals",      "(Ljava/lang/Object;)Z", String_equals },
+    { "fastIndexOf", "(II)I",                 String_fastIndexOf },
+    { "intern",      "()Ljava/lang/String;",  String_intern },
+    { "isEmpty",     "()Z",                   String_isEmpty },
+    { "length",      "()I",                   String_length },
     { NULL, NULL, NULL },
 };
diff --git a/vm/native/java_lang_System.c b/vm/native/java_lang_System.c
index 0ac1746..5402a7b 100644
--- a/vm/native/java_lang_System.c
+++ b/vm/native/java_lang_System.c
@@ -127,17 +127,16 @@
     dstPos = args[3];
     length = args[4];
 
-    /* check for null or bad pointer */
-    if (!dvmValidateObject((Object*)srcArray) ||
-        !dvmValidateObject((Object*)dstArray))
-    {
+    /* check for null pointer */
+    if ((Object*)srcArray == NULL || (Object*)dstArray == NULL) {
+        dvmThrowNullPointerException(NULL);
         assert(dvmCheckException(dvmThreadSelf()));
         RETURN_VOID();
     }
 
     /* make sure it's an array */
     if (!dvmIsArray(srcArray) || !dvmIsArray(dstArray)) {
-        dvmThrowExceptionFmt("Ljava/lang/ArrayStoreException;",
+        dvmThrowExceptionFmt(gDvm.exArrayStoreException,
             "source and destination must be arrays, but were %s and %s",
             ((Object*)srcArray)->clazz->descriptor,
             ((Object*)dstArray)->clazz->descriptor);
@@ -149,7 +148,7 @@
         srcPos > (int) srcArray->length - length ||
         dstPos > (int) dstArray->length - length)
     {
-        dvmThrowExceptionFmt("Ljava/lang/ArrayIndexOutOfBoundsException;",
+        dvmThrowExceptionFmt(gDvm.exArrayIndexOutOfBoundsException,
             "src.length=%d srcPos=%d dst.length=%d dstPos=%d length=%d",
             srcArray->length, srcPos, dstArray->length, dstPos, length);
         RETURN_VOID();
@@ -168,7 +167,7 @@
     dstPrim = (dstType != '[' && dstType != 'L');
     if (srcPrim || dstPrim) {
         if (srcPrim != dstPrim || srcType != dstType) {
-            dvmThrowExceptionFmt("Ljava/lang/ArrayStoreException;",
+            dvmThrowExceptionFmt(gDvm.exArrayStoreException,
                 "source and destination arrays are incompatible: %s and %s",
                 srcClass->descriptor, dstClass->descriptor);
             RETURN_VOID();
@@ -284,7 +283,7 @@
                 copyCount * width);
             dvmWriteBarrierArray(dstArray, 0, copyCount);
             if (copyCount != length) {
-                dvmThrowExceptionFmt("Ljava/lang/ArrayStoreException;",
+                dvmThrowExceptionFmt(gDvm.exArrayStoreException,
                     "source[%d] of type %s cannot be stored in destination array of type %s",
                     copyCount, srcObj[copyCount]->clazz->descriptor,
                     dstClass->descriptor);
@@ -356,7 +355,7 @@
     char* mappedName;
 
     if (nameObj == NULL) {
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         RETURN_VOID();
     }
 
@@ -377,11 +376,11 @@
         Dalvik_java_lang_System_arraycopy },
     { "currentTimeMillis",  "()J",
         Dalvik_java_lang_System_currentTimeMillis },
-    { "nanoTime",  "()J",
-        Dalvik_java_lang_System_nanoTime },
     { "identityHashCode",  "(Ljava/lang/Object;)I",
         Dalvik_java_lang_System_identityHashCode },
     { "mapLibraryName",     "(Ljava/lang/String;)Ljava/lang/String;",
         Dalvik_java_lang_System_mapLibraryName },
+    { "nanoTime",  "()J",
+        Dalvik_java_lang_System_nanoTime },
     { NULL, NULL, NULL },
 };
diff --git a/vm/native/java_lang_SystemProperties.c b/vm/native/java_lang_SystemProperties.c
deleted file mode 100644
index bbcf25e..0000000
--- a/vm/native/java_lang_SystemProperties.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- * java.lang.SystemProperties
- */
-#include "Dalvik.h"
-#include "native/InternalNativePriv.h"
-
-
-/*
- * Expected call sequence:
- *  (1) call SystemProperties.preInit() to get VM defaults
- *  (2) set any higher-level defaults
- *  (3) call SystemProperties.postInit() to get command-line overrides
- * This currently happens the first time somebody tries to access a property.
- *
- * SystemProperties is a Dalvik-specific package-scope class.
- */
-
-/*
- * void preInit()
- *
- * Tells the VM to populate the properties table with VM defaults.
- */
-static void Dalvik_java_lang_SystemProperties_preInit(const u4* args,
-    JValue* pResult)
-{
-    dvmCreateDefaultProperties((Object*) args[0]);
-    RETURN_VOID();
-}
-
-/*
- * void postInit()
- *
- * Tells the VM to update properties with values from the command line.
- */
-static void Dalvik_java_lang_SystemProperties_postInit(const u4* args,
-    JValue* pResult)
-{
-    dvmSetCommandLineProperties((Object*) args[0]);
-    RETURN_VOID();
-}
-
-const DalvikNativeMethod dvm_java_lang_SystemProperties[] = {
-    { "preInit",            "()V",
-        Dalvik_java_lang_SystemProperties_preInit },
-    { "postInit",           "()V",
-        Dalvik_java_lang_SystemProperties_postInit },
-    { NULL, NULL, NULL },
-};
diff --git a/vm/native/java_lang_VMClassLoader.c b/vm/native/java_lang_VMClassLoader.c
index e8dbc6e..1cbe6e2 100644
--- a/vm/native/java_lang_VMClassLoader.c
+++ b/vm/native/java_lang_VMClassLoader.c
@@ -42,7 +42,7 @@
     name = dvmCreateCstrFromString(nameObj);
     LOGE("ERROR: defineClass(%p, %s, %p, %d, %d, %p)\n",
         loader, name, data, offset, len, pd);
-    dvmThrowException("Ljava/lang/UnsupportedOperationException;",
+    dvmThrowUnsupportedOperationException(
         "can't load this type of class file");
 
     free(name);
@@ -68,7 +68,7 @@
 
     LOGE("ERROR: defineClass(%p, %p, %d, %d, %p)\n",
         loader, data, offset, len, pd);
-    dvmThrowException("Ljava/lang/UnsupportedOperationException;",
+    dvmThrowUnsupportedOperationException(
         "can't load this type of class file");
 
     RETURN_VOID();
@@ -87,7 +87,7 @@
     char* descriptor = NULL;
 
     if (nameObj == NULL) {
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         goto bail;
     }
 
diff --git a/vm/native/java_lang_VMThread.c b/vm/native/java_lang_VMThread.c
index 3b7331b..0a02020 100644
--- a/vm/native/java_lang_VMThread.c
+++ b/vm/native/java_lang_VMThread.c
@@ -85,7 +85,7 @@
     Thread* thread;
 
     if (object == NULL) {
-        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        dvmThrowNullPointerException(NULL);
         RETURN_VOID();
     }
 
diff --git a/vm/native/java_lang_reflect_Array.c b/vm/native/java_lang_reflect_Array.c
index e7713f6..bc7e5ec 100644
--- a/vm/native/java_lang_reflect_Array.c
+++ b/vm/native/java_lang_reflect_Array.c
@@ -36,7 +36,7 @@
 
     assert(elementClass != NULL);       // tested by caller
     if (length < 0) {
-        dvmThrowException("Ljava/lang/NegativeArraySizeException;", NULL);
+        dvmThrowNegativeArraySizeException(length);
         RETURN_VOID();
     }
 
@@ -91,7 +91,7 @@
     dimensions = (int*) dimArray->contents;
     for (i = 0; i < numDim; i++) {
         if (dimensions[i] < 0) {
-            dvmThrowException("Ljava/lang/NegativeArraySizeException;", NULL);
+            dvmThrowNegativeArraySizeException(dimensions[i]);
             RETURN_VOID();
         }
         LOGVV("DIM %d: %d\n", i, dimensions[i]);
diff --git a/vm/native/java_lang_reflect_Constructor.c b/vm/native/java_lang_reflect_Constructor.c
index 76f69a9..e9ad354 100644
--- a/vm/native/java_lang_reflect_Constructor.c
+++ b/vm/native/java_lang_reflect_Constructor.c
@@ -59,8 +59,7 @@
     Method* meth;
 
     if (dvmIsAbstractClass(declaringClass)) {
-        dvmThrowExceptionWithClassMessage("Ljava/lang/InstantiationException;",
-            declaringClass->descriptor);
+        dvmThrowInstantiationException(declaringClass, NULL);
         RETURN_VOID();
     }
 
diff --git a/vm/native/java_lang_reflect_Field.c b/vm/native/java_lang_reflect_Field.c
index 15cb84a..2713d8c 100644
--- a/vm/native/java_lang_reflect_Field.c
+++ b/vm/native/java_lang_reflect_Field.c
@@ -66,8 +66,7 @@
     /* verify access */
     if (!noAccessCheck) {
         if (isSetOperation && dvmIsFinalField(field)) {
-            dvmThrowException("Ljava/lang/IllegalAccessException;",
-                "field is marked 'final'");
+            dvmThrowIllegalAccessException("field is marked 'final'");
             return NULL;
         }
 
@@ -86,8 +85,7 @@
          * in arbitrary Foo objects from other packages.
          */
         if (!dvmCheckFieldAccess(callerClass, field)) {
-            dvmThrowException("Ljava/lang/IllegalAccessException;",
-                "access to field not allowed");
+            dvmThrowIllegalAccessException("access to field not allowed");
             return NULL;
         }
         if (dvmIsProtectedField(field)) {
@@ -100,7 +98,7 @@
             samePackage = dvmInSamePackage(declaringClass, callerClass);
 
             if (!isInstance && !samePackage) {
-                dvmThrowException("Ljava/lang/IllegalAccessException;",
+                dvmThrowIllegalAccessException(
                     "access to protected field not allowed");
                 return NULL;
             }
@@ -284,7 +282,7 @@
         switch (sfield->field.signature[0]) {
         case 'L':
         case '[':
-            dvmSetStaticFieldObject(sfield, value->l);
+            dvmSetStaticFieldObject(sfield, (Object*)value->l);
             break;
         default:
             /* just copy the whole thing */
@@ -320,7 +318,7 @@
             break;
         case 'L':
         case '[':
-            dvmSetStaticFieldObjectVolatile(sfield, value->l);
+            dvmSetStaticFieldObjectVolatile(sfield, (Object*)value->l);
             break;
         default:
             LOGE("Unhandled field signature '%s'\n", sfield->field.signature);
@@ -365,7 +363,7 @@
             break;
         case 'L':
         case '[':
-            dvmSetFieldObject(obj, ifield->byteOffset, value->l);
+            dvmSetFieldObject(obj, ifield->byteOffset, (Object *)value->l);
             break;
         default:
             LOGE("Unhandled field signature '%s'\n", ifield->field.signature);
@@ -409,7 +407,7 @@
             break;
         case 'L':
         case '[':
-            dvmSetFieldObjectVolatile(obj, ifield->byteOffset, value->l);
+            dvmSetFieldObjectVolatile(obj, ifield->byteOffset, (Object*)value->l);
             break;
         default:
             LOGE("Unhandled field signature '%s'\n", ifield->field.signature);
@@ -503,8 +501,7 @@
 
     /* unbox primitive, or verify object type */
     if (!dvmUnboxPrimitive(valueObj, fieldType, &value)) {
-        dvmThrowException("Ljava/lang/IllegalArgumentException;",
-            "invalid value for field");
+        dvmThrowIllegalArgumentException("invalid value for field");
         RETURN_VOID();
     }
 
@@ -554,8 +551,7 @@
     JValue value;
 
     if (!dvmIsPrimitiveClass(fieldType)) {
-        dvmThrowException("Ljava/lang/IllegalArgumentException;",
-            "not a primitive field");
+        dvmThrowIllegalArgumentException("not a primitive field");
         RETURN_VOID();
     }
 
@@ -570,8 +566,7 @@
     if (dvmConvertPrimitiveValue(fieldType->primitiveType, targetType,
         &(value.i), &(pResult->i)) < 0)
     {
-        dvmThrowException("Ljava/lang/IllegalArgumentException;",
-            "invalid primitive conversion");
+        dvmThrowIllegalArgumentException("invalid primitive conversion");
         RETURN_VOID();
     }
 }
@@ -599,8 +594,7 @@
     JValue value;
 
     if (!dvmIsPrimitiveClass(fieldType)) {
-        dvmThrowException("Ljava/lang/IllegalArgumentException;",
-            "not a primitive field");
+        dvmThrowIllegalArgumentException("not a primitive field");
         RETURN_VOID();
     }
 
@@ -608,8 +602,7 @@
     if (dvmConvertPrimitiveValue(srcType, fieldType->primitiveType,
         valuePtr, &(value.i)) < 0)
     {
-        dvmThrowException("Ljava/lang/IllegalArgumentException;",
-            "invalid primitive conversion");
+        dvmThrowIllegalArgumentException("invalid primitive conversion");
         RETURN_VOID();
     }
 
diff --git a/vm/native/java_security_AccessController.c b/vm/native/java_security_AccessController.c
index 378fb94..97986cb 100644
--- a/vm/native/java_security_AccessController.c
+++ b/vm/native/java_security_AccessController.c
@@ -42,7 +42,7 @@
     if (!dvmCreateStackTraceArray(dvmThreadSelf()->curFrame, &methods, &length))
     {
         LOGE("Failed to create stack trace array\n");
-        dvmThrowException("Ljava/lang/InternalError;", NULL);
+        dvmThrowInternalError(NULL);
         RETURN_VOID();
     }
 
@@ -70,7 +70,7 @@
     if (subSet == NULL) {
         LOGE("Failed to allocate subSet (length=%d)\n", length);
         free(methods);
-        dvmThrowException("Ljava/lang/InternalError;", NULL);
+        dvmThrowInternalError(NULL);
         RETURN_VOID();
     }
     int idx, subIdx = 0;
@@ -81,15 +81,6 @@
         if (dvmIsReflectionMethod(meth))
             continue;
 
-        if (dvmIsPrivilegedMethod(meth)) {
-            /* find nearest non-reflection frame; note we skip priv frame */
-            //LOGI("GSD priv frame at %s.%s\n", meth->clazz->name, meth->name);
-            while (++idx < length && dvmIsReflectionMethod(methods[idx]))
-                ;
-            length = idx;       // stomp length to end loop
-            meth = methods[idx];
-        }
-
         /* get the pd object from the method's class */
         assert(gDvm.offJavaLangClass_pd != 0);
         pd = dvmGetFieldObject((Object*) meth->clazz,
diff --git a/vm/oo/Array.c b/vm/oo/Array.c
index bc57a5a..99630ca 100644
--- a/vm/oo/Array.c
+++ b/vm/oo/Array.c
@@ -20,10 +20,7 @@
 
 #include <stdlib.h>
 #include <stddef.h>
-
-#if WITH_HPROF_STACK
-#include "hprof/Hprof.h"
-#endif
+#include <limits.h>
 
 static ClassObject* createArrayClass(const char* descriptor, Object* loader);
 static ClassObject* createPrimitiveClass(int idx);
@@ -41,36 +38,30 @@
 ArrayObject* dvmAllocArray(ClassObject* arrayClass, size_t length,
     size_t elemWidth, int allocFlags)
 {
-    ArrayObject* newArray;
-    size_t size;
-
+    assert(arrayClass != NULL);
+    assert(arrayClass->descriptor != NULL);
     assert(arrayClass->descriptor[0] == '[');
-
-    if (length > 0x0fffffff) {
-        /* too large and (length * elemWidth) will overflow 32 bits */
-        LOGE("Rejecting allocation of %u-element array\n", length);
-        dvmThrowBadAllocException("array size too large");
+    assert(length <= 0x7fffffff);
+    assert(elemWidth > 0);
+    assert(elemWidth <= 8);
+    assert((elemWidth & (elemWidth - 1)) == 0);
+    size_t elementShift = sizeof(size_t) * CHAR_BIT - 1 - CLZ(elemWidth);
+    size_t elementSize = length << elementShift;
+    size_t headerSize = offsetof(ArrayObject, contents);
+    size_t totalSize = elementSize + headerSize;
+    if (elementSize >> elementShift != length || totalSize < elementSize) {
+        char *descriptor = dvmHumanReadableDescriptor(arrayClass->descriptor);
+        dvmThrowExceptionFmt(gDvm.exOutOfMemoryError,
+                "%s of length %zd exceeds the VM limit", descriptor, length);
+        free(descriptor);
         return NULL;
     }
-
-    size = offsetof(ArrayObject, contents);
-    size += length * elemWidth;
-
-    /* Note that we assume that the Array class does not
-     * override finalize().
-     */
-    newArray = dvmMalloc(size, allocFlags);
+    ArrayObject* newArray = (ArrayObject*)dvmMalloc(totalSize, allocFlags);
     if (newArray != NULL) {
         DVM_OBJECT_INIT(&newArray->obj, arrayClass);
         newArray->length = length;
-        LOGVV("AllocArray: %s [%d] (%d)\n",
-            arrayClass->descriptor, (int) length, (int) size);
-#if WITH_HPROF_STACK
-        hprofFillInStackTrace(&newArray->obj);
-#endif
-        dvmTrackAllocation(arrayClass, size);
+        dvmTrackAllocation(arrayClass, totalSize);
     }
-    /* the caller must call dvmReleaseTrackedAlloc */
     return newArray;
 }
 
@@ -149,67 +140,53 @@
  * Create a new array that holds primitive types.
  *
  * "type" is the primitive type letter, e.g. 'I' for int or 'J' for long.
- * If the array class doesn't exist, it will be created.
  */
 ArrayObject* dvmAllocPrimitiveArray(char type, size_t length, int allocFlags)
 {
     ArrayObject* newArray;
-    ClassObject** pTypeClass;
+    ClassObject* arrayClass;
     int width;
 
     switch (type) {
     case 'I':
-        pTypeClass = &gDvm.classArrayInt;
+        arrayClass = gDvm.classArrayInt;
         width = 4;
         break;
     case 'C':
-        pTypeClass = &gDvm.classArrayChar;
+        arrayClass = gDvm.classArrayChar;
         width = 2;
         break;
     case 'B':
-        pTypeClass = &gDvm.classArrayByte;
+        arrayClass = gDvm.classArrayByte;
         width = 1;
         break;
     case 'Z':
-        pTypeClass = &gDvm.classArrayBoolean;
+        arrayClass = gDvm.classArrayBoolean;
         width = 1; /* special-case this? */
         break;
     case 'F':
-        pTypeClass = &gDvm.classArrayFloat;
+        arrayClass = gDvm.classArrayFloat;
         width = 4;
         break;
     case 'D':
-        pTypeClass = &gDvm.classArrayDouble;
+        arrayClass = gDvm.classArrayDouble;
         width = 8;
         break;
     case 'S':
-        pTypeClass = &gDvm.classArrayShort;
+        arrayClass = gDvm.classArrayShort;
         width = 2;
         break;
     case 'J':
-        pTypeClass = &gDvm.classArrayLong;
+        arrayClass = gDvm.classArrayLong;
         width = 8;
         break;
     default:
-        LOGE("Unknown type '%c'\n", type);
-        assert(false);
-        return NULL;
+        LOGE("Unknown primitive type '%c'\n", type);
+        dvmAbort();
+        return NULL; // Keeps the compiler happy.
     }
 
-    if (*pTypeClass == NULL) {
-        char typeClassName[3] = "[x";
-
-        typeClassName[1] = type;
-
-        *pTypeClass = dvmFindArrayClass(typeClassName, NULL);
-        if (*pTypeClass == NULL) {
-            LOGE("ERROR: failed to generate array class for '%s'\n",
-                typeClassName);
-            return NULL;
-        }
-    }
-
-    newArray = dvmAllocArray(*pTypeClass, length, width, allocFlags);
+    newArray = dvmAllocArray(arrayClass, length, width, allocFlags);
 
     /* the caller must dvmReleaseTrackedAlloc if allocFlags==ALLOC_DEFAULT */
     return newArray;
@@ -437,9 +414,6 @@
                       (Object *)elementClass->classLoader);
     newClass->arrayDim = arrayDim;
     newClass->status = CLASS_INITIALIZED;
-#if WITH_HPROF_STACK
-    hprofFillInStackTrace(newClass);
-#endif
 
     /* don't need to set newClass->objectSize */
 
@@ -473,7 +447,7 @@
         LOGE("Unable to create array class '%s': missing interfaces\n",
             descriptor);
         dvmFreeClassInnards(newClass);
-        dvmThrowException("Ljava/lang/InternalError;", "missing array ifaces");
+        dvmThrowInternalError("missing array ifaces");
         dvmReleaseTrackedAlloc((Object*) newClass, NULL);
         return NULL;
     }
@@ -644,9 +618,6 @@
     newClass->descriptor = kClassDescriptors[idx];
     //newClass->super = gDvm.classJavaLangObject;
     newClass->status = CLASS_INITIALIZED;
-#if WITH_HPROF_STACK
-    hprofFillInStackTrace(newClass);
-#endif
 
     /* don't need to set newClass->objectSize */
 
@@ -719,7 +690,7 @@
         case PRIM_BOOLEAN:
         case PRIM_BYTE:
             {
-                u1* tmp = dst;
+                u1* tmp = (u1*)dst;
                 *tmp++ = result.b;
                 dst = tmp;
             }
@@ -727,7 +698,7 @@
         case PRIM_CHAR:
         case PRIM_SHORT:
             {
-                u2* tmp = dst;
+                u2* tmp = (u2*)dst;
                 *tmp++ = result.s;
                 dst = tmp;
             }
@@ -735,7 +706,7 @@
         case PRIM_FLOAT:
         case PRIM_INT:
             {
-                u4* tmp = dst;
+                u4* tmp = (u4*)dst;
                 *tmp++ = result.i;
                 dst = tmp;
             }
@@ -743,7 +714,7 @@
         case PRIM_DOUBLE:
         case PRIM_LONG:
             {
-                u8* tmp = dst;
+                u8* tmp = (u8*)dst;
                 *tmp++ = result.j;
                 dst = tmp;
             }
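
The rewritten dvmAllocArray() above replaces the old 0x0fffffff length cap with an exact overflow check: element widths are powers of two, so the byte count is computed as length << elementShift, and overflow is detected by shifting back and by checking the header addition. A self-contained sketch of the same arithmetic, with a portable stand-in for the CLZ() macro used in the patch:

    #include <limits.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Portable stand-in for CLZ(): count leading zero bits of a size_t. */
    static size_t clz_size(size_t x)
    {
        size_t n = 0;
        size_t bit = (size_t)1 << (sizeof(size_t) * CHAR_BIT - 1);
        while (bit != 0 && (x & bit) == 0) {
            n++;
            bit >>= 1;
        }
        return n;
    }

    /*
     * Returns true if length elements of elemWidth bytes, plus headerSize,
     * fit in a size_t without wrapping; mirrors the check in dvmAllocArray.
     * elemWidth must be a nonzero power of two, as asserted in the patch.
     */
    static bool arraySizeFits(size_t length, size_t elemWidth,
        size_t headerSize, size_t* totalOut)
    {
        size_t elementShift = sizeof(size_t) * CHAR_BIT - 1 - clz_size(elemWidth);
        size_t elementSize = length << elementShift;
        size_t totalSize = elementSize + headerSize;
        if (elementSize >> elementShift != length || totalSize < elementSize)
            return false;
        *totalOut = totalSize;
        return true;
    }

    int main(void)
    {
        size_t total;
        printf("1000 x 8 fits: %d\n", arraySizeFits(1000, 8, 16, &total));
        printf("huge x 8 fits: %d\n",
            arraySizeFits((size_t)-1 / 4, 8, 16, &total));
        return 0;
    }

For example, with a 64-bit size_t an 8-byte element width gives elementShift == 3, and a length near SIZE_MAX/4 wraps when shifted, so the shift-back comparison fails and the allocation is rejected with an OutOfMemoryError instead of a short buffer.
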
diff --git a/vm/oo/Class.c b/vm/oo/Class.c
index 6e16adc..45f1016 100644
--- a/vm/oo/Class.c
+++ b/vm/oo/Class.c
@@ -224,66 +224,66 @@
 static void linearAllocTests()
 {
     char* fiddle;
-    int try = 1;
+    int test = 1;
 
-    switch (try) {
+    switch (test) {
     case 0:
-        fiddle = dvmLinearAlloc(NULL, 3200-28);
-        dvmLinearReadOnly(NULL, fiddle);
+        fiddle = (char*)dvmLinearAlloc(NULL, 3200-28);
+        dvmLinearReadOnly(NULL, (char*)fiddle);
         break;
     case 1:
-        fiddle = dvmLinearAlloc(NULL, 3200-24);
-        dvmLinearReadOnly(NULL, fiddle);
+        fiddle = (char*)dvmLinearAlloc(NULL, 3200-24);
+        dvmLinearReadOnly(NULL, (char*)fiddle);
         break;
     case 2:
-        fiddle = dvmLinearAlloc(NULL, 3200-20);
-        dvmLinearReadOnly(NULL, fiddle);
+        fiddle = (char*)dvmLinearAlloc(NULL, 3200-20);
+        dvmLinearReadOnly(NULL, (char*)fiddle);
         break;
     case 3:
-        fiddle = dvmLinearAlloc(NULL, 3200-16);
-        dvmLinearReadOnly(NULL, fiddle);
+        fiddle = (char*)dvmLinearAlloc(NULL, 3200-16);
+        dvmLinearReadOnly(NULL, (char*)fiddle);
         break;
     case 4:
-        fiddle = dvmLinearAlloc(NULL, 3200-12);
-        dvmLinearReadOnly(NULL, fiddle);
+        fiddle = (char*)dvmLinearAlloc(NULL, 3200-12);
+        dvmLinearReadOnly(NULL, (char*)fiddle);
         break;
     }
-    fiddle = dvmLinearAlloc(NULL, 896);
-    dvmLinearReadOnly(NULL, fiddle);
-    fiddle = dvmLinearAlloc(NULL, 20);      // watch addr of this alloc
-    dvmLinearReadOnly(NULL, fiddle);
+    fiddle = (char*)dvmLinearAlloc(NULL, 896);
+    dvmLinearReadOnly(NULL, (char*)fiddle);
+    fiddle = (char*)dvmLinearAlloc(NULL, 20);      // watch addr of this alloc
+    dvmLinearReadOnly(NULL, (char*)fiddle);
 
-    fiddle = dvmLinearAlloc(NULL, 1);
+    fiddle = (char*)dvmLinearAlloc(NULL, 1);
     fiddle[0] = 'q';
     dvmLinearReadOnly(NULL, fiddle);
-    fiddle = dvmLinearAlloc(NULL, 4096);
+    fiddle = (char*)dvmLinearAlloc(NULL, 4096);
     fiddle[0] = 'x';
     fiddle[4095] = 'y';
     dvmLinearReadOnly(NULL, fiddle);
     dvmLinearFree(NULL, fiddle);
-    fiddle = dvmLinearAlloc(NULL, 0);
+    fiddle = (char*)dvmLinearAlloc(NULL, 0);
     dvmLinearReadOnly(NULL, fiddle);
-    fiddle = dvmLinearRealloc(NULL, fiddle, 12);
+    fiddle = (char*)dvmLinearRealloc(NULL, fiddle, 12);
     fiddle[11] = 'z';
+    dvmLinearReadOnly(NULL, (char*)fiddle);
+    fiddle = (char*)dvmLinearRealloc(NULL, fiddle, 5);
     dvmLinearReadOnly(NULL, fiddle);
-    fiddle = dvmLinearRealloc(NULL, fiddle, 5);
-    dvmLinearReadOnly(NULL, fiddle);
-    fiddle = dvmLinearAlloc(NULL, 17001);
+    fiddle = (char*)dvmLinearAlloc(NULL, 17001);
     fiddle[0] = 'x';
     fiddle[17000] = 'y';
-    dvmLinearReadOnly(NULL, fiddle);
+    dvmLinearReadOnly(NULL, (char*)fiddle);
 
-    char* str = dvmLinearStrdup(NULL, "This is a test!");
+    char* str = (char*)dvmLinearStrdup(NULL, "This is a test!");
     LOGI("GOT: '%s'\n", str);
 
     /* try to check the bounds; allocator may round allocation size up */
-    fiddle = dvmLinearAlloc(NULL, 12);
+    fiddle = (char*)dvmLinearAlloc(NULL, 12);
     LOGI("Should be 1: %d\n", dvmLinearAllocContains(fiddle, 12));
     LOGI("Should be 0: %d\n", dvmLinearAllocContains(fiddle, 13));
     LOGI("Should be 0: %d\n", dvmLinearAllocContains(fiddle - 128*1024, 1));
 
     dvmLinearAllocDump(NULL);
-    dvmLinearFree(NULL, str);
+    dvmLinearFree(NULL, (char*)str);
 }
 
 static size_t classObjectSize(size_t sfieldCount)
@@ -337,9 +337,13 @@
      * If it's NULL, we just fall back to the InitiatingLoaderList in the
      * ClassObject, so it's not fatal to fail this allocation.
      */
-    gDvm.initiatingLoaderList =
+    gDvm.initiatingLoaderList = (InitiatingLoaderList*)
         calloc(ZYGOTE_CLASS_CUTOFF, sizeof(InitiatingLoaderList));
 
+    /*
+     * Initialize the class Class. This has to be done specially, particularly
+     * because it is an instance of itself.
+     */
     gDvm.classJavaLangClass = (ClassObject*) dvmMalloc(
         classObjectSize(CLASS_SFIELD_SLOTS), ALLOC_DEFAULT);
     DVM_OBJECT_INIT(&gDvm.classJavaLangClass->obj, gDvm.classJavaLangClass);
@@ -359,27 +363,6 @@
 }
 
 /*
- * We should be able to find classes now.  Get the vtable index for
- * the class loader loadClass() method.
- *
- * This doesn't work in dexopt when operating on core.jar, because
- * there aren't any classes to load.
- */
-bool dvmBaseClassStartup(void)
-{
-    ClassObject* clClass = dvmFindSystemClassNoInit("Ljava/lang/ClassLoader;");
-    Method* meth = dvmFindVirtualMethodByDescriptor(clClass, "loadClass",
-            "(Ljava/lang/String;)Ljava/lang/Class;");
-    if (meth == NULL) {
-        LOGE("Unable to find loadClass() in java.lang.ClassLoader\n");
-        return false;
-    }
-    gDvm.voffJavaLangClassLoader_loadClass = meth->methodIndex;
-
-    return true;
-}
-
-/*
  * Clean up.
  */
 void dvmClassShutdown(void)
@@ -1059,7 +1042,7 @@
      * here, but this is an extremely rare case, and it's simpler to have
      * the wait-for-class code centralized.
      */
-    if (found != NULL && !unprepOkay && !dvmIsClassLinked(found)) {
+    if (found && !unprepOkay && !dvmIsClassLinked((ClassObject*)found)) {
         LOGV("Ignoring not-yet-ready %s, using slow path\n",
             ((ClassObject*)found)->descriptor);
         found = NULL;
@@ -1262,7 +1245,7 @@
     /* convert "Landroid/debug/Stuff;" to "android.debug.Stuff" */
     dotName = dvmDescriptorToDot(descriptor);
     if (dotName == NULL) {
-        dvmThrowException("Ljava/lang/OutOfMemoryError;", NULL);
+        dvmThrowOutOfMemoryError(NULL);
         goto bail;
     }
     nameObj = dvmCreateStringFromCstr(dotName);
@@ -1296,15 +1279,13 @@
 #endif
         dvmAddTrackedAlloc(excep, self);
         dvmClearException(self);
-        dvmThrowChainedExceptionWithClassMessage(
-            "Ljava/lang/NoClassDefFoundError;", descriptor, excep);
+        dvmThrowChainedNoClassDefFoundError(descriptor, excep);
         dvmReleaseTrackedAlloc(excep, self);
         clazz = NULL;
         goto bail;
     } else if (clazz == NULL) {
         LOGW("ClassLoader returned NULL w/o exception pending\n");
-        dvmThrowException("Ljava/lang/NullPointerException;",
-            "ClassLoader returned null");
+        dvmThrowNullPointerException("ClassLoader returned null");
         goto bail;
     }
 
@@ -1441,8 +1422,7 @@
                 dvmSetException(self, gDvm.noClassDefFoundErrorObj);
             } else {
                 /* dexopt case -- can't guarantee prefab (core.jar) */
-                dvmThrowExceptionWithClassMessage(
-                    "Ljava/lang/NoClassDefFoundError;", descriptor);
+                dvmThrowNoClassDefFoundError(descriptor);
             }
             goto bail;
         }
@@ -1594,8 +1574,7 @@
             {
                 LOGW("Recursive link on class %s\n", clazz->descriptor);
                 dvmUnlockObject(self, (Object*) clazz);
-                dvmThrowExceptionWithClassMessage(
-                    "Ljava/lang/ClassCircularityError;", clazz->descriptor);
+                dvmThrowClassCircularityError(clazz->descriptor);
                 clazz = NULL;
                 goto bail;
             }
@@ -2077,7 +2056,20 @@
     meth->jniArgInfo = 0;
 
     if (dvmCompareNameDescriptorAndMethod("finalize", "()V", meth) == 0) {
-        SET_CLASS_FLAG(clazz, CLASS_ISFINALIZABLE);
+        /*
+         * The Enum class declares a "final" finalize() method to
+         * prevent subclasses from introducing a finalizer.  We don't
+         * want to set the finalizable flag for Enum or its subclasses,
+         * so we check for it here.
+         *
+         * We also want to avoid setting it on Object, but it's easier
+         * to just strip that out later.
+         */
+        if (clazz->classLoader != NULL ||
+            strcmp(clazz->descriptor, "Ljava/lang/Enum;") != 0)
+        {
+            SET_CLASS_FLAG(clazz, CLASS_ISFINALIZABLE);
+        }
     }
 
     pDexCode = dexGetCode(pDexFile, pDexMethod);
@@ -2289,7 +2281,6 @@
  */
 static bool precacheReferenceOffsets(ClassObject* clazz)
 {
-    Method *meth;
     int i;
 
     /* We trick the GC object scanner by not counting
@@ -2340,36 +2331,14 @@
         return false;
     }
 
-    /* Cache pretty much everything about Reference so that
-     * we don't need to call interpreted code when clearing/enqueueing
-     * references.  This is fragile, so we'll be paranoid.
+    /*
+     * Now that the above has been done, it is safe to cache
+     * info about the class.
      */
-    gDvm.classJavaLangRefReference = clazz;
-
-    gDvm.offJavaLangRefReference_referent =
-        dvmFindFieldOffset(gDvm.classJavaLangRefReference,
-                "referent", "Ljava/lang/Object;");
-    assert(gDvm.offJavaLangRefReference_referent >= 0);
-
-    gDvm.offJavaLangRefReference_queue =
-        dvmFindFieldOffset(gDvm.classJavaLangRefReference,
-                "queue", "Ljava/lang/ref/ReferenceQueue;");
-    assert(gDvm.offJavaLangRefReference_queue >= 0);
-
-    gDvm.offJavaLangRefReference_queueNext =
-        dvmFindFieldOffset(gDvm.classJavaLangRefReference,
-                "queueNext", "Ljava/lang/ref/Reference;");
-    assert(gDvm.offJavaLangRefReference_queueNext >= 0);
-
-    gDvm.offJavaLangRefReference_pendingNext =
-        dvmFindFieldOffset(gDvm.classJavaLangRefReference,
-                "pendingNext", "Ljava/lang/ref/Reference;");
-    assert(gDvm.offJavaLangRefReference_pendingNext >= 0);
-
-    /* enqueueInternal() is private and thus a direct method. */
-    meth = dvmFindDirectMethodByDescriptor(clazz, "enqueueInternal", "()Z");
-    assert(meth != NULL);
-    gDvm.methJavaLangRefReference_enqueueInternal = meth;
+    if (!dvmFindReferenceMembers(clazz)) {
+        LOGE("Trouble with Reference setup\n");
+        return false;
+    }
 
     return true;
 }
@@ -2478,7 +2447,7 @@
              */
             assert(sizeof(*interfaceIdxArray) == sizeof(*clazz->interfaces));
             size_t len = clazz->interfaceCount * sizeof(*interfaceIdxArray);
-            interfaceIdxArray = malloc(len);
+            interfaceIdxArray = (u4*)malloc(len);
             if (interfaceIdxArray == NULL) {
                 LOGW("Unable to allocate memory to link %s", clazz->descriptor);
                 goto bail;
@@ -2552,8 +2521,7 @@
                     dvmLinearReadOnly(clazz->classLoader, clazz->interfaces);
                     LOGW("Interface '%s' is not accessible to '%s'\n",
                          clazz->interfaces[i]->descriptor, clazz->descriptor);
-                    dvmThrowException("Ljava/lang/IllegalAccessError;",
-                                      "interface not accessible");
+                    dvmThrowIllegalAccessError("interface not accessible");
                     goto bail;
                 }
                 LOGVV("+++  found interface '%s'\n",
@@ -2578,8 +2546,7 @@
             /* TODO: is this invariant true for all java/lang/Objects,
              * regardless of the class loader?  For now, assume it is.
              */
-            dvmThrowException("Ljava/lang/ClassFormatError;",
-                "java.lang.Object has a superclass");
+            dvmThrowClassFormatError("java.lang.Object has a superclass");
             goto bail;
         }
 
@@ -2589,28 +2556,24 @@
         CLEAR_CLASS_FLAG(clazz, CLASS_ISFINALIZABLE);
     } else {
         if (clazz->super == NULL) {
-            dvmThrowException("Ljava/lang/LinkageError;",
-                              "no superclass defined");
+            dvmThrowLinkageError("no superclass defined");
             goto bail;
         }
         /* verify */
         if (dvmIsFinalClass(clazz->super)) {
             LOGW("Superclass of '%s' is final '%s'\n",
                 clazz->descriptor, clazz->super->descriptor);
-            dvmThrowException("Ljava/lang/IncompatibleClassChangeError;",
-                "superclass is final");
+            dvmThrowIncompatibleClassChangeError("superclass is final");
             goto bail;
         } else if (dvmIsInterfaceClass(clazz->super)) {
             LOGW("Superclass of '%s' is interface '%s'\n",
                 clazz->descriptor, clazz->super->descriptor);
-            dvmThrowException("Ljava/lang/IncompatibleClassChangeError;",
-                "superclass is an interface");
+            dvmThrowIncompatibleClassChangeError("superclass is an interface");
             goto bail;
         } else if (!dvmCheckClassAccess(clazz, clazz->super)) {
             LOGW("Superclass of '%s' (%s) is not accessible\n",
                 clazz->descriptor, clazz->super->descriptor);
-            dvmThrowException("Ljava/lang/IllegalAccessError;",
-                "superclass not accessible");
+            dvmThrowIllegalAccessError("superclass not accessible");
             goto bail;
         }
 
@@ -2665,8 +2628,7 @@
                  * from Reference.
                  */
 //xxx is this the right exception?  better than an assertion.
-                dvmThrowException("Ljava/lang/LinkageError;",
-                    "illegal inheritance from Reference");
+                dvmThrowLinkageError("illegal inheritance from Reference");
                 goto bail;
             }
 
@@ -2733,7 +2695,7 @@
         if (strcmp(clazz->descriptor, "Ljava/lang/ref/Reference;") == 0) {
             if (!precacheReferenceOffsets(clazz)) {
                 LOGE("failed pre-caching Reference offsets\n");
-                dvmThrowException("Ljava/lang/InternalError;", NULL);
+                dvmThrowInternalError(NULL);
                 goto bail;
             }
         } else if (clazz == gDvm.classJavaLangClass) {
@@ -2775,7 +2737,7 @@
      * The class has been prepared and resolved but possibly not yet verified
      * at this point.
      */
-    if (gDvm.debuggerActive) {
+    if (DEBUGGER_ACTIVE) {
         dvmDbgPostClassPrepare(clazz);
     }
 
@@ -2783,7 +2745,7 @@
     if (!okay) {
         clazz->status = CLASS_ERROR;
         if (!dvmCheckException(dvmThreadSelf())) {
-            dvmThrowException("Ljava/lang/VirtualMachineError;", NULL);
+            dvmThrowVirtualMachineError(NULL);
         }
     }
     if (interfaceIdxArray != NULL) {
@@ -2890,8 +2852,8 @@
         if (actualCount < maxCount) {
             assert(clazz->vtable != NULL);
             dvmLinearReadOnly(clazz->classLoader, clazz->vtable);
-            clazz->vtable = dvmLinearRealloc(clazz->classLoader, clazz->vtable,
-                sizeof(*(clazz->vtable)) * actualCount);
+            clazz->vtable = (Method **)dvmLinearRealloc(clazz->classLoader,
+                clazz->vtable, sizeof(*(clazz->vtable)) * actualCount);
             if (clazz->vtable == NULL) {
                 LOGE("vtable realloc failed\n");
                 goto bail;
@@ -3001,8 +2963,7 @@
         if (!dvmIsInterfaceClass(interf)) {
             LOGW("Class '%s' implements non-interface '%s'\n",
                 clazz->descriptor, interf->descriptor);
-            dvmThrowExceptionWithClassMessage(
-                "Ljava/lang/IncompatibleClassChangeError;",
+            dvmThrowIncompatibleClassChangeErrorWithClassMessage(
                 clazz->descriptor);
             goto bail;
         }
@@ -3178,7 +3139,7 @@
                     if (!dvmIsPublicMethod(clazz->vtable[j])) {
                         LOGW("Implementation of %s.%s is not public\n",
                             clazz->descriptor, clazz->vtable[j]->name);
-                        dvmThrowException("Ljava/lang/IllegalAccessError;",
+                        dvmThrowIllegalAccessError(
                             "interface implementation not public");
                         goto bail;
                     }
@@ -3194,17 +3155,19 @@
                             imeth->name, desc, clazz->descriptor);
                     free(desc);
                 }
-                //dvmThrowException("Ljava/lang/RuntimeException;", "Miranda!");
+                //dvmThrowRuntimeException("Miranda!");
                 //return false;
 
                 if (mirandaCount == mirandaAlloc) {
                     mirandaAlloc += 8;
                     if (mirandaList == NULL) {
-                        mirandaList = dvmLinearAlloc(clazz->classLoader,
+                        mirandaList = (Method**)dvmLinearAlloc(
+                                        clazz->classLoader,
                                         mirandaAlloc * sizeof(Method*));
                     } else {
                         dvmLinearReadOnly(clazz->classLoader, mirandaList);
-                        mirandaList = dvmLinearRealloc(clazz->classLoader,
+                        mirandaList = (Method**)dvmLinearRealloc(
+                                clazz->classLoader,
                                 mirandaList, mirandaAlloc * sizeof(Method*));
                     }
                     assert(mirandaList != NULL);    // mem failed + we leaked
@@ -3696,69 +3659,6 @@
 }
 
 /*
- * Throw the VM-spec-mandated error when an exception is thrown during
- * class initialization.
- *
- * The safest way to do this is to call the ExceptionInInitializerError
- * constructor that takes a Throwable.
- *
- * [Do we want to wrap it if the original is an Error rather than
- * an Exception?]
- */
-static void throwClinitError(void)
-{
-    Thread* self = dvmThreadSelf();
-    Object* exception;
-    Object* eiie;
-
-    exception = dvmGetException(self);
-    dvmAddTrackedAlloc(exception, self);
-    dvmClearException(self);
-
-    if (gDvm.classJavaLangExceptionInInitializerError == NULL) {
-        /*
-         * Always resolves to same thing -- no race condition.
-         */
-        gDvm.classJavaLangExceptionInInitializerError =
-            dvmFindSystemClass(
-                    "Ljava/lang/ExceptionInInitializerError;");
-        if (gDvm.classJavaLangExceptionInInitializerError == NULL) {
-            LOGE("Unable to prep java/lang/ExceptionInInitializerError\n");
-            goto fail;
-        }
-
-        gDvm.methJavaLangExceptionInInitializerError_init =
-            dvmFindDirectMethodByDescriptor(gDvm.classJavaLangExceptionInInitializerError,
-            "<init>", "(Ljava/lang/Throwable;)V");
-        if (gDvm.methJavaLangExceptionInInitializerError_init == NULL) {
-            LOGE("Unable to prep java/lang/ExceptionInInitializerError\n");
-            goto fail;
-        }
-    }
-
-    eiie = dvmAllocObject(gDvm.classJavaLangExceptionInInitializerError,
-                ALLOC_DEFAULT);
-    if (eiie == NULL)
-        goto fail;
-
-    /*
-     * Construct the new object, and replace the exception with it.
-     */
-    JValue unused;
-    dvmCallMethod(self, gDvm.methJavaLangExceptionInInitializerError_init,
-        eiie, &unused, exception);
-    dvmSetException(self, eiie);
-    dvmReleaseTrackedAlloc(eiie, NULL);
-    dvmReleaseTrackedAlloc(exception, self);
-    return;
-
-fail:       /* restore original exception */
-    dvmSetException(self, exception);
-    dvmReleaseTrackedAlloc(exception, self);
-    return;
-}
-
-/*
  * The class failed to initialize on a previous attempt, so we want to throw
  * a NoClassDefFoundError (v2 2.17.5).  The exception to this rule is if we
  * failed in verification, in which case v2 5.4.1 says we need to re-throw
@@ -3770,10 +3670,9 @@
         clazz->descriptor, clazz->verifyErrorClass);
 
     if (clazz->verifyErrorClass == NULL) {
-        dvmThrowExceptionWithClassMessage("Ljava/lang/NoClassDefFoundError;",
-            clazz->descriptor);
+        dvmThrowNoClassDefFoundError(clazz->descriptor);
     } else {
-        dvmThrowExceptionByClassWithClassMessage(clazz->verifyErrorClass,
+        dvmThrowExceptionWithClassMessage(clazz->verifyErrorClass,
             clazz->descriptor);
     }
 }
@@ -3882,8 +3781,8 @@
              * All's well, so store the value.
              */
             if (isObj) {
-                dvmSetStaticFieldObject(sfield, value.value.l);
-                dvmReleaseTrackedAlloc(value.value.l, self);
+                dvmSetStaticFieldObject(sfield, (Object*)value.value.l);
+                dvmReleaseTrackedAlloc((Object*)value.value.l, self);
             } else {
                 /*
                  * Note: This always stores the full width of a
@@ -4099,7 +3998,7 @@
                 LOGW("Method mismatch: %s in %s (cl=%p) and super %s (cl=%p)\n",
                     meth->name, clazz->descriptor, clazz->classLoader,
                     clazz->super->descriptor, clazz->super->classLoader);
-                dvmThrowException("Ljava/lang/LinkageError;",
+                dvmThrowLinkageError(
                     "Classes resolve differently in superclass");
                 return false;
             }
@@ -4133,7 +4032,7 @@
                             "iface %s (cl=%p)\n",
                         meth->name, clazz->descriptor, clazz->classLoader,
                         iface->descriptor, iface->classLoader);
-                    dvmThrowException("Ljava/lang/LinkageError;",
+                    dvmThrowLinkageError(
                         "Classes resolve differently in interface");
                     return false;
                 }
@@ -4291,8 +4190,7 @@
         clazz->status = CLASS_VERIFYING;
         if (!dvmVerifyClass(clazz)) {
 verify_failed:
-            dvmThrowExceptionWithClassMessage("Ljava/lang/VerifyError;",
-                clazz->descriptor);
+            dvmThrowVerifyError(clazz->descriptor);
             dvmSetFieldObject((Object*) clazz,
                 offsetof(ClassObject, verifyErrorClass),
                 (Object*) dvmGetException(self)->clazz);
@@ -4321,7 +4219,8 @@
     if (!IS_CLASS_FLAG_SET(clazz, CLASS_ISOPTIMIZED) && !gDvm.optimizing) {
         LOGV("+++ late optimize on %s (pv=%d)\n",
             clazz->descriptor, IS_CLASS_FLAG_SET(clazz, CLASS_ISPREVERIFIED));
-        dvmOptimizeClass(clazz, true);
+        bool essentialOnly = (gDvm.dexOptMode != OPTIMIZE_MODE_FULL);
+        dvmOptimizeClass(clazz, essentialOnly);
         SET_CLASS_FLAG(clazz, CLASS_ISOPTIMIZED);
     }
 
@@ -4371,7 +4270,7 @@
              * never happen and we don't need to fix this.
              */
             assert(false);
-            throwClinitError();
+            dvmThrowExceptionInInitializerError();
             clazz->status = CLASS_ERROR;
             goto bail_unlock;
         }
@@ -4386,7 +4285,7 @@
              * The caller wants an exception, but it was thrown in a
              * different thread.  Synthesize one here.
              */
-            dvmThrowException("Ljava/lang/UnsatisfiedLinkError;",
+            dvmThrowUnsatisfiedLinkError(
                 "(<clinit> failed, see exception in other thread)");
         }
         goto bail_unlock;
@@ -4488,7 +4387,7 @@
          */
         LOGW("Exception %s thrown while initializing %s\n",
             (dvmGetException(self)->clazz)->descriptor, clazz->descriptor);
-        throwClinitError();
+        dvmThrowExceptionInInitializerError();
         //LOGW("+++ replaced\n");
 
         dvmLockObject(self, (Object*) clazz);
@@ -4569,7 +4468,7 @@
         /* update both, ensuring that "insns" is observed first */
         method->insns = insns;
         android_atomic_release_store((int32_t) func,
-            (void*) &method->nativeFunc);
+            (volatile int32_t*)(void*) &method->nativeFunc);
     } else {
         /* only update nativeFunc */
         method->nativeFunc = func;
@@ -4613,7 +4512,7 @@
  */
 static int findClassCallback(void* vclazz, void* arg)
 {
-    ClassObject* clazz = vclazz;
+    ClassObject* clazz = (ClassObject*)vclazz;
     const char* descriptor = (const char*) arg;
 
     if (strcmp(clazz->descriptor, descriptor) == 0)
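
The recurring change in the vm/oo/Class.c hunks above is mechanical: descriptor-string throws such as dvmThrowException("Ljava/lang/IllegalAccessError;", msg) are replaced by dedicated helpers (dvmThrowIllegalAccessError, dvmThrowIncompatibleClassChangeErrorWithClassMessage, and so on). The helpers themselves are not part of these hunks; the sketch below is only a guess at their shape, assuming each is a thin wrapper over the existing descriptor-based entry points.

    /*
     * Hypothetical sketch of the typed throw helpers used above; the real
     * definitions live elsewhere in the VM and may differ.
     */
    #include "Dalvik.h"   /* assumed: declares dvmThrowException() et al. */

    void dvmThrowIllegalAccessError(const char* msg)
    {
        dvmThrowException("Ljava/lang/IllegalAccessError;", msg);
    }

    void dvmThrowIncompatibleClassChangeError(const char* msg)
    {
        dvmThrowException("Ljava/lang/IncompatibleClassChangeError;", msg);
    }

    void dvmThrowIncompatibleClassChangeErrorWithClassMessage(
        const char* descriptor)
    {
        dvmThrowExceptionWithClassMessage(
            "Ljava/lang/IncompatibleClassChangeError;", descriptor);
    }
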
diff --git a/vm/oo/Class.h b/vm/oo/Class.h
index 9ec6f12..e27ef79 100644
--- a/vm/oo/Class.h
+++ b/vm/oo/Class.h
@@ -48,7 +48,6 @@
 } ClassPathEntry;
 
 bool dvmClassStartup(void);
-bool dvmBaseClassStartup(void);
 void dvmClassShutdown(void);
 bool dvmPrepBootClassPath(bool isNormalStart);
 
diff --git a/vm/oo/Object.c b/vm/oo/Object.c
index cca6806..00df420 100644
--- a/vm/oo/Object.c
+++ b/vm/oo/Object.c
@@ -696,7 +696,7 @@
                 break;
         }
         if (i == clazz->iftableCount) {
-            dvmThrowException("Ljava/lang/IncompatibleClassChangeError;",
+            dvmThrowIncompatibleClassChangeError(
                 "invoking method from interface not implemented by class");
             return NULL;
         }
@@ -713,7 +713,7 @@
      * Make sure there's code to execute.
      */
     if (dvmIsAbstractMethod(actualMeth)) {
-        dvmThrowException("Ljava/lang/AbstractMethodError;", NULL);
+        dvmThrowAbstractMethodError(NULL);
         return NULL;
     }
     assert(!dvmIsMirandaMethod(actualMeth));
diff --git a/vm/oo/ObjectInlines.h b/vm/oo/ObjectInlines.h
index 7374a10..ef01834 100644
--- a/vm/oo/ObjectInlines.h
+++ b/vm/oo/ObjectInlines.h
@@ -26,7 +26,7 @@
  */
 INLINE void dvmSetObjectArrayElement(const ArrayObject* obj, int index,
                                      Object* val) {
-    ((Object **)(obj)->contents)[index] = val;
+    ((Object **)(void *)(obj)->contents)[index] = val;
     if (val != NULL) {
         dvmWriteBarrierArray(obj, index, index + 1);
     }
@@ -76,7 +76,7 @@
     return ((JValue*)BYTE_OFFSET(obj, offset))->d;
 }
 INLINE Object* dvmGetFieldObject(const Object* obj, int offset) {
-    return ((JValue*)BYTE_OFFSET(obj, offset))->l;
+    return (Object*)((JValue*)BYTE_OFFSET(obj, offset))->l;
 }
 INLINE bool dvmGetFieldBooleanVolatile(const Object* obj, int offset) {
     s4* ptr = &((JValue*)BYTE_OFFSET(obj, offset))->i;
@@ -105,14 +105,14 @@
     return alias.fval;
 }
 INLINE s8 dvmGetFieldLongVolatile(const Object* obj, int offset) {
-    const s8* addr = BYTE_OFFSET(obj, offset);
+    const s8* addr = (const s8*)BYTE_OFFSET(obj, offset);
     s8 val = dvmQuasiAtomicRead64(addr);
     ANDROID_MEMBAR_FULL();
     return val;
 }
 INLINE double dvmGetFieldDoubleVolatile(const Object* obj, int offset) {
     union { s8 lval; double dval; } alias;
-    const s8* addr = BYTE_OFFSET(obj, offset);
+    const s8* addr = (const s8*)BYTE_OFFSET(obj, offset);
     alias.lval = dvmQuasiAtomicRead64(addr);
     ANDROID_MEMBAR_FULL();
     return alias.dval;
@@ -147,7 +147,7 @@
     ((JValue*)BYTE_OFFSET(obj, offset))->d = val;
 }
 INLINE void dvmSetFieldObject(Object* obj, int offset, Object* val) {
-    JValue* lhs = BYTE_OFFSET(obj, offset);
+    JValue* lhs = (JValue*)BYTE_OFFSET(obj, offset);
     lhs->l = val;
     if (val != NULL) {
         dvmWriteBarrierField(obj, &lhs->l);
@@ -182,7 +182,7 @@
     dvmSetFieldIntVolatile(obj, offset, alias.ival);
 }
 INLINE void dvmSetFieldLongVolatile(Object* obj, int offset, s8 val) {
-    s8* addr = BYTE_OFFSET(obj, offset);
+    s8* addr = (s8*)BYTE_OFFSET(obj, offset);
     ANDROID_MEMBAR_STORE();
     dvmQuasiAtomicSwap64(val, addr);
     /* post-store barrier not required due to use of atomic op or mutex */
@@ -234,7 +234,7 @@
     return sfield->value.d;
 }
 INLINE Object* dvmGetStaticFieldObject(const StaticField* sfield) {
-    return sfield->value.l;
+    return (Object*)sfield->value.l;
 }
 INLINE bool dvmGetStaticFieldBooleanVolatile(const StaticField* sfield) {
     const s4* ptr = &(sfield->value.i);
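
The ObjectInlines.h hunks add explicit casts on pointers derived from BYTE_OFFSET() and on JValue's object slot. In C these casts are redundant, since a void* converts implicitly to any object pointer; the likely motivation is that the header is also pulled into C++ translation units, where the implicit conversion is an error. Below is a standalone illustration of that difference, using a stand-in byteOffset() helper in place of the VM macro (an assumption about what BYTE_OFFSET yields).

    #include <stddef.h>

    /* Stand-in for the VM's BYTE_OFFSET macro; assumed to produce a void*. */
    static void* byteOffset(void* ptr, int offset)
    {
        return (void*)((char*)ptr + offset);
    }

    struct Sample {
        int       pad;
        long long field;
    };

    long long readField(struct Sample* s)
    {
        /*
         * In C the cast below could be omitted (void* converts implicitly),
         * but a C++ compiler rejects the assignment without it, which is why
         * the accessors above gained explicit (JValue*) / (s8*) casts.
         */
        long long* addr =
            (long long*)byteOffset(s, (int)offsetof(struct Sample, field));
        return *addr;
    }
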
diff --git a/vm/oo/Resolve.c b/vm/oo/Resolve.c
index b277576..033edc0 100644
--- a/vm/oo/Resolve.c
+++ b/vm/oo/Resolve.c
@@ -133,7 +133,7 @@
                     resClassCheck->classLoader, resClassCheck->pDvmDex);
                 LOGW("(%s had used a different %s during pre-verification)\n",
                     referrer->descriptor, resClass->descriptor);
-                dvmThrowException("Ljava/lang/IllegalAccessError;",
+                dvmThrowIllegalAccessError(
                     "Class ref in pre-verified class resolved to unexpected "
                     "implementation");
                 return NULL;
@@ -195,9 +195,8 @@
     }
     if (dvmIsInterfaceClass(resClass)) {
         /* method is part of an interface */
-        dvmThrowExceptionWithClassMessage(
-            "Ljava/lang/IncompatibleClassChangeError;",
-            resClass->descriptor);
+        dvmThrowIncompatibleClassChangeErrorWithClassMessage(
+                resClass->descriptor);
         return NULL;
     }
 
@@ -220,7 +219,7 @@
     }
 
     if (resMethod == NULL) {
-        dvmThrowException("Ljava/lang/NoSuchMethodError;", name);
+        dvmThrowNoSuchMethodError(name);
         return NULL;
     }
 
@@ -229,7 +228,7 @@
 
     /* see if this is a pure-abstract method */
     if (dvmIsAbstractMethod(resMethod) && !dvmIsAbstractClass(resClass)) {
-        dvmThrowException("Ljava/lang/AbstractMethodError;", name);
+        dvmThrowAbstractMethodError(name);
         return NULL;
     }
 
@@ -300,9 +299,8 @@
     }
     if (!dvmIsInterfaceClass(resClass)) {
         /* whoops */
-        dvmThrowExceptionWithClassMessage(
-            "Ljava/lang/IncompatibleClassChangeError;",
-            resClass->descriptor);
+        dvmThrowIncompatibleClassChangeErrorWithClassMessage(
+                resClass->descriptor);
         return NULL;
     }
 
@@ -339,7 +337,7 @@
         methodName, methodSig, resClass->descriptor);
     resMethod = dvmFindInterfaceMethodHier(resClass, methodName, &proto);
     if (resMethod == NULL) {
-        dvmThrowException("Ljava/lang/NoSuchMethodError;", methodName);
+        dvmThrowNoSuchMethodError(methodName);
         return NULL;
     }
 
@@ -406,7 +404,7 @@
         dexStringById(pDvmDex->pDexFile, pFieldId->nameIdx),
         dexStringByTypeIdx(pDvmDex->pDexFile, pFieldId->typeIdx));
     if (resField == NULL) {
-        dvmThrowException("Ljava/lang/NoSuchFieldError;",
+        dvmThrowNoSuchFieldError(
             dexStringById(pDvmDex->pDexFile, pFieldId->nameIdx));
         return NULL;
     }
@@ -465,7 +463,7 @@
                 dexStringById(pDvmDex->pDexFile, pFieldId->nameIdx),
                 dexStringByTypeIdx(pDvmDex->pDexFile, pFieldId->typeIdx));
     if (resField == NULL) {
-        dvmThrowException("Ljava/lang/NoSuchFieldError;",
+        dvmThrowNoSuchFieldError(
             dexStringById(pDvmDex->pDexFile, pFieldId->nameIdx));
         return NULL;
     }
diff --git a/vm/reflect/Annotation.c b/vm/reflect/Annotation.c
index aad73c8..0fcf80d 100644
--- a/vm/reflect/Annotation.c
+++ b/vm/reflect/Annotation.c
@@ -54,67 +54,6 @@
 static const char* kDescrSignature  = "Ldalvik/annotation/Signature;";
 static const char* kDescrThrows     = "Ldalvik/annotation/Throws;";
 
-
-/*
- * Perform Annotation setup.
- */
-bool dvmReflectAnnotationStartup(void)
-{
-    Method* meth;
-
-    /*
-     * Find some standard Annotation classes.
-     */
-    gDvm.classJavaLangAnnotationAnnotationArray =
-        dvmFindArrayClass("[Ljava/lang/annotation/Annotation;", NULL);
-    gDvm.classJavaLangAnnotationAnnotationArrayArray =
-        dvmFindArrayClass("[[Ljava/lang/annotation/Annotation;", NULL);
-    if (gDvm.classJavaLangAnnotationAnnotationArray == NULL ||
-        gDvm.classJavaLangAnnotationAnnotationArrayArray == NULL)
-    {
-        LOGE("Could not find Annotation-array classes\n");
-        return false;
-    }
-
-    /*
-     * VM-specific annotation classes.
-     */
-    gDvm.classOrgApacheHarmonyLangAnnotationAnnotationFactory =
-        dvmFindSystemClassNoInit("Lorg/apache/harmony/lang/annotation/AnnotationFactory;");
-    gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMember =
-        dvmFindSystemClassNoInit("Lorg/apache/harmony/lang/annotation/AnnotationMember;");
-    gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMemberArray =
-        dvmFindArrayClass("[Lorg/apache/harmony/lang/annotation/AnnotationMember;", NULL);
-    if (gDvm.classOrgApacheHarmonyLangAnnotationAnnotationFactory == NULL ||
-        gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMember == NULL ||
-        gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMemberArray == NULL)
-    {
-        LOGE("Could not find android.lang annotation classes\n");
-        return false;
-    }
-
-    meth = dvmFindDirectMethodByDescriptor(gDvm.classOrgApacheHarmonyLangAnnotationAnnotationFactory,
-            "createAnnotation",
-            "(Ljava/lang/Class;[Lorg/apache/harmony/lang/annotation/AnnotationMember;)Ljava/lang/annotation/Annotation;");
-    if (meth == NULL) {
-        LOGE("Unable to find createAnnotation() in android AnnotationFactory\n");
-        return false;
-    }
-    gDvm.methOrgApacheHarmonyLangAnnotationAnnotationFactory_createAnnotation = meth;
-
-    meth = dvmFindDirectMethodByDescriptor(gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMember,
-            "<init>",
-            "(Ljava/lang/String;Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/reflect/Method;)V");
-    if (meth == NULL) {
-        LOGE("Unable to find 4-arg constructor in android AnnotationMember\n");
-        return false;
-    }
-
-    gDvm.methOrgApacheHarmonyLangAnnotationAnnotationMember_init = meth;
-
-    return true;
-}
-
 /*
  * Read an unsigned LEB128 value from a buffer.  Advances "pBuf".
  */
@@ -488,8 +427,7 @@
                 DexFile* pDexFile = clazz->pDvmDex->pDexFile;
                 const char* desc = dexStringByTypeIdx(pDexFile, idx);
                 dvmClearException(self);
-                dvmThrowExceptionWithClassMessage(
-                        "Ljava/lang/TypeNotPresentException;", desc);
+                dvmThrowTypeNotPresentException(desc);
                 return false;
             } else {
                 dvmAddTrackedAlloc(elemObj, self);      // balance the Release
@@ -527,7 +465,7 @@
                 return false;
             } else {
                 assert(sfield->field.clazz->descriptor[0] == 'L');
-                elemObj = sfield->value.l;
+                elemObj = (Object*)sfield->value.l;
                 setObject = true;
                 dvmAddTrackedAlloc(elemObj, self);      // balance the Release
             }
@@ -562,7 +500,7 @@
                     dvmReleaseTrackedAlloc((Object*)newArray, self);
                     return false;
                 }
-                Object* obj = avalue.value.l;
+                Object* obj = (Object*)avalue.value.l;
                 dvmSetObjectArrayElement(newArray, count, obj);
                 dvmReleaseTrackedAlloc(obj, self);
             }
@@ -725,7 +663,7 @@
         LOGW("Failed processing annotation value\n");
         goto bail;
     }
-    valueObj = avalue.value.l;
+    valueObj = (Object*)avalue.value.l;
 
     /* new member to hold the element */
     newMember =
@@ -868,15 +806,14 @@
         goto bail;
     }
 
-    newAnno = result.l;
+    newAnno = (Object*)result.l;
 
 bail:
     dvmReleaseTrackedAlloc((Object*) elementArray, NULL);
     *pPtr = ptr;
     if (newAnno == NULL && !dvmCheckException(self)) {
         /* make sure an exception is raised */
-        dvmThrowException("Ljava/lang/RuntimeException;",
-            "failure in processEncodedAnnotation");
+        dvmThrowRuntimeException("failure in processEncodedAnnotation");
     }
     return newAnno;
 }
@@ -1162,7 +1099,7 @@
         return GAV_FAILED;
     }
 
-    return avalue.value.l;
+    return (Object*)avalue.value.l;
 }
 
 
@@ -1806,7 +1743,7 @@
 
     /* convert the return type, if necessary */
     ClassObject* methodReturn = dvmGetBoxedReturnType(method);
-    Object* obj = avalue.value.l;
+    Object* obj = (Object*)avalue.value.l;
     obj = convertReturnType(obj, methodReturn);
 
     return obj;
diff --git a/vm/reflect/Proxy.c b/vm/reflect/Proxy.c
index ce72d99..7b41360 100644
--- a/vm/reflect/Proxy.c
+++ b/vm/reflect/Proxy.c
@@ -49,70 +49,6 @@
 #define kThrowsField    0
 #define kProxySFieldCount 1
 
-
-/*
- * Perform Proxy setup.
- */
-bool dvmReflectProxyStartup()
-{
-    /*
-     * Standard methods we must provide in our proxy.
-     */
-    Method* methE;
-    Method* methH;
-    Method* methT;
-    Method* methF;
-    methE = dvmFindVirtualMethodByDescriptor(gDvm.classJavaLangObject,
-                "equals", "(Ljava/lang/Object;)Z");
-    methH = dvmFindVirtualMethodByDescriptor(gDvm.classJavaLangObject,
-                "hashCode", "()I");
-    methT = dvmFindVirtualMethodByDescriptor(gDvm.classJavaLangObject,
-                "toString", "()Ljava/lang/String;");
-    methF = dvmFindVirtualMethodByDescriptor(gDvm.classJavaLangObject,
-                "finalize", "()V");
-    if (methE == NULL || methH == NULL || methT == NULL || methF == NULL) {
-        LOGE("Could not find equals/hashCode/toString/finalize in Object\n");
-        return false;
-    }
-    gDvm.voffJavaLangObject_equals = methE->methodIndex;
-    gDvm.voffJavaLangObject_hashCode = methH->methodIndex;
-    gDvm.voffJavaLangObject_toString = methT->methodIndex;
-    gDvm.voffJavaLangObject_finalize = methF->methodIndex;
-
-    /*
-     * The prototype signature needs to be cloned from a method in a
-     * "real" DEX file.  We declared this otherwise unused method just
-     * for this purpose.
-     */
-    ClassObject* proxyClass;
-    Method* meth;
-    proxyClass = dvmFindSystemClassNoInit("Ljava/lang/reflect/Proxy;");
-    if (proxyClass == NULL) {
-        LOGE("No java.lang.reflect.Proxy\n");
-        return false;
-    }
-    meth = dvmFindDirectMethodByDescriptor(proxyClass, "constructorPrototype",
-                "(Ljava/lang/reflect/InvocationHandler;)V");
-    if (meth == NULL) {
-        LOGE("Could not find java.lang.Proxy.constructorPrototype()\n");
-        return false;
-    }
-    gDvm.methJavaLangReflectProxy_constructorPrototype = meth;
-
-    /*
-     * Get the offset of the "h" field in Proxy.
-     */
-    gDvm.offJavaLangReflectProxy_h = dvmFindFieldOffset(proxyClass, "h",
-        "Ljava/lang/reflect/InvocationHandler;");
-    if (gDvm.offJavaLangReflectProxy_h < 0) {
-        LOGE("Unable to find 'h' field in java.lang.Proxy\n");
-        return false;
-    }
-
-    return true;
-}
-
-
 /*
  * Generate a proxy class with the specified name, interfaces, and loader.
  * "interfaces" is an array of class objects.
@@ -139,8 +75,7 @@
 
     nameStr = dvmCreateCstrFromString(str);
     if (nameStr == NULL) {
-        dvmThrowException("Ljava/lang/IllegalArgumentException;",
-            "missing name");
+        dvmThrowIllegalArgumentException("missing name");
         goto bail;
     }
 
@@ -195,9 +130,6 @@
     dvmSetFieldObject((Object *)newClass,
                       offsetof(ClassObject, classLoader),
                       (Object *)loader);
-#if WITH_HPROF_STACK
-    hprofFillInStackTrace(newClass);
-#endif
 
     /*
      * Add direct method definitions.  We have one (the constructor).
@@ -277,7 +209,7 @@
         newClass = NULL;
         if (!dvmCheckException(dvmThreadSelf())) {
             /* throw something */
-            dvmThrowException("Ljava/lang/RuntimeException;", NULL);
+            dvmThrowRuntimeException(NULL);
         }
     }
 
@@ -606,7 +538,7 @@
         if (allMethods[i] != NULL) {
             LOGV("BAD DUPE: %d %s.%s\n", i,
                 allMethods[i]->clazz->descriptor, allMethods[i]->name);
-            dvmThrowException("Ljava/lang/IllegalArgumentException;",
+            dvmThrowIllegalArgumentException(
                 "incompatible return types in proxied interfaces");
             return -1;
         }
@@ -726,7 +658,7 @@
 
     /* grab a local copy to work on */
     for (i = 0; i < mixLen; i++) {
-        mixSet[i] = dvmPointerSetGetEntry(throws, i);
+        mixSet[i] = (ClassObject*)dvmPointerSetGetEntry(throws, i);
     }
 
     for (i = 0; i < mixLen; i++) {
@@ -1038,15 +970,15 @@
         LOGVV("+++ ignoring return to void\n");
     } else if (invokeResult.l == NULL) {
         if (dvmIsPrimitiveClass(returnType)) {
-            dvmThrowException("Ljava/lang/NullPointerException;",
+            dvmThrowNullPointerException(
                 "null result when primitive expected");
             goto bail;
         }
         pResult->l = NULL;
     } else {
-        if (!dvmUnboxPrimitive(invokeResult.l, returnType, pResult)) {
-            dvmThrowExceptionWithClassMessage("Ljava/lang/ClassCastException;",
-                ((Object*)invokeResult.l)->clazz->descriptor);
+        if (!dvmUnboxPrimitive((Object*)invokeResult.l, returnType, pResult)) {
+            dvmThrowClassCastException(((Object*)invokeResult.l)->clazz,
+                    returnType);
             goto bail;
         }
     }
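
The proxy return-value hunk above switches from a descriptor-string ClassCastException to a two-class form, dvmThrowClassCastException(actualClass, desiredClass), which can name both the actual and the expected type in the message. That helper is not defined in this patch; the following is a hedged sketch of what such a helper could look like, assuming it formats a "cannot be cast to" message from the two descriptors.

    /*
     * Hypothetical sketch only; the real dvmThrowClassCastException() in the
     * VM's exception code may build its message differently.
     */
    #include "Dalvik.h"   /* assumed: ClassObject, dvmThrowException() */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    void dvmThrowClassCastException(const ClassObject* actual,
        const ClassObject* desired)
    {
        size_t len = strlen(actual->descriptor)
                   + strlen(desired->descriptor) + 32;
        char* msg = (char*)malloc(len);

        if (msg != NULL) {
            snprintf(msg, len, "%s cannot be cast to %s",
                actual->descriptor, desired->descriptor);
        }
        dvmThrowException("Ljava/lang/ClassCastException;", msg);
        free(msg);
    }
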
diff --git a/vm/reflect/Reflect.c b/vm/reflect/Reflect.c
index 48eb477..dd87d44 100644
--- a/vm/reflect/Reflect.c
+++ b/vm/reflect/Reflect.c
@@ -21,118 +21,6 @@
 #include <stdlib.h>
 
 /*
- * Cache some classes.
- */
-bool dvmReflectStartup(void)
-{
-    gDvm.classJavaLangReflectAccessibleObject =
-        dvmFindSystemClassNoInit("Ljava/lang/reflect/AccessibleObject;");
-    gDvm.classJavaLangReflectConstructor =
-        dvmFindSystemClassNoInit("Ljava/lang/reflect/Constructor;");
-    gDvm.classJavaLangReflectConstructorArray =
-        dvmFindArrayClass("[Ljava/lang/reflect/Constructor;", NULL);
-    gDvm.classJavaLangReflectField =
-        dvmFindSystemClassNoInit("Ljava/lang/reflect/Field;");
-    gDvm.classJavaLangReflectFieldArray =
-        dvmFindArrayClass("[Ljava/lang/reflect/Field;", NULL);
-    gDvm.classJavaLangReflectMethod =
-        dvmFindSystemClassNoInit("Ljava/lang/reflect/Method;");
-    gDvm.classJavaLangReflectMethodArray =
-        dvmFindArrayClass("[Ljava/lang/reflect/Method;", NULL);
-    gDvm.classJavaLangReflectProxy =
-        dvmFindSystemClassNoInit("Ljava/lang/reflect/Proxy;");
-    if (gDvm.classJavaLangReflectAccessibleObject == NULL ||
-        gDvm.classJavaLangReflectConstructor == NULL ||
-        gDvm.classJavaLangReflectConstructorArray == NULL ||
-        gDvm.classJavaLangReflectField == NULL ||
-        gDvm.classJavaLangReflectFieldArray == NULL ||
-        gDvm.classJavaLangReflectMethod == NULL ||
-        gDvm.classJavaLangReflectMethodArray == NULL ||
-        gDvm.classJavaLangReflectProxy == NULL)
-    {
-        LOGE("Could not find one or more reflection classes\n");
-        return false;
-    }
-
-    gDvm.methJavaLangReflectConstructor_init =
-        dvmFindDirectMethodByDescriptor(gDvm.classJavaLangReflectConstructor, "<init>",
-        "(Ljava/lang/Class;[Ljava/lang/Class;[Ljava/lang/Class;I)V");
-    gDvm.methJavaLangReflectField_init =
-        dvmFindDirectMethodByDescriptor(gDvm.classJavaLangReflectField, "<init>",
-        "(Ljava/lang/Class;Ljava/lang/Class;Ljava/lang/String;I)V");
-    gDvm.methJavaLangReflectMethod_init =
-        dvmFindDirectMethodByDescriptor(gDvm.classJavaLangReflectMethod, "<init>",
-        "(Ljava/lang/Class;[Ljava/lang/Class;[Ljava/lang/Class;Ljava/lang/Class;Ljava/lang/String;I)V");
-    if (gDvm.methJavaLangReflectConstructor_init == NULL ||
-        gDvm.methJavaLangReflectField_init == NULL ||
-        gDvm.methJavaLangReflectMethod_init == NULL)
-    {
-        LOGE("Could not find reflection constructors\n");
-        return false;
-    }
-
-    gDvm.classJavaLangClassArray =
-        dvmFindArrayClass("[Ljava/lang/Class;", NULL);
-    gDvm.classJavaLangObjectArray =
-        dvmFindArrayClass("[Ljava/lang/Object;", NULL);
-    if (gDvm.classJavaLangClassArray == NULL ||
-        gDvm.classJavaLangObjectArray == NULL)
-    {
-        LOGE("Could not find class-array or object-array class\n");
-        return false;
-    }
-
-    gDvm.offJavaLangReflectAccessibleObject_flag =
-        dvmFindFieldOffset(gDvm.classJavaLangReflectAccessibleObject, "flag",
-            "Z");
-
-    gDvm.offJavaLangReflectConstructor_slot =
-        dvmFindFieldOffset(gDvm.classJavaLangReflectConstructor, "slot", "I");
-    gDvm.offJavaLangReflectConstructor_declClass =
-        dvmFindFieldOffset(gDvm.classJavaLangReflectConstructor,
-            "declaringClass", "Ljava/lang/Class;");
-
-    gDvm.offJavaLangReflectField_slot =
-        dvmFindFieldOffset(gDvm.classJavaLangReflectField, "slot", "I");
-    gDvm.offJavaLangReflectField_declClass =
-        dvmFindFieldOffset(gDvm.classJavaLangReflectField,
-            "declaringClass", "Ljava/lang/Class;");
-
-    gDvm.offJavaLangReflectMethod_slot =
-        dvmFindFieldOffset(gDvm.classJavaLangReflectMethod, "slot", "I");
-    gDvm.offJavaLangReflectMethod_declClass =
-        dvmFindFieldOffset(gDvm.classJavaLangReflectMethod,
-            "declaringClass", "Ljava/lang/Class;");
-
-    if (gDvm.offJavaLangReflectAccessibleObject_flag < 0 ||
-        gDvm.offJavaLangReflectConstructor_slot < 0 ||
-        gDvm.offJavaLangReflectConstructor_declClass < 0 ||
-        gDvm.offJavaLangReflectField_slot < 0 ||
-        gDvm.offJavaLangReflectField_declClass < 0 ||
-        gDvm.offJavaLangReflectMethod_slot < 0 ||
-        gDvm.offJavaLangReflectMethod_declClass < 0)
-    {
-        LOGE("Could not find reflection fields\n");
-        return false;
-    }
-
-    if (!dvmReflectProxyStartup())
-        return false;
-    if (!dvmReflectAnnotationStartup())
-        return false;
-
-    return true;
-}
-
-/*
- * Clean up.
- */
-void dvmReflectShutdown(void)
-{
-    // nothing to do
-}
-
-/*
  * For some of the reflection stuff we need to un-box primitives, e.g.
  * convert a java/lang/Integer to int or even a float.  We assume that
  * the first instance field holds the value.
@@ -1139,7 +1027,7 @@
     if (typeIndex == PRIM_NOT) {
         /* add to tracking table so return value is always in table */
         if (value.l != NULL)
-            dvmAddTrackedAlloc(value.l, NULL);
+            dvmAddTrackedAlloc((Object*)value.l, NULL);
         return (DataObject*) value.l;
     }
 
@@ -1248,7 +1136,7 @@
         char* desc = dexProtoCopyMethodDescriptor(&meth->prototype);
         LOGE("Bad return type in signature '%s'\n", desc);
         free(desc);
-        dvmThrowException("Ljava/lang/InternalError;", NULL);
+        dvmThrowInternalError(NULL);
         return NULL;
     }
     }
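
The removed dvmReflectStartup() illustrates the VM's usual pattern for fast reflective access: resolve a field's byte offset once with dvmFindFieldOffset(), cache it, and later read the field through the raw accessors from ObjectInlines.h. A small sketch of that two-step pattern follows, using the "slot" field of java.lang.reflect.Method from the code above; the cache variable and helper names here are placeholders, not VM symbols.

    #include "Dalvik.h"   /* assumed: ClassObject, dvmFindFieldOffset(), etc. */
    #include <assert.h>

    /* placeholder for a gDvm-style cached offset */
    static int gOffMethodSlot = -1;

    static bool cacheMethodSlotOffset(void)
    {
        ClassObject* methodClass =
            dvmFindSystemClassNoInit("Ljava/lang/reflect/Method;");
        if (methodClass == NULL)
            return false;

        gOffMethodSlot = dvmFindFieldOffset(methodClass, "slot", "I");
        return gOffMethodSlot >= 0;
    }

    static int getMethodSlot(const Object* methodObj)
    {
        /* dvmGetFieldInt() is one of the inline accessors patched above */
        assert(gOffMethodSlot >= 0);
        return dvmGetFieldInt(methodObj, gOffMethodSlot);
    }
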
diff --git a/vm/reflect/Reflect.h b/vm/reflect/Reflect.h
index 42b18c0..1ebdd2a 100644
--- a/vm/reflect/Reflect.h
+++ b/vm/reflect/Reflect.h
@@ -19,11 +19,6 @@
 #ifndef _DALVIK_REFLECT_REFLECT
 #define _DALVIK_REFLECT_REFLECT
 
-bool dvmReflectStartup(void);
-bool dvmReflectProxyStartup(void);
-bool dvmReflectAnnotationStartup(void);
-void dvmReflectShutdown(void);
-
 /*
  * During startup, validate the "box" classes, e.g. java/lang/Integer.
  */
diff --git a/vm/test/TestHash.c b/vm/test/TestHash.c
index 26de141..431e62a 100644
--- a/vm/test/TestHash.c
+++ b/vm/test/TestHash.c
@@ -142,10 +142,10 @@
 
     /* two entries, same hash, different values */
     char* str1;
-    str1 = dvmHashTableLookup(pTab, hash, strdup("one"),
+    str1 = (char*) dvmHashTableLookup(pTab, hash, strdup("one"),
             (HashCompareFunc) strcmp, true);
     assert(str1 != NULL);
-    str = dvmHashTableLookup(pTab, hash, strdup("two"),
+    str = (const char*) dvmHashTableLookup(pTab, hash, strdup("two"),
             (HashCompareFunc) strcmp, true);
 
     /* remove the first one */
@@ -167,10 +167,12 @@
     }
 
     /* see if we can find them */
-    str = dvmHashTableLookup(pTab, hash, "one", (HashCompareFunc) strcmp,false);
+    str = (const char*) dvmHashTableLookup(pTab, hash, "one",
+            (HashCompareFunc) strcmp,false);
     if (str != NULL)
         LOGE("TestHash deleted entry has returned!");
-    str = dvmHashTableLookup(pTab, hash, "two", (HashCompareFunc) strcmp,false);
+    str = (const char*) dvmHashTableLookup(pTab, hash, "two",
+            (HashCompareFunc) strcmp,false);
     if (str == NULL)
         LOGE("TestHash entry vanished\n");